]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'master' of ../netdev-next/
authorDavid S. Miller <davem@davemloft.net>
Fri, 16 Sep 2011 06:58:54 +0000 (02:58 -0400)
committerDavid S. Miller <davem@davemloft.net>
Fri, 16 Sep 2011 06:58:54 +0000 (02:58 -0400)
92 files changed:
Documentation/networking/stmmac.txt
arch/cris/arch-v10/drivers/Kconfig
arch/cris/arch-v32/drivers/Kconfig
arch/mips/txx9/generic/setup_tx4939.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/3com/Kconfig
drivers/net/ethernet/Kconfig
drivers/net/ethernet/adaptec/Kconfig
drivers/net/ethernet/adi/Kconfig
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/atheros/Kconfig
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bfa_cee.c
drivers/net/ethernet/brocade/bna/bfa_defs.h
drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
drivers/net/ethernet/brocade/bna/bfa_ioc.h
drivers/net/ethernet/brocade/bna/bfi.h
drivers/net/ethernet/brocade/bna/bna.h
drivers/net/ethernet/brocade/bna/bna_enet.c
drivers/net/ethernet/brocade/bna/bna_hw_defs.h
drivers/net/ethernet/brocade/bna/bna_types.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/bnad.h
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
drivers/net/ethernet/brocade/bna/cna.h
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/cirrus/Kconfig
drivers/net/ethernet/davicom/Kconfig
drivers/net/ethernet/dec/tulip/Kconfig
drivers/net/ethernet/dlink/Kconfig
drivers/net/ethernet/faraday/Kconfig
drivers/net/ethernet/freescale/fs_enet/Kconfig
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/icplus/Kconfig
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/jme.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/micrel/Kconfig
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/natsemi/ns83820.c
drivers/net/ethernet/nuvoton/Kconfig
drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
drivers/net/ethernet/packetengines/Kconfig
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/rdc/Kconfig
drivers/net/ethernet/realtek/Kconfig
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/sgi/Kconfig
drivers/net/ethernet/sis/Kconfig
drivers/net/ethernet/smsc/Kconfig
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/Makefile
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/mmc.h [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/mmc_core.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/via/Kconfig
drivers/net/irda/sh_irda.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/usb/Kconfig
drivers/s390/net/qeth_core_main.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/fcoe/fcoe.c
include/linux/ethtool.h
include/linux/mii.h
include/linux/netdevice.h
include/rdma/ib_addr.h
net/8021q/vlan_dev.c
net/bridge/br_if.c
net/core/dev.c
net/core/ethtool.c
net/core/link_watch.c
net/core/net-sysfs.c
net/ieee802154/6lowpan.c
net/packet/af_packet.c
net/rds/Kconfig
net/rds/ib_rdma.c
net/rds/xlist.h [deleted file]

index 57a24108b8455aa55b89030474bb0785e3573c1c..40ec92ce4c843397f18be218a3a188e70fbdc182 100644 (file)
@@ -235,7 +235,38 @@ reset procedure etc).
  o enh_desc.c: functions for handling enhanced descriptors
  o norm_desc.c: functions for handling normal descriptors
 
-5) TODO:
+5) Debug Information
+
+The driver exports a lot of information, i.e. internal statistics,
+debug information, MAC and DMA registers, etc.
+
+These can be read in several ways depending on the
+type of the information actually needed.
+
+For example, a user can use the ethtool support
+to get statistics, e.g. using: ethtool -S ethX
+(that shows the Management Counters (MMC) if supported),
+or to see the MAC/DMA registers, e.g. using: ethtool -d ethX
+
+When the kernel is compiled with CONFIG_DEBUG_FS and the
+STMMAC_DEBUG_FS option is enabled, the driver will export the
+following debugfs entries:
+
+/sys/kernel/debug/stmmaceth/descriptors_status
+  To show the DMA TX/RX descriptor rings
+
+Developers can also use the "debug" module parameter to get
+further debug information.
+
+Finally, there are other macros (that cannot be enabled
+via menuconfig) to turn on the RX/TX DMA debugging,
+specific MAC core debug printk etc., and others to enable
+debugging in the TX and RX processes.
+All these are only useful during the development stage
+and should never be enabled inside the code for general usage.
+In fact, these can generate a huge amount of debug messages.
+
+6) TODO:
  o XGMAC is not supported.
  o Review the timer optimisation code to use an embedded device that will be
   available in new chip generations.
index 0d722177992389a5cb257afc302b1d9d1377ccb2..32d90867a9841098177b1fd5969af784ef88ac19 100644 (file)
@@ -4,6 +4,7 @@ config ETRAX_ETHERNET
        bool "Ethernet support"
        depends on ETRAX_ARCH_V10
        select NET_ETHERNET
+       select NET_CORE
        select MII
        help
          This option enables the ETRAX 100LX built-in 10/100Mbit Ethernet
index 41a2732e8b9c406f664d93ce065eff6e646d2487..e47e9c3401b08bbbde729c13e626b9a91823ec18 100644 (file)
@@ -4,6 +4,7 @@ config ETRAX_ETHERNET
        bool "Ethernet support"
        depends on ETRAX_ARCH_V32
        select NET_ETHERNET
+       select NET_CORE
        select MII
        help
          This option enables the ETRAX FS built-in 10/100Mbit Ethernet
index e9f95dcde3790b630d1d3a9c1be0d4adab623b10..ba3cec3155df791292fa54f4490a61aa19d2e91a 100644 (file)
@@ -321,7 +321,7 @@ void __init tx4939_sio_init(unsigned int sclk, unsigned int cts_mask)
 static u32 tx4939_get_eth_speed(struct net_device *dev)
 {
        struct ethtool_cmd cmd;
-       if (dev_ethtool_get_settings(dev, &cmd))
+       if (__ethtool_get_settings(dev, &cmd))
                return 100;     /* default 100Mbps */
 
        return ethtool_cmd_speed(&cmd);
index 8cb75a6efec35337830f08c74ccb1a9f4f1d8e1c..1dcb07ce526317f378ad133c2a66df82498ead0b 100644 (file)
@@ -557,7 +557,7 @@ down:
 static int bond_update_speed_duplex(struct slave *slave)
 {
        struct net_device *slave_dev = slave->dev;
-       struct ethtool_cmd etool = { .cmd = ETHTOOL_GSET };
+       struct ethtool_cmd ecmd;
        u32 slave_speed;
        int res;
 
@@ -565,18 +565,15 @@ static int bond_update_speed_duplex(struct slave *slave)
        slave->speed = SPEED_100;
        slave->duplex = DUPLEX_FULL;
 
-       if (!slave_dev->ethtool_ops || !slave_dev->ethtool_ops->get_settings)
-               return -1;
-
-       res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool);
+       res = __ethtool_get_settings(slave_dev, &ecmd);
        if (res < 0)
                return -1;
 
-       slave_speed = ethtool_cmd_speed(&etool);
+       slave_speed = ethtool_cmd_speed(&ecmd);
        if (slave_speed == 0 || slave_speed == ((__u32) -1))
                return -1;
 
-       switch (etool.duplex) {
+       switch (ecmd.duplex) {
        case DUPLEX_FULL:
        case DUPLEX_HALF:
                break;
@@ -585,7 +582,7 @@ static int bond_update_speed_duplex(struct slave *slave)
        }
 
        slave->speed = slave_speed;
-       slave->duplex = etool.duplex;
+       slave->duplex = ecmd.duplex;
 
        return 0;
 }
index a439cbdda3b9bc3db0361358283e2c3ac5cdcb53..a8bb30cf512dcbe8e0f8c7e58081e42ff6a6a817 100644 (file)
@@ -81,6 +81,7 @@ config PCMCIA_3C589
 config VORTEX
        tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support"
        depends on (PCI || EISA)
+       select NET_CORE
        select MII
        ---help---
          This option enables driver support for a large number of 10Mbps and
index 1f647471e651c7de74338325f2d4eb302762de4b..6dff5a0e733f6f56246569f41094f1ceca88a4f5 100644 (file)
@@ -62,6 +62,7 @@ config JME
        tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          This driver supports the PCI-Express gigabit ethernet adapters
@@ -102,6 +103,7 @@ config FEALNX
        tristate "Myson MTD-8xx PCI Ethernet support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
@@ -112,6 +114,7 @@ source "drivers/net/ethernet/8390/Kconfig"
 
 config NET_NETX
        tristate "NetX Ethernet support"
+       select NET_CORE
        select MII
        depends on ARCH_NETX
        ---help---
@@ -128,6 +131,7 @@ source "drivers/net/ethernet/oki-semi/Kconfig"
 config ETHOC
        tristate "OpenCores 10/100 Mbps Ethernet MAC support"
        depends on HAS_IOMEM && HAS_DMA
+       select NET_CORE
        select MII
        select PHYLIB
        select CRC32
index 5c804bbe3dabdeebc6dc16870e5f0b3177ed8246..0bff571b1bb3887dabf6516cbcb321b0f10dde90 100644 (file)
@@ -22,6 +22,7 @@ config ADAPTEC_STARFIRE
        tristate "Adaptec Starfire/DuraLAN support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network
index 6de9851045cb4d0961c9ed9bd7bba871ace5237b..49a30d37ae4a93cf2c84f1e733093722f0799405 100644 (file)
@@ -23,6 +23,7 @@ config BFIN_MAC
        tristate "Blackfin on-chip MAC support"
        depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537)
        select CRC32
+       select NET_CORE
        select MII
        select PHYLIB
        select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
index 8af1c934dbd58b375cbef687dbd494e335f1da75..238b537b68fe27349ffdfaa4a6c6fc316bea18f5 100644 (file)
@@ -34,6 +34,7 @@ config AMD8111_ETH
        tristate "AMD 8111 (new PCI LANCE) support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          If you have an AMD 8111-based PCI LANCE ethernet card,
@@ -59,6 +60,7 @@ config PCNET32
        tristate "AMD PCnet32 PCI support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
index 26ab8cae28b5c02f5b4db9517b292126a7b8ecc0..1ed886d421f8ad8c24d028a3e66c460fa62caa36 100644 (file)
@@ -22,6 +22,7 @@ config ATL2
        tristate "Atheros L2 Fast Ethernet support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          This driver supports the Atheros L2 fast ethernet adapter.
@@ -33,6 +34,7 @@ config ATL1
        tristate "Atheros/Attansic L1 Gigabit Ethernet support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          This driver supports the Atheros/Attansic L1 gigabit ethernet
@@ -45,6 +47,7 @@ config ATL1E
        tristate "Atheros L1E Gigabit Ethernet support (EXPERIMENTAL)"
        depends on PCI && EXPERIMENTAL
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          This driver supports the Atheros L1E gigabit ethernet adapter.
@@ -56,6 +59,7 @@ config ATL1C
        tristate "Atheros L1C Gigabit Ethernet support (EXPERIMENTAL)"
        depends on PCI && EXPERIMENTAL
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          This driver supports the Atheros L1C gigabit ethernet adapter.
index d82ad221ebd45366d9e08dfef9a464cfe9e63bf6..f15e72e81ac4db2c1fa623c8f09a9820e59e9408 100644 (file)
@@ -22,6 +22,7 @@ config B44
        tristate "Broadcom 440x/47xx ethernet support"
        depends on SSB_POSSIBLE && HAS_DMA
        select SSB
+       select NET_CORE
        select MII
        ---help---
          If you have a network (Ethernet) controller of this type, say Y
@@ -53,6 +54,7 @@ config B44_PCI
 config BCM63XX_ENET
        tristate "Broadcom 63xx internal mac support"
        depends on BCM63XX
+       select NET_CORE
        select MII
        select PHYLIB
        help
index a7e28a2c5348596fa7ff9e16f50e7487559d9005..1485013b4b8ce76b8f6856570ca65de7c3957e11 100644 (file)
@@ -94,6 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
 #define DRV_MODULE_RELDATE     "August 18, 2011"
 
+#define RESET_KIND_SHUTDOWN    0
+#define RESET_KIND_INIT                1
+#define RESET_KIND_SUSPEND     2
+
 #define TG3_DEF_RX_MODE                0
 #define TG3_DEF_TX_MODE                0
 #define TG3_DEF_MSG_ENABLE       \
@@ -187,6 +191,12 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
 #endif
 
+#if (NET_IP_ALIGN != 0)
+#define TG3_RX_OFFSET(tp)      ((tp)->rx_offset)
+#else
+#define TG3_RX_OFFSET(tp)      0
+#endif
+
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)            ((tnapi)->tx_pending / 4)
 #define TG3_TX_BD_DMA_MAX              4096
@@ -718,6 +728,103 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
 }
 
+static void tg3_ape_send_event(struct tg3 *tp, u32 event)
+{
+       int i;
+       u32 apedata;
+
+       /* NCSI does not support APE events */
+       if (tg3_flag(tp, APE_HAS_NCSI))
+               return;
+
+       apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+       if (apedata != APE_SEG_SIG_MAGIC)
+               return;
+
+       apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+       if (!(apedata & APE_FW_STATUS_READY))
+               return;
+
+       /* Wait for up to 1 millisecond for APE to service previous event. */
+       for (i = 0; i < 10; i++) {
+               if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
+                       return;
+
+               apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+
+               if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+                       tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
+                                       event | APE_EVENT_STATUS_EVENT_PENDING);
+
+               tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+
+               if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+                       break;
+
+               udelay(100);
+       }
+
+       if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+               tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+}
+
+static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
+{
+       u32 event;
+       u32 apedata;
+
+       if (!tg3_flag(tp, ENABLE_APE))
+               return;
+
+       switch (kind) {
+       case RESET_KIND_INIT:
+               tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
+                               APE_HOST_SEG_SIG_MAGIC);
+               tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
+                               APE_HOST_SEG_LEN_MAGIC);
+               apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
+               tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
+               tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+                       APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
+               tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
+                               APE_HOST_BEHAV_NO_PHYLOCK);
+               tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
+                                   TG3_APE_HOST_DRVR_STATE_START);
+
+               event = APE_EVENT_STATUS_STATE_START;
+               break;
+       case RESET_KIND_SHUTDOWN:
+               /* With the interface we are currently using,
+                * APE does not track driver state.  Wiping
+                * out the HOST SEGMENT SIGNATURE forces
+                * the APE to assume OS absent status.
+                */
+               tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+
+               if (device_may_wakeup(&tp->pdev->dev) &&
+                   tg3_flag(tp, WOL_ENABLE)) {
+                       tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
+                                           TG3_APE_HOST_WOL_SPEED_AUTO);
+                       apedata = TG3_APE_HOST_DRVR_STATE_WOL;
+               } else
+                       apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
+
+               tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
+
+               event = APE_EVENT_STATUS_STATE_UNLOAD;
+               break;
+       case RESET_KIND_SUSPEND:
+               event = APE_EVENT_STATUS_STATE_SUSPEND;
+               break;
+       default:
+               return;
+       }
+
+       event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
+
+       tg3_ape_send_event(tp, event);
+}
+
 static void tg3_disable_ints(struct tg3 *tp)
 {
        int i;
@@ -1390,6 +1497,149 @@ static void tg3_ump_link_report(struct tg3 *tp)
        tg3_generate_fw_event(tp);
 }
 
+/* tp->lock is held. */
+static void tg3_stop_fw(struct tg3 *tp)
+{
+       if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
+               /* Wait for RX cpu to ACK the previous event. */
+               tg3_wait_for_event_ack(tp);
+
+               tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
+
+               tg3_generate_fw_event(tp);
+
+               /* Wait for RX cpu to ACK this event. */
+               tg3_wait_for_event_ack(tp);
+       }
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
+{
+       tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+                     NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+
+       if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+               switch (kind) {
+               case RESET_KIND_INIT:
+                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+                                     DRV_STATE_START);
+                       break;
+
+               case RESET_KIND_SHUTDOWN:
+                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+                                     DRV_STATE_UNLOAD);
+                       break;
+
+               case RESET_KIND_SUSPEND:
+                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+                                     DRV_STATE_SUSPEND);
+                       break;
+
+               default:
+                       break;
+               }
+       }
+
+       if (kind == RESET_KIND_INIT ||
+           kind == RESET_KIND_SUSPEND)
+               tg3_ape_driver_state_change(tp, kind);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
+{
+       if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+               switch (kind) {
+               case RESET_KIND_INIT:
+                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+                                     DRV_STATE_START_DONE);
+                       break;
+
+               case RESET_KIND_SHUTDOWN:
+                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+                                     DRV_STATE_UNLOAD_DONE);
+                       break;
+
+               default:
+                       break;
+               }
+       }
+
+       if (kind == RESET_KIND_SHUTDOWN)
+               tg3_ape_driver_state_change(tp, kind);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
+{
+       if (tg3_flag(tp, ENABLE_ASF)) {
+               switch (kind) {
+               case RESET_KIND_INIT:
+                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+                                     DRV_STATE_START);
+                       break;
+
+               case RESET_KIND_SHUTDOWN:
+                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+                                     DRV_STATE_UNLOAD);
+                       break;
+
+               case RESET_KIND_SUSPEND:
+                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+                                     DRV_STATE_SUSPEND);
+                       break;
+
+               default:
+                       break;
+               }
+       }
+}
+
+static int tg3_poll_fw(struct tg3 *tp)
+{
+       int i;
+       u32 val;
+
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               /* Wait up to 20ms for init done. */
+               for (i = 0; i < 200; i++) {
+                       if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
+                               return 0;
+                       udelay(100);
+               }
+               return -ENODEV;
+       }
+
+       /* Wait for firmware initialization to complete. */
+       for (i = 0; i < 100000; i++) {
+               tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+               if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+                       break;
+               udelay(10);
+       }
+
+       /* Chip might not be fitted with firmware.  Some Sun onboard
+        * parts are configured like that.  So don't signal the timeout
+        * of the above loop as an error, but do report the lack of
+        * running firmware once.
+        */
+       if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
+               tg3_flag_set(tp, NO_FWARE_REPORTED);
+
+               netdev_info(tp->dev, "No firmware running\n");
+       }
+
+       if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+               /* The 57765 A0 needs a little more
+                * time to do some important work.
+                */
+               mdelay(10);
+       }
+
+       return 0;
+}
+
 static void tg3_link_report(struct tg3 *tp)
 {
        if (!netif_carrier_ok(tp->dev)) {
@@ -2481,12 +2731,6 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
 }
 
 static int tg3_setup_phy(struct tg3 *, int);
-
-#define RESET_KIND_SHUTDOWN    0
-#define RESET_KIND_INIT                1
-#define RESET_KIND_SUSPEND     2
-
-static void tg3_write_sig_post_reset(struct tg3 *, int);
 static int tg3_halt_cpu(struct tg3 *, u32);
 
 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
@@ -2745,15 +2989,237 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
        return ret;
 }
 
-/* Ensures NVRAM data is in bytestream format. */
-static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
-{
-       u32 v;
-       int res = tg3_nvram_read(tp, offset, &v);
-       if (!res)
-               *val = cpu_to_be32(v);
-       return res;
-}
+/* Ensures NVRAM data is in bytestream format. */
+static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
+{
+       u32 v;
+       int res = tg3_nvram_read(tp, offset, &v);
+       if (!res)
+               *val = cpu_to_be32(v);
+       return res;
+}
+
+#define RX_CPU_SCRATCH_BASE    0x30000
+#define RX_CPU_SCRATCH_SIZE    0x04000
+#define TX_CPU_SCRATCH_BASE    0x34000
+#define TX_CPU_SCRATCH_SIZE    0x04000
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
+{
+       int i;
+
+       BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
+
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+               u32 val = tr32(GRC_VCPU_EXT_CTRL);
+
+               tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
+               return 0;
+       }
+       if (offset == RX_CPU_BASE) {
+               for (i = 0; i < 10000; i++) {
+                       tw32(offset + CPU_STATE, 0xffffffff);
+                       tw32(offset + CPU_MODE,  CPU_MODE_HALT);
+                       if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+                               break;
+               }
+
+               tw32(offset + CPU_STATE, 0xffffffff);
+               tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
+               udelay(10);
+       } else {
+               for (i = 0; i < 10000; i++) {
+                       tw32(offset + CPU_STATE, 0xffffffff);
+                       tw32(offset + CPU_MODE,  CPU_MODE_HALT);
+                       if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+                               break;
+               }
+       }
+
+       if (i >= 10000) {
+               netdev_err(tp->dev, "%s timed out, %s CPU\n",
+                          __func__, offset == RX_CPU_BASE ? "RX" : "TX");
+               return -ENODEV;
+       }
+
+       /* Clear firmware's nvram arbitration. */
+       if (tg3_flag(tp, NVRAM))
+               tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
+       return 0;
+}
+
+struct fw_info {
+       unsigned int fw_base;
+       unsigned int fw_len;
+       const __be32 *fw_data;
+};
+
+/* tp->lock is held. */
+static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
+                                u32 cpu_scratch_base, int cpu_scratch_size,
+                                struct fw_info *info)
+{
+       int err, lock_err, i;
+       void (*write_op)(struct tg3 *, u32, u32);
+
+       if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
+               netdev_err(tp->dev,
+                          "%s: Trying to load TX cpu firmware which is 5705\n",
+                          __func__);
+               return -EINVAL;
+       }
+
+       if (tg3_flag(tp, 5705_PLUS))
+               write_op = tg3_write_mem;
+       else
+               write_op = tg3_write_indirect_reg32;
+
+       /* It is possible that bootcode is still loading at this point.
+        * Get the nvram lock first before halting the cpu.
+        */
+       lock_err = tg3_nvram_lock(tp);
+       err = tg3_halt_cpu(tp, cpu_base);
+       if (!lock_err)
+               tg3_nvram_unlock(tp);
+       if (err)
+               goto out;
+
+       for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+               write_op(tp, cpu_scratch_base + i, 0);
+       tw32(cpu_base + CPU_STATE, 0xffffffff);
+       tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
+       for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
+               write_op(tp, (cpu_scratch_base +
+                             (info->fw_base & 0xffff) +
+                             (i * sizeof(u32))),
+                             be32_to_cpu(info->fw_data[i]));
+
+       err = 0;
+
+out:
+       return err;
+}
+
+/* tp->lock is held. */
+static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
+{
+       struct fw_info info;
+       const __be32 *fw_data;
+       int err, i;
+
+       fw_data = (void *)tp->fw->data;
+
+       /* Firmware blob starts with version numbers, followed by
+          start address and length. We are setting complete length.
+          length = end_address_of_bss - start_address_of_text.
+          Remainder is the blob to be loaded contiguously
+          from start address. */
+
+       info.fw_base = be32_to_cpu(fw_data[1]);
+       info.fw_len = tp->fw->size - 12;
+       info.fw_data = &fw_data[3];
+
+       err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
+                                   RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
+                                   &info);
+       if (err)
+               return err;
+
+       err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
+                                   TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
+                                   &info);
+       if (err)
+               return err;
+
+       /* Now startup only the RX cpu. */
+       tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+       tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
+
+       for (i = 0; i < 5; i++) {
+               if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
+                       break;
+               tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+               tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
+               tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
+               udelay(1000);
+       }
+       if (i >= 5) {
+               netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
+                          "should be %08x\n", __func__,
+                          tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
+               return -ENODEV;
+       }
+       tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+       tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
+
+       return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_load_tso_firmware(struct tg3 *tp)
+{
+       struct fw_info info;
+       const __be32 *fw_data;
+       unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
+       int err, i;
+
+       if (tg3_flag(tp, HW_TSO_1) ||
+           tg3_flag(tp, HW_TSO_2) ||
+           tg3_flag(tp, HW_TSO_3))
+               return 0;
+
+       fw_data = (void *)tp->fw->data;
+
+       /* Firmware blob starts with version numbers, followed by
+          start address and length. We are setting complete length.
+          length = end_address_of_bss - start_address_of_text.
+          Remainder is the blob to be loaded contiguously
+          from start address. */
+
+       info.fw_base = be32_to_cpu(fw_data[1]);
+       cpu_scratch_size = tp->fw_len;
+       info.fw_len = tp->fw->size - 12;
+       info.fw_data = &fw_data[3];
+
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+               cpu_base = RX_CPU_BASE;
+               cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
+       } else {
+               cpu_base = TX_CPU_BASE;
+               cpu_scratch_base = TX_CPU_SCRATCH_BASE;
+               cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
+       }
+
+       err = tg3_load_firmware_cpu(tp, cpu_base,
+                                   cpu_scratch_base, cpu_scratch_size,
+                                   &info);
+       if (err)
+               return err;
+
+       /* Now startup the cpu. */
+       tw32(cpu_base + CPU_STATE, 0xffffffff);
+       tw32_f(cpu_base + CPU_PC, info.fw_base);
+
+       for (i = 0; i < 5; i++) {
+               if (tr32(cpu_base + CPU_PC) == info.fw_base)
+                       break;
+               tw32(cpu_base + CPU_STATE, 0xffffffff);
+               tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
+               tw32_f(cpu_base + CPU_PC, info.fw_base);
+               udelay(1000);
+       }
+       if (i >= 5) {
+               netdev_err(tp->dev,
+                          "%s fails to set CPU PC, is %08x should be %08x\n",
+                          __func__, tr32(cpu_base + CPU_PC), info.fw_base);
+               return -ENODEV;
+       }
+       tw32(cpu_base + CPU_STATE, 0xffffffff);
+       tw32_f(cpu_base + CPU_MODE,  0x00000000);
+       return 0;
+}
+
 
 /* tp->lock is held. */
 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
@@ -3334,8 +3800,9 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
        if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
                return 0;
 
-       if ((adv_reg & all_mask) != all_mask)
+       if ((adv_reg & ADVERTISE_ALL) != all_mask)
                return 0;
+
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                u32 tg3_ctrl;
 
@@ -3348,9 +3815,11 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
                if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
                        return 0;
 
-               if ((tg3_ctrl & all_mask) != all_mask)
+               tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+               if (tg3_ctrl != all_mask)
                        return 0;
        }
+
        return 1;
 }
 
@@ -3651,8 +4120,8 @@ relink:
                        newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
                if (newlnkctl != oldlnkctl)
                        pci_write_config_word(tp->pdev,
-                                             pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
-                                             newlnkctl);
+                                             pci_pcie_cap(tp->pdev) +
+                                             PCI_EXP_LNKCTL, newlnkctl);
        }
 
        if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -4982,11 +5451,11 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
         * Callers depend upon this behavior and assume that
         * we leave everything unchanged if we fail.
         */
-       skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
+       skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
        if (skb == NULL)
                return -ENOMEM;
 
-       skb_reserve(skb, tp->rx_offset);
+       skb_reserve(skb, TG3_RX_OFFSET(tp));
 
        mapping = pci_map_single(tp->pdev, skb->data, skb_size,
                                 PCI_DMA_FROMDEVICE);
@@ -5704,7 +6173,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id)
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
-       tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+       tw32_mailbox(tnapi->int_mbox, 0x00000001);
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);
 
@@ -6265,6 +6734,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
+       if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
+           !mss && skb->len > VLAN_ETH_FRAME_LEN)
+               base_flags |= TXD_FLAG_JMB_PKT;
+
 #ifdef BCM_KERNEL_SUPPORTS_8021Q
        if (vlan_tx_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
@@ -6272,10 +6745,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 #endif
 
-       if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
-           !mss && skb->len > VLAN_ETH_FRAME_LEN)
-               base_flags |= TXD_FLAG_JMB_PKT;
-
        len = skb_headlen(skb);
 
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -7105,244 +7574,20 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
 
        tw32(FTQ_RESET, 0xffffffff);
-       tw32(FTQ_RESET, 0x00000000);
-
-       err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
-       err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
-
-       for (i = 0; i < tp->irq_cnt; i++) {
-               struct tg3_napi *tnapi = &tp->napi[i];
-               if (tnapi->hw_status)
-                       memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
-       }
-       if (tp->hw_stats)
-               memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
-
-       return err;
-}
-
-static void tg3_ape_send_event(struct tg3 *tp, u32 event)
-{
-       int i;
-       u32 apedata;
-
-       /* NCSI does not support APE events */
-       if (tg3_flag(tp, APE_HAS_NCSI))
-               return;
-
-       apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
-       if (apedata != APE_SEG_SIG_MAGIC)
-               return;
-
-       apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
-       if (!(apedata & APE_FW_STATUS_READY))
-               return;
-
-       /* Wait for up to 1 millisecond for APE to service previous event. */
-       for (i = 0; i < 10; i++) {
-               if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
-                       return;
-
-               apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
-
-               if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
-                       tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
-                                       event | APE_EVENT_STATUS_EVENT_PENDING);
-
-               tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
-
-               if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
-                       break;
-
-               udelay(100);
-       }
-
-       if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
-               tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
-}
-
-static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
-{
-       u32 event;
-       u32 apedata;
-
-       if (!tg3_flag(tp, ENABLE_APE))
-               return;
-
-       switch (kind) {
-       case RESET_KIND_INIT:
-               tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
-                               APE_HOST_SEG_SIG_MAGIC);
-               tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
-                               APE_HOST_SEG_LEN_MAGIC);
-               apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
-               tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
-               tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
-                       APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
-               tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
-                               APE_HOST_BEHAV_NO_PHYLOCK);
-               tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
-                                   TG3_APE_HOST_DRVR_STATE_START);
-
-               event = APE_EVENT_STATUS_STATE_START;
-               break;
-       case RESET_KIND_SHUTDOWN:
-               /* With the interface we are currently using,
-                * APE does not track driver state.  Wiping
-                * out the HOST SEGMENT SIGNATURE forces
-                * the APE to assume OS absent status.
-                */
-               tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
-
-               if (device_may_wakeup(&tp->pdev->dev) &&
-                   tg3_flag(tp, WOL_ENABLE)) {
-                       tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
-                                           TG3_APE_HOST_WOL_SPEED_AUTO);
-                       apedata = TG3_APE_HOST_DRVR_STATE_WOL;
-               } else
-                       apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
-
-               tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
-
-               event = APE_EVENT_STATUS_STATE_UNLOAD;
-               break;
-       case RESET_KIND_SUSPEND:
-               event = APE_EVENT_STATUS_STATE_SUSPEND;
-               break;
-       default:
-               return;
-       }
-
-       event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
-
-       tg3_ape_send_event(tp, event);
-}
-
-/* tp->lock is held. */
-static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
-{
-       tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
-                     NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
-
-       if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
-               switch (kind) {
-               case RESET_KIND_INIT:
-                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
-                                     DRV_STATE_START);
-                       break;
-
-               case RESET_KIND_SHUTDOWN:
-                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
-                                     DRV_STATE_UNLOAD);
-                       break;
-
-               case RESET_KIND_SUSPEND:
-                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
-                                     DRV_STATE_SUSPEND);
-                       break;
-
-               default:
-                       break;
-               }
-       }
-
-       if (kind == RESET_KIND_INIT ||
-           kind == RESET_KIND_SUSPEND)
-               tg3_ape_driver_state_change(tp, kind);
-}
-
-/* tp->lock is held. */
-static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
-{
-       if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
-               switch (kind) {
-               case RESET_KIND_INIT:
-                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
-                                     DRV_STATE_START_DONE);
-                       break;
-
-               case RESET_KIND_SHUTDOWN:
-                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
-                                     DRV_STATE_UNLOAD_DONE);
-                       break;
-
-               default:
-                       break;
-               }
-       }
-
-       if (kind == RESET_KIND_SHUTDOWN)
-               tg3_ape_driver_state_change(tp, kind);
-}
-
-/* tp->lock is held. */
-static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
-{
-       if (tg3_flag(tp, ENABLE_ASF)) {
-               switch (kind) {
-               case RESET_KIND_INIT:
-                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
-                                     DRV_STATE_START);
-                       break;
-
-               case RESET_KIND_SHUTDOWN:
-                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
-                                     DRV_STATE_UNLOAD);
-                       break;
-
-               case RESET_KIND_SUSPEND:
-                       tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
-                                     DRV_STATE_SUSPEND);
-                       break;
-
-               default:
-                       break;
-               }
-       }
-}
-
-static int tg3_poll_fw(struct tg3 *tp)
-{
-       int i;
-       u32 val;
-
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
-               /* Wait up to 20ms for init done. */
-               for (i = 0; i < 200; i++) {
-                       if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
-                               return 0;
-                       udelay(100);
-               }
-               return -ENODEV;
-       }
-
-       /* Wait for firmware initialization to complete. */
-       for (i = 0; i < 100000; i++) {
-               tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
-               if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
-                       break;
-               udelay(10);
-       }
-
-       /* Chip might not be fitted with firmware.  Some Sun onboard
-        * parts are configured like that.  So don't signal the timeout
-        * of the above loop as an error, but do report the lack of
-        * running firmware once.
-        */
-       if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
-               tg3_flag_set(tp, NO_FWARE_REPORTED);
+       tw32(FTQ_RESET, 0x00000000);
 
-               netdev_info(tp->dev, "No firmware running\n");
-       }
+       err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
+       err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
-               /* The 57765 A0 needs a little more
-                * time to do some important work.
-                */
-               mdelay(10);
+       for (i = 0; i < tp->irq_cnt; i++) {
+               struct tg3_napi *tnapi = &tp->napi[i];
+               if (tnapi->hw_status)
+                       memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }
+       if (tp->hw_stats)
+               memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 
-       return 0;
+       return err;
 }
 
 /* Save PCI command register before chip reset */
@@ -7416,8 +7661,6 @@ static void tg3_restore_pci_state(struct tg3 *tp)
        }
 }
 
-static void tg3_stop_fw(struct tg3 *);
-
 /* tp->lock is held. */
 static int tg3_chip_reset(struct tg3 *tp)
 {
@@ -7664,22 +7907,6 @@ static int tg3_chip_reset(struct tg3 *tp)
        return 0;
 }
 
-/* tp->lock is held. */
-static void tg3_stop_fw(struct tg3 *tp)
-{
-       if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
-               /* Wait for RX cpu to ACK the previous event. */
-               tg3_wait_for_event_ack(tp);
-
-               tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
-
-               tg3_generate_fw_event(tp);
-
-               /* Wait for RX cpu to ACK this event. */
-               tg3_wait_for_event_ack(tp);
-       }
-}
-
 /* tp->lock is held. */
 static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
@@ -7703,227 +7930,6 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
        return 0;
 }
 
-#define RX_CPU_SCRATCH_BASE    0x30000
-#define RX_CPU_SCRATCH_SIZE    0x04000
-#define TX_CPU_SCRATCH_BASE    0x34000
-#define TX_CPU_SCRATCH_SIZE    0x04000
-
-/* tp->lock is held. */
-static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
-{
-       int i;
-
-       BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
-
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
-               u32 val = tr32(GRC_VCPU_EXT_CTRL);
-
-               tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
-               return 0;
-       }
-       if (offset == RX_CPU_BASE) {
-               for (i = 0; i < 10000; i++) {
-                       tw32(offset + CPU_STATE, 0xffffffff);
-                       tw32(offset + CPU_MODE,  CPU_MODE_HALT);
-                       if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
-                               break;
-               }
-
-               tw32(offset + CPU_STATE, 0xffffffff);
-               tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
-               udelay(10);
-       } else {
-               for (i = 0; i < 10000; i++) {
-                       tw32(offset + CPU_STATE, 0xffffffff);
-                       tw32(offset + CPU_MODE,  CPU_MODE_HALT);
-                       if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
-                               break;
-               }
-       }
-
-       if (i >= 10000) {
-               netdev_err(tp->dev, "%s timed out, %s CPU\n",
-                          __func__, offset == RX_CPU_BASE ? "RX" : "TX");
-               return -ENODEV;
-       }
-
-       /* Clear firmware's nvram arbitration. */
-       if (tg3_flag(tp, NVRAM))
-               tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
-       return 0;
-}
-
-struct fw_info {
-       unsigned int fw_base;
-       unsigned int fw_len;
-       const __be32 *fw_data;
-};
-
-/* tp->lock is held. */
-static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
-                                int cpu_scratch_size, struct fw_info *info)
-{
-       int err, lock_err, i;
-       void (*write_op)(struct tg3 *, u32, u32);
-
-       if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
-               netdev_err(tp->dev,
-                          "%s: Trying to load TX cpu firmware which is 5705\n",
-                          __func__);
-               return -EINVAL;
-       }
-
-       if (tg3_flag(tp, 5705_PLUS))
-               write_op = tg3_write_mem;
-       else
-               write_op = tg3_write_indirect_reg32;
-
-       /* It is possible that bootcode is still loading at this point.
-        * Get the nvram lock first before halting the cpu.
-        */
-       lock_err = tg3_nvram_lock(tp);
-       err = tg3_halt_cpu(tp, cpu_base);
-       if (!lock_err)
-               tg3_nvram_unlock(tp);
-       if (err)
-               goto out;
-
-       for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
-               write_op(tp, cpu_scratch_base + i, 0);
-       tw32(cpu_base + CPU_STATE, 0xffffffff);
-       tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
-       for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
-               write_op(tp, (cpu_scratch_base +
-                             (info->fw_base & 0xffff) +
-                             (i * sizeof(u32))),
-                             be32_to_cpu(info->fw_data[i]));
-
-       err = 0;
-
-out:
-       return err;
-}
-
-/* tp->lock is held. */
-static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
-{
-       struct fw_info info;
-       const __be32 *fw_data;
-       int err, i;
-
-       fw_data = (void *)tp->fw->data;
-
-       /* Firmware blob starts with version numbers, followed by
-          start address and length. We are setting complete length.
-          length = end_address_of_bss - start_address_of_text.
-          Remainder is the blob to be loaded contiguously
-          from start address. */
-
-       info.fw_base = be32_to_cpu(fw_data[1]);
-       info.fw_len = tp->fw->size - 12;
-       info.fw_data = &fw_data[3];
-
-       err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
-                                   RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
-                                   &info);
-       if (err)
-               return err;
-
-       err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
-                                   TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
-                                   &info);
-       if (err)
-               return err;
-
-       /* Now startup only the RX cpu. */
-       tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
-       tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
-
-       for (i = 0; i < 5; i++) {
-               if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
-                       break;
-               tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
-               tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
-               tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
-               udelay(1000);
-       }
-       if (i >= 5) {
-               netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
-                          "should be %08x\n", __func__,
-                          tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
-               return -ENODEV;
-       }
-       tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
-       tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
-
-       return 0;
-}
-
-/* tp->lock is held. */
-static int tg3_load_tso_firmware(struct tg3 *tp)
-{
-       struct fw_info info;
-       const __be32 *fw_data;
-       unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
-       int err, i;
-
-       if (tg3_flag(tp, HW_TSO_1) ||
-           tg3_flag(tp, HW_TSO_2) ||
-           tg3_flag(tp, HW_TSO_3))
-               return 0;
-
-       fw_data = (void *)tp->fw->data;
-
-       /* Firmware blob starts with version numbers, followed by
-          start address and length. We are setting complete length.
-          length = end_address_of_bss - start_address_of_text.
-          Remainder is the blob to be loaded contiguously
-          from start address. */
-
-       info.fw_base = be32_to_cpu(fw_data[1]);
-       cpu_scratch_size = tp->fw_len;
-       info.fw_len = tp->fw->size - 12;
-       info.fw_data = &fw_data[3];
-
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
-               cpu_base = RX_CPU_BASE;
-               cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
-       } else {
-               cpu_base = TX_CPU_BASE;
-               cpu_scratch_base = TX_CPU_SCRATCH_BASE;
-               cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
-       }
-
-       err = tg3_load_firmware_cpu(tp, cpu_base,
-                                   cpu_scratch_base, cpu_scratch_size,
-                                   &info);
-       if (err)
-               return err;
-
-       /* Now startup the cpu. */
-       tw32(cpu_base + CPU_STATE, 0xffffffff);
-       tw32_f(cpu_base + CPU_PC, info.fw_base);
-
-       for (i = 0; i < 5; i++) {
-               if (tr32(cpu_base + CPU_PC) == info.fw_base)
-                       break;
-               tw32(cpu_base + CPU_STATE, 0xffffffff);
-               tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
-               tw32_f(cpu_base + CPU_PC, info.fw_base);
-               udelay(1000);
-       }
-       if (i >= 5) {
-               netdev_err(tp->dev,
-                          "%s fails to set CPU PC, is %08x should be %08x\n",
-                          __func__, tr32(cpu_base + CPU_PC), info.fw_base);
-               return -ENODEV;
-       }
-       tw32(cpu_base + CPU_STATE, 0xffffffff);
-       tw32_f(cpu_base + CPU_MODE,  0x00000000);
-       return 0;
-}
-
-
 static int tg3_set_mac_addr(struct net_device *dev, void *p)
 {
        struct tg3 *tp = netdev_priv(dev);
@@ -8103,7 +8109,7 @@ static void tg3_rings_reset(struct tg3 *tp)
                                tw32_mailbox(tp->napi[i].prodmbox, 0);
                        tw32_rx_mbox(tp->napi[i].consmbox, 0);
                        tw32_mailbox_f(tp->napi[i].int_mbox, 1);
-                       tp->napi[0].chk_msi_cnt = 0;
+                       tp->napi[i].chk_msi_cnt = 0;
                        tp->napi[i].last_rx_cons = 0;
                        tp->napi[i].last_tx_cons = 0;
                }
@@ -8799,6 +8805,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
                val = tr32(MSGINT_MODE);
                val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+               if (!tg3_flag(tp, 1SHOT_MSI))
+                       val |= MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, val);
        }
 
@@ -9183,8 +9191,7 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
                                        tnapi->chk_msi_cnt++;
                                        return;
                                }
-                               tw32_mailbox(tnapi->int_mbox,
-                                            tnapi->last_tag << 24);
+                               tg3_msi(0, tnapi);
                        }
                }
                tnapi->chk_msi_cnt = 0;
@@ -9415,7 +9422,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
 
        if (intr_ok) {
                /* Reenable MSI one shot mode. */
-               if (tg3_flag(tp, 57765_PLUS)) {
+               if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
                        val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
                        tw32(MSGINT_MODE, val);
                }
@@ -9593,6 +9600,8 @@ static void tg3_ints_init(struct tg3 *tp)
                u32 msi_mode = tr32(MSGINT_MODE);
                if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
                        msi_mode |= MSGINT_MODE_MULTIVEC_EN;
+               if (!tg3_flag(tp, 1SHOT_MSI))
+                       msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
        }
 defcfg:
@@ -14071,7 +14080,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                /* BCM5785 devices are effectively PCIe devices, and should
                 * follow PCIe codepaths, but do not have a PCIe capabilities
                 * section.
-               */
+                */
                tg3_flag_set(tp, PCI_EXPRESS);
        } else if (!tg3_flag(tp, 5705_PLUS) ||
                   tg3_flag(tp, 5780_CLASS)) {
@@ -15531,7 +15540,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
 
                tnapi->int_mbox = intmbx;
-               if (i < 4)
+               if (i <= 4)
                        intmbx += 0x8;
                else
                        intmbx += 0x4;
index b45b8eb3b9b05fab5d4f4b2035ff28ab36e770bf..8e627186507ceee29a5f281b6c2f36b52850f0b4 100644 (file)
@@ -16,8 +16,6 @@
  * www.brocade.com
  */
 
-#include "bfa_defs_cna.h"
-#include "cna.h"
 #include "bfa_cee.h"
 #include "bfi_cna.h"
 #include "bfa_ioc.h"
index 205b92b3709c6f8d1c131c48f0f8af8cddc0e12e..a81c0ccfc2f88f2d7e8cc4fcbdd0440940369880 100644 (file)
@@ -251,10 +251,10 @@ struct bfa_mfg_block {
  * ---------------------- pci definitions ------------
  */
 
-#define bfa_asic_id_ct(devid)                  \
-       ((devid) == PCI_DEVICE_ID_BROCADE_CT || \
-       (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
-#define bfa_asic_id_ctc(devid) (bfa_asic_id_ct(devid))
+#define bfa_asic_id_ct(device)                 \
+       ((device) == PCI_DEVICE_ID_BROCADE_CT ||        \
+        (device) == PCI_DEVICE_ID_BROCADE_CT_FC)
+#define bfa_asic_id_ctc(device) (bfa_asic_id_ct(device))
 
 enum bfa_mode {
        BFA_MODE_HBA            = 1,
index 7ddd16f819f96a7ffc620873aa0a109515be1836..7e5df90528fcfeef1f06a791c253ff57fc9acdc5 100644 (file)
@@ -18,7 +18,6 @@
 #ifndef __BFA_DEFS_MFG_COMM_H__
 #define __BFA_DEFS_MFG_COMM_H__
 
-#include "cna.h"
 #include "bfa_defs.h"
 
 /**
index f5a3d4e8207846346d968c3fd6bdc16faaaf20b2..9116324865ccccbb2dc9f15a8af239de1decf975 100644 (file)
@@ -274,8 +274,10 @@ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
        ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
                           (__ioc)->asic_mode))
 
-#define        bfa_ioc_isr_mode_set(__ioc, __msix)                     \
-                       ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+#define        bfa_ioc_isr_mode_set(__ioc, __msix) do {                        \
+       if ((__ioc)->ioc_hwif->ioc_isr_mode_set)                        \
+               ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix));   \
+} while (0)
 #define        bfa_ioc_ownership_reset(__ioc)                          \
                        ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
 
index 19654cc7abab0e0a77866ea5e0fa3a4c7fab513f..4e04c140c84c97503b0e74ca7cab71ed5da65514 100644 (file)
@@ -73,20 +73,6 @@ struct bfi_mhdr {
  ****************************************************************************
  */
 
-#define BFI_SGE_INLINE 1
-#define BFI_SGE_INLINE_MAX     (BFI_SGE_INLINE + 1)
-
-/**
- * SG Flags
- */
-enum {
-       BFI_SGE_DATA            = 0,    /*!< data address, not last          */
-       BFI_SGE_DATA_CPL        = 1,    /*!< data addr, last in current page */
-       BFI_SGE_DATA_LAST       = 3,    /*!< data address, last              */
-       BFI_SGE_LINK            = 2,    /*!< link address                    */
-       BFI_SGE_PGDLEN          = 2,    /*!< cumulative data length for page */
-};
-
 /**
  * DMA addresses
  */
@@ -97,33 +83,6 @@ union bfi_addr_u {
        } a32;
 };
 
-/**
- * Scatter Gather Element
- */
-struct bfi_sge {
-#ifdef __BIGENDIAN
-       u32     flags:2,
-                       rsvd:2,
-                       sg_len:28;
-#else
-       u32     sg_len:28,
-                       rsvd:2,
-                       flags:2;
-#endif
-       union bfi_addr_u sga;
-};
-
-/**
- * Scatter Gather Page
- */
-#define BFI_SGPG_DATA_SGES             7
-#define BFI_SGPG_SGES_MAX              (BFI_SGPG_DATA_SGES + 1)
-#define BFI_SGPG_RSVD_WD_LEN   8
-struct bfi_sgpg {
-       struct bfi_sge sges[BFI_SGPG_SGES_MAX];
-       u32     rsvd[BFI_SGPG_RSVD_WD_LEN];
-};
-
 /*
  * Large Message structure - 128 Bytes size Msgs
  */
@@ -131,11 +90,6 @@ struct bfi_sgpg {
 #define BFI_LMSG_PL_WSZ        \
                        ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
 
-struct bfi_msg {
-       struct bfi_mhdr mhdr;
-       u32     pl[BFI_LMSG_PL_WSZ];
-};
-
 /**
  * Mailbox message structure
  */
index 2a587c5fdc2032895bb30ff8fe3b17db35c27494..3a6e7906149c5dcbc00166c9f6c648f6651ca5fb 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
 #ifndef __BNA_H__
 #define __BNA_H__
 
-#include "bfa_cs.h"
+#include "bfa_defs.h"
 #include "bfa_ioc.h"
-#include "cna.h"
+#include "bfi_enet.h"
 #include "bna_types.h"
 
 extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
@@ -395,12 +400,8 @@ void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
 void bna_uninit(struct bna *bna);
 int bna_num_txq_set(struct bna *bna, int num_txq);
 int bna_num_rxp_set(struct bna *bna, int num_rxp);
-void bna_stats_get(struct bna *bna);
-void bna_get_perm_mac(struct bna *bna, u8 *mac);
 void bna_hw_stats_get(struct bna *bna);
 
-/* APIs for Rx */
-
 /* APIs for RxF */
 struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
 void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
@@ -521,11 +522,6 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
 void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-void bna_rx_hds_enable(struct bna_rx *rx, struct bna_hds_config *hds_config,
-                      void (*cbfn)(struct bnad *, struct bna_rx *));
-void bna_rx_hds_disable(struct bna_rx *rx,
-                       void (*cbfn)(struct bnad *, struct bna_rx *));
-
 /**
  * ENET
  */
index 68a275d66fcfbba67952e10064793e49e735846e..26f5c5abfd1f50aced1931148dbb66b3dd81b161 100644 (file)
@@ -167,13 +167,14 @@ bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
         * Store only if not set earlier, since BNAD can override the HW
         * attributes
         */
-       if (!ioceth->attr.num_txq)
+       if (!ioceth->attr.fw_query_complete) {
                ioceth->attr.num_txq = ntohl(rsp->max_cfg);
-       if (!ioceth->attr.num_rxp)
                ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
-       ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
-       ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
-       ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
+               ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
+               ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
+               ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
+               ioceth->attr.fw_query_complete = true;
+       }
 
        bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
 }
@@ -1693,6 +1694,16 @@ static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
        bna_cb_ioceth_reset
 };
 
+static void bna_attr_init(struct bna_ioceth *ioceth)
+{
+       ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
+       ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
+       ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
+       ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
+       ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
+       ioceth->attr.fw_query_complete = false;
+}
+
 static void
 bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
                struct bna_res_info *res_info)
@@ -1738,6 +1749,8 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
        ioceth->stop_cbfn = NULL;
        ioceth->stop_cbarg = NULL;
 
+       bna_attr_init(ioceth);
+
        bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
 }
 
@@ -2036,7 +2049,8 @@ bna_uninit(struct bna *bna)
 int
 bna_num_txq_set(struct bna *bna, int num_txq)
 {
-       if (num_txq > 0 && (num_txq <= bna->ioceth.attr.num_txq)) {
+       if (bna->ioceth.attr.fw_query_complete &&
+               (num_txq <= bna->ioceth.attr.num_txq)) {
                bna->ioceth.attr.num_txq = num_txq;
                return BNA_CB_SUCCESS;
        }
@@ -2047,7 +2061,8 @@ bna_num_txq_set(struct bna *bna, int num_txq)
 int
 bna_num_rxp_set(struct bna *bna, int num_rxp)
 {
-       if (num_rxp > 0 && (num_rxp <= bna->ioceth.attr.num_rxp)) {
+       if (bna->ioceth.attr.fw_query_complete &&
+               (num_rxp <= bna->ioceth.attr.num_rxp)) {
                bna->ioceth.attr.num_rxp = num_rxp;
                return BNA_CB_SUCCESS;
        }
index 07bb792898243163a67bce69fd12574b9e1baae2..dde8a463b8d9828859e1f00ff699de70e05beb41 100644 (file)
  * SW imposed limits
  *
  */
+#define BFI_ENET_DEF_TXQ               1
+#define BFI_ENET_DEF_RXP               1
+#define BFI_ENET_DEF_UCAM              1
+#define BFI_ENET_DEF_RITSZ             1
 
 #define BFI_ENET_MAX_MCAM              256
 
        (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK);          \
        (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK);            \
        (_bna)->bits.halt_status_bits = __HFN_INT_LL_HALT;              \
+       (_bna)->bits.halt_mask_bits = __HFN_INT_LL_HALT;                \
 }
 
 #define ct2_reg_addr_init(_bna, _pcidev)                               \
index 8a6da0c3cd89febc63f6cfbe33832e109e19dea2..242d7997ffb260983780dd195bb42d84f3fe2f40 100644 (file)
@@ -21,7 +21,6 @@
 #include "cna.h"
 #include "bna_hw_defs.h"
 #include "bfa_cee.h"
-#include "bfi_enet.h"
 #include "bfa_msgq.h"
 
 /**
@@ -324,6 +323,7 @@ struct bna_qpt {
 };
 
 struct bna_attr {
+       bool                    fw_query_complete;
        int                     num_txq;
        int                     num_rxp;
        int                     num_ucmac;
index 181561c13c50a7c2ee9722dbe3ef8c1d8001a17b..b7f96ab8b30cbb88f3171d109d2e0f230ba40c64 100644 (file)
@@ -102,6 +102,28 @@ bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
        }
 }
 
+static u32
+bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
+       u32 index, u32 depth, struct sk_buff *skb, u32 frag)
+{
+       int j;
+       array[index].skb = NULL;
+
+       dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
+                       skb_headlen(skb), DMA_TO_DEVICE);
+       dma_unmap_addr_set(&array[index], dma_addr, 0);
+       BNA_QE_INDX_ADD(index, 1, depth);
+
+       for (j = 0; j < frag; j++) {
+               dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
+                         skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
+               dma_unmap_addr_set(&array[index], dma_addr, 0);
+               BNA_QE_INDX_ADD(index, 1, depth);
+       }
+
+       return index;
+}
+
 /*
  * Frees all pending Tx Bufs
  * At this point no activity is expected on the Q,
@@ -115,39 +137,20 @@ bnad_free_all_txbufs(struct bnad *bnad,
        struct bnad_unmap_q *unmap_q = tcb->unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff          *skb = NULL;
-       int                     i;
+       int                     q;
 
        unmap_array = unmap_q->unmap_array;
 
-       unmap_cons = 0;
-       while (unmap_cons < unmap_q->q_depth) {
-               skb = unmap_array[unmap_cons].skb;
-               if (!skb) {
-                       unmap_cons++;
+       for (q = 0; q < unmap_q->q_depth; q++) {
+               skb = unmap_array[q].skb;
+               if (!skb)
                        continue;
-               }
-               unmap_array[unmap_cons].skb = NULL;
-
-               dma_unmap_single(&bnad->pcidev->dev,
-                                dma_unmap_addr(&unmap_array[unmap_cons],
-                                               dma_addr), skb_headlen(skb),
-                                               DMA_TO_DEVICE);
 
-               dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
-               if (++unmap_cons >= unmap_q->q_depth)
-                       break;
+               unmap_cons = q;
+               unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
+                               unmap_cons, unmap_q->q_depth, skb,
+                               skb_shinfo(skb)->nr_frags);
 
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       dma_unmap_page(&bnad->pcidev->dev,
-                                      dma_unmap_addr(&unmap_array[unmap_cons],
-                                                     dma_addr),
-                                      skb_shinfo(skb)->frags[i].size,
-                                      DMA_TO_DEVICE);
-                       dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
-                                          0);
-                       if (++unmap_cons >= unmap_q->q_depth)
-                               break;
-               }
                dev_kfree_skb_any(skb);
        }
 }
@@ -164,12 +167,11 @@ static u32
 bnad_free_txbufs(struct bnad *bnad,
                 struct bna_tcb *tcb)
 {
-       u32             sent_packets = 0, sent_bytes = 0;
-       u16             wis, unmap_cons, updated_hw_cons;
+       u32             unmap_cons, sent_packets = 0, sent_bytes = 0;
+       u16             wis, updated_hw_cons;
        struct bnad_unmap_q *unmap_q = tcb->unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff          *skb;
-       int i;
 
        /*
         * Just return if TX is stopped. This check is useful
@@ -195,32 +197,14 @@ bnad_free_txbufs(struct bnad *bnad,
        while (wis) {
                skb = unmap_array[unmap_cons].skb;
 
-               unmap_array[unmap_cons].skb = NULL;
-
                sent_packets++;
                sent_bytes += skb->len;
                wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
 
-               dma_unmap_single(&bnad->pcidev->dev,
-                                dma_unmap_addr(&unmap_array[unmap_cons],
-                                               dma_addr), skb_headlen(skb),
-                                DMA_TO_DEVICE);
-               dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
-               BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
-
-               prefetch(&unmap_array[unmap_cons + 1]);
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       prefetch(&unmap_array[unmap_cons + 1]);
-
-                       dma_unmap_page(&bnad->pcidev->dev,
-                                      dma_unmap_addr(&unmap_array[unmap_cons],
-                                                     dma_addr),
-                                      skb_shinfo(skb)->frags[i].size,
-                                      DMA_TO_DEVICE);
-                       dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
-                                          0);
-                       BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
-               }
+               unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
+                               unmap_cons, unmap_q->q_depth, skb,
+                               skb_shinfo(skb)->nr_frags);
+
                dev_kfree_skb_any(skb);
        }
 
@@ -383,14 +367,14 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
        BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
 
        while (to_alloc--) {
-               if (!wi_range) {
+               if (!wi_range)
                        BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
                                             wi_range);
-               }
                skb = netdev_alloc_skb_ip_align(bnad->netdev,
                                                rcb->rxq->buffer_size);
                if (unlikely(!skb)) {
                        BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
+                       rcb->rxq->rxbuf_alloc_failed++;
                        goto finishing;
                }
                unmap_array[unmap_prod].skb = skb;
@@ -535,43 +519,18 @@ next:
 
        BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
 
-       if (likely(ccb)) {
-               if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
-                       bna_ib_ack(ccb->i_dbell, packets);
-               bnad_refill_rxq(bnad, ccb->rcb[0]);
-               if (ccb->rcb[1])
-                       bnad_refill_rxq(bnad, ccb->rcb[1]);
-       } else {
-               if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
-                       bna_ib_ack(ccb->i_dbell, 0);
-       }
+       if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+               bna_ib_ack_disable_irq(ccb->i_dbell, packets);
+
+       bnad_refill_rxq(bnad, ccb->rcb[0]);
+       if (ccb->rcb[1])
+               bnad_refill_rxq(bnad, ccb->rcb[1]);
 
        clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
 
        return packets;
 }
 
-static void
-bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
-{
-       if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
-               return;
-
-       bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
-       bna_ib_ack(ccb->i_dbell, 0);
-}
-
-static void
-bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
-{
-       unsigned long flags;
-
-       /* Because of polling context */
-       spin_lock_irqsave(&bnad->bna_lock, flags);
-       bnad_enable_rx_irq_unsafe(ccb);
-       spin_unlock_irqrestore(&bnad->bna_lock, flags);
-}
-
 static void
 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
 {
@@ -579,10 +538,9 @@ bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
        struct napi_struct *napi = &rx_ctrl->napi;
 
        if (likely(napi_schedule_prep(napi))) {
-               bnad_disable_rx_irq(bnad, ccb);
                __napi_schedule(napi);
+               rx_ctrl->rx_schedule++;
        }
-       BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
 }
 
 /* MSIX Rx Path Handler */
@@ -590,9 +548,11 @@ static irqreturn_t
 bnad_msix_rx(int irq, void *data)
 {
        struct bna_ccb *ccb = (struct bna_ccb *)data;
-       struct bnad *bnad = ccb->bnad;
 
-       bnad_netif_rx_schedule_poll(bnad, ccb);
+       if (ccb) {
+               ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
+               bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
+       }
 
        return IRQ_HANDLED;
 }
@@ -607,10 +567,11 @@ bnad_msix_mbox_handler(int irq, void *data)
        unsigned long flags;
        struct bnad *bnad = (struct bnad *)data;
 
-       if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
-               return IRQ_HANDLED;
-
        spin_lock_irqsave(&bnad->bna_lock, flags);
+       if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               return IRQ_HANDLED;
+       }
 
        bna_intr_status_get(&bnad->bna, intr_status);
 
@@ -633,15 +594,18 @@ bnad_isr(int irq, void *data)
        struct bnad_rx_ctrl *rx_ctrl;
        struct bna_tcb *tcb = NULL;
 
-       if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
                return IRQ_NONE;
+       }
 
        bna_intr_status_get(&bnad->bna, intr_status);
 
-       if (unlikely(!intr_status))
+       if (unlikely(!intr_status)) {
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
                return IRQ_NONE;
-
-       spin_lock_irqsave(&bnad->bna_lock, flags);
+       }
 
        if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
                bna_mbox_handler(&bnad->bna, intr_status);
@@ -1001,7 +965,7 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
 
        mdelay(BNAD_TXRX_SYNC_MDELAY);
 
-       for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
+       for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
                rx_ctrl = &rx_info->rx_ctrl[i];
                ccb = rx_ctrl->ccb;
                if (!ccb)
@@ -1030,7 +994,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
        int i;
        int j;
 
-       for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
+       for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
                rx_ctrl = &rx_info->rx_ctrl[i];
                ccb = rx_ctrl->ccb;
                if (!ccb)
@@ -1658,32 +1622,32 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget)
 {
        struct bnad_rx_ctrl *rx_ctrl =
                container_of(napi, struct bnad_rx_ctrl, napi);
-       struct bna_ccb *ccb;
-       struct bnad *bnad;
+       struct bnad *bnad = rx_ctrl->bnad;
        int rcvd = 0;
 
-       ccb = rx_ctrl->ccb;
-
-       bnad = ccb->bnad;
+       rx_ctrl->rx_poll_ctr++;
 
        if (!netif_carrier_ok(bnad->netdev))
                goto poll_exit;
 
-       rcvd = bnad_poll_cq(bnad, ccb, budget);
-       if (rcvd == budget)
+       rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
+       if (rcvd >= budget)
                return rcvd;
 
 poll_exit:
-       napi_complete((napi));
+       napi_complete(napi);
+
+       rx_ctrl->rx_complete++;
 
-       BNAD_UPDATE_CTR(bnad, netif_rx_complete);
+       if (rx_ctrl->ccb)
+               bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
 
-       bnad_enable_rx_irq(bnad, ccb);
        return rcvd;
 }
 
+#define BNAD_NAPI_POLL_QUOTA           64
 static void
-bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+bnad_napi_init(struct bnad *bnad, u32 rx_id)
 {
        struct bnad_rx_ctrl *rx_ctrl;
        int i;
@@ -1691,9 +1655,20 @@ bnad_napi_enable(struct bnad *bnad, u32 rx_id)
        /* Initialize & enable NAPI */
        for (i = 0; i < bnad->num_rxp_per_rx; i++) {
                rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
-
                netif_napi_add(bnad->netdev, &rx_ctrl->napi,
-                              bnad_napi_poll_rx, 64);
+                              bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
+       }
+}
+
+static void
+bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+{
+       struct bnad_rx_ctrl *rx_ctrl;
+       int i;
+
+       /* Initialize & enable NAPI */
+       for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+               rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
 
                napi_enable(&rx_ctrl->napi);
        }
@@ -1732,6 +1707,9 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
                bnad_tx_msix_unregister(bnad, tx_info,
                        bnad->num_txq_per_tx);
 
+       if (0 == tx_id)
+               tasklet_kill(&bnad->tx_free_tasklet);
+
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_tx_destroy(tx_info->tx);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -1739,9 +1717,6 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
        tx_info->tx = NULL;
        tx_info->tx_id = 0;
 
-       if (0 == tx_id)
-               tasklet_kill(&bnad->tx_free_tasklet);
-
        bnad_tx_res_free(bnad, res_info);
 }
 
@@ -1852,6 +1827,16 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
        rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
 }
 
+static void
+bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
+{
+       struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+       int i;
+
+       for (i = 0; i < bnad->num_rxp_per_rx; i++)
+               rx_info->rx_ctrl[i].bnad = bnad;
+}
+
 /* Called with mutex_lock(&bnad->conf_mutex) held */
 void
 bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
@@ -1860,23 +1845,23 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
        struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
        struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
        unsigned long flags;
-       int dim_timer_del = 0;
+       int to_del = 0;
 
        if (!rx_info->rx)
                return;
 
        if (0 == rx_id) {
                spin_lock_irqsave(&bnad->bna_lock, flags);
-               dim_timer_del = bnad_dim_timer_running(bnad);
-               if (dim_timer_del)
+               if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+                   test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
                        clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+                       to_del = 1;
+               }
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
-               if (dim_timer_del)
+               if (to_del)
                        del_timer_sync(&bnad->dim_timer);
        }
 
-       bnad_napi_disable(bnad, rx_id);
-
        init_completion(&bnad->bnad_completions.rx_comp);
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
@@ -1886,11 +1871,14 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
        if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
                bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
 
+       bnad_napi_disable(bnad, rx_id);
+
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_rx_destroy(rx_info->rx);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        rx_info->rx = NULL;
+       rx_info->rx_id = 0;
 
        bnad_rx_res_free(bnad, res_info);
 }
@@ -1939,15 +1927,25 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
        if (err)
                return err;
 
+       bnad_rx_ctrl_init(bnad, rx_id);
+
        /* Ask BNA to create one Rx object, supplying required resources */
        spin_lock_irqsave(&bnad->bna_lock, flags);
        rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
                        rx_info);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
-       if (!rx)
+       if (!rx) {
+               err = -ENOMEM;
                goto err_return;
+       }
        rx_info->rx = rx;
 
+       /*
+        * Init NAPI, so that state is set to NAPI_STATE_SCHED,
+        * so that IRQ handler cannot schedule NAPI at this point.
+        */
+       bnad_napi_init(bnad, rx_id);
+
        /* Register ISR for the Rx object */
        if (intr_info->intr_type == BNA_INTR_T_MSIX) {
                err = bnad_rx_msix_register(bnad, rx_info, rx_id,
@@ -1956,9 +1954,6 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
                        goto err_return;
        }
 
-       /* Enable NAPI */
-       bnad_napi_enable(bnad, rx_id);
-
        spin_lock_irqsave(&bnad->bna_lock, flags);
        if (0 == rx_id) {
                /* Set up Dynamic Interrupt Moderation Vector */
@@ -1975,6 +1970,9 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
        bna_rx_enable(rx);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
+       /* Enable scheduling of NAPI */
+       bnad_napi_enable(bnad, rx_id);
+
        return 0;
 
 err_return:
@@ -2014,7 +2012,7 @@ bnad_rx_coalescing_timeo_set(struct bnad *bnad)
 /*
  * Called with bnad->bna_lock held
  */
-static int
+int
 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
 {
        int ret;
@@ -2034,7 +2032,7 @@ bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
 }
 
 /* Should be called with conf_lock held */
-static int
+int
 bnad_enable_default_bcast(struct bnad *bnad)
 {
        struct bnad_rx_info *rx_info = &bnad->rx_info[0];
@@ -2059,15 +2057,13 @@ bnad_enable_default_bcast(struct bnad *bnad)
        return 0;
 }
 
-/* Called with bnad_conf_lock() held */
-static void
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+void
 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
 {
        u16 vid;
        unsigned long flags;
 
-       BUG_ON(!(VLAN_N_VID == BFI_ENET_VLAN_ID_MAX));
-
        for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
                spin_lock_irqsave(&bnad->bna_lock, flags);
                bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
@@ -2176,9 +2172,6 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
 {
        int err;
 
-       /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
-       BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
-                  skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
        if (skb_header_cloned(skb)) {
                err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                if (err) {
@@ -2205,7 +2198,6 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
        } else {
                struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 
-               BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
                ipv6h->payload_len = 0;
                tcp_hdr(skb)->check =
                        ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
@@ -2227,7 +2219,7 @@ bnad_q_num_init(struct bnad *bnad)
        int rxps;
 
        rxps = min((uint)num_online_cpus(),
-                       (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
+                       (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
 
        if (!(bnad->cfg_flags & BNAD_CF_MSIX))
                rxps = 1;       /* INTx */
@@ -2356,15 +2348,16 @@ bnad_enable_msix(struct bnad *bnad)
        ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
        if (ret > 0) {
                /* Not enough MSI-X vectors. */
+               pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
+                       ret, bnad->msix_num);
 
                spin_lock_irqsave(&bnad->bna_lock, flags);
                /* ret = #of vectors that we got */
-               bnad_q_num_adjust(bnad, ret, 0);
+               bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
+                       (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-               bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
-                       + (bnad->num_rx
-                       * bnad->num_rxp_per_rx) +
+               bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
                         BNAD_MAILBOX_MSIX_VECTORS;
 
                if (bnad->msix_num > ret)
@@ -2385,6 +2378,7 @@ bnad_enable_msix(struct bnad *bnad)
        return;
 
 intx_mode:
+       pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
 
        kfree(bnad->msix_table);
        bnad->msix_table = NULL;
@@ -2521,30 +2515,44 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        u32             unmap_prod, wis, wis_used, wi_range;
        u32             vectors, vect_id, i, acked;
        int                     err;
+       unsigned int            len;
+       u32                             gso_size;
 
        struct bnad_unmap_q *unmap_q = tcb->unmap_q;
        dma_addr_t              dma_addr;
        struct bna_txq_entry *txqent;
        u16     flags;
 
-       if (unlikely
-           (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
+       if (unlikely(skb->len <= ETH_HLEN)) {
+               dev_kfree_skb(skb);
+               BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
+               return NETDEV_TX_OK;
+       }
+       if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
                dev_kfree_skb(skb);
+               BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
+               return NETDEV_TX_OK;
+       }
+       if (unlikely(skb_headlen(skb) == 0)) {
+               dev_kfree_skb(skb);
+               BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
                return NETDEV_TX_OK;
        }
 
        /*
         * Takes care of the Tx that is scheduled between clearing the flag
-        * and the netif_stop_all_queue() call.
+        * and the netif_tx_stop_all_queues() call.
         */
        if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
                dev_kfree_skb(skb);
+               BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
                return NETDEV_TX_OK;
        }
 
        vectors = 1 + skb_shinfo(skb)->nr_frags;
-       if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
+       if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
                dev_kfree_skb(skb);
+               BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
                return NETDEV_TX_OK;
        }
        wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
@@ -2582,18 +2590,12 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        }
 
        unmap_prod = unmap_q->producer_index;
-       wis_used = 1;
-       vect_id = 0;
        flags = 0;
 
        txq_prod = tcb->producer_index;
        BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
-       BUG_ON(!(wi_range <= tcb->q_depth));
        txqent->hdr.wi.reserved = 0;
        txqent->hdr.wi.num_vectors = vectors;
-       txqent->hdr.wi.opcode =
-               htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
-                      BNA_TXQ_WI_SEND));
 
        if (vlan_tx_tag_present(skb)) {
                vlan_tag = (u16) vlan_tx_tag_get(skb);
@@ -2608,62 +2610,93 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        txqent->hdr.wi.vlan_tag = htons(vlan_tag);
 
        if (skb_is_gso(skb)) {
+               gso_size = skb_shinfo(skb)->gso_size;
+
+               if (unlikely(gso_size > netdev->mtu)) {
+                       dev_kfree_skb(skb);
+                       BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
+                       return NETDEV_TX_OK;
+               }
+               if (unlikely((gso_size + skb_transport_offset(skb) +
+                       tcp_hdrlen(skb)) >= skb->len)) {
+                       txqent->hdr.wi.opcode =
+                               __constant_htons(BNA_TXQ_WI_SEND);
+                       txqent->hdr.wi.lso_mss = 0;
+                       BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
+               } else {
+                       txqent->hdr.wi.opcode =
+                               __constant_htons(BNA_TXQ_WI_SEND_LSO);
+                       txqent->hdr.wi.lso_mss = htons(gso_size);
+               }
+
                err = bnad_tso_prepare(bnad, skb);
-               if (err) {
+               if (unlikely(err)) {
                        dev_kfree_skb(skb);
+                       BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
                        return NETDEV_TX_OK;
                }
-               txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
                flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
                txqent->hdr.wi.l4_hdr_size_n_offset =
                        htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
                              (tcp_hdrlen(skb) >> 2,
                               skb_transport_offset(skb)));
-       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               u8 proto = 0;
-
+       } else {
+               txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
                txqent->hdr.wi.lso_mss = 0;
 
-               if (skb->protocol == htons(ETH_P_IP))
-                       proto = ip_hdr(skb)->protocol;
-               else if (skb->protocol == htons(ETH_P_IPV6)) {
-                       /* nexthdr may not be TCP immediately. */
-                       proto = ipv6_hdr(skb)->nexthdr;
+               if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
+                       dev_kfree_skb(skb);
+                       BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
+                       return NETDEV_TX_OK;
                }
-               if (proto == IPPROTO_TCP) {
-                       flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
-                       txqent->hdr.wi.l4_hdr_size_n_offset =
-                               htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
-                                     (0, skb_transport_offset(skb)));
 
-                       BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+               if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                       u8 proto = 0;
 
-                       BUG_ON(!(skb_headlen(skb) >=
-                               skb_transport_offset(skb) + tcp_hdrlen(skb)));
-
-               } else if (proto == IPPROTO_UDP) {
-                       flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
-                       txqent->hdr.wi.l4_hdr_size_n_offset =
-                               htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
-                                     (0, skb_transport_offset(skb)));
-
-                       BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+                       if (skb->protocol == __constant_htons(ETH_P_IP))
+                               proto = ip_hdr(skb)->protocol;
+                       else if (skb->protocol ==
+                                __constant_htons(ETH_P_IPV6)) {
+                               /* nexthdr may not be TCP immediately. */
+                               proto = ipv6_hdr(skb)->nexthdr;
+                       }
+                       if (proto == IPPROTO_TCP) {
+                               flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+                               txqent->hdr.wi.l4_hdr_size_n_offset =
+                                       htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+                                             (0, skb_transport_offset(skb)));
+
+                               BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+
+                               if (unlikely(skb_headlen(skb) <
+                               skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+                                       dev_kfree_skb(skb);
+                                       BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
+                                       return NETDEV_TX_OK;
+                               }
 
-                       BUG_ON(!(skb_headlen(skb) >=
-                                  skb_transport_offset(skb) +
-                                  sizeof(struct udphdr)));
-               } else {
-                       err = skb_checksum_help(skb);
-                       BNAD_UPDATE_CTR(bnad, csum_help);
-                       if (err) {
+                       } else if (proto == IPPROTO_UDP) {
+                               flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+                               txqent->hdr.wi.l4_hdr_size_n_offset =
+                                       htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+                                             (0, skb_transport_offset(skb)));
+
+                               BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+                               if (unlikely(skb_headlen(skb) <
+                                   skb_transport_offset(skb) +
+                                   sizeof(struct udphdr))) {
+                                       dev_kfree_skb(skb);
+                                       BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
+                                       return NETDEV_TX_OK;
+                               }
+                       } else {
                                dev_kfree_skb(skb);
-                               BNAD_UPDATE_CTR(bnad, csum_help_err);
+                               BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
                                return NETDEV_TX_OK;
                        }
+               } else {
+                       txqent->hdr.wi.l4_hdr_size_n_offset = 0;
                }
-       } else {
-               txqent->hdr.wi.lso_mss = 0;
-               txqent->hdr.wi.l4_hdr_size_n_offset = 0;
        }
 
        txqent->hdr.wi.flags = htons(flags);
@@ -2671,20 +2704,37 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        txqent->hdr.wi.frame_length = htonl(skb->len);
 
        unmap_q->unmap_array[unmap_prod].skb = skb;
-       BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
-       txqent->vector[vect_id].length = htons(skb_headlen(skb));
+       len = skb_headlen(skb);
+       txqent->vector[0].length = htons(len);
        dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
                                  skb_headlen(skb), DMA_TO_DEVICE);
        dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
                           dma_addr);
 
-       BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+       BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
        BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
 
+       vect_id = 0;
+       wis_used = 1;
+
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                u16             size = frag->size;
 
+               if (unlikely(size == 0)) {
+                       unmap_prod = unmap_q->producer_index;
+
+                       unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
+                                          unmap_q->unmap_array,
+                                          unmap_prod, unmap_q->q_depth, skb,
+                                          i);
+                       dev_kfree_skb(skb);
+                       BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
+                       return NETDEV_TX_OK;
+               }
+
+               len += size;
+
                if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
                        vect_id = 0;
                        if (--wi_range)
@@ -2695,10 +2745,10 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                                wis_used = 0;
                                BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
                                                     txqent, wi_range);
-                               BUG_ON(!(wi_range <= tcb->q_depth));
                        }
                        wis_used++;
-                       txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+                       txqent->hdr.wi_ext.opcode =
+                               __constant_htons(BNA_TXQ_WI_EXTENSION);
                }
 
                BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
@@ -2711,6 +2761,18 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
        }
 
+       if (unlikely(len != skb->len)) {
+               unmap_prod = unmap_q->producer_index;
+
+               unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
+                               unmap_q->unmap_array, unmap_prod,
+                               unmap_q->q_depth, skb,
+                               skb_shinfo(skb)->nr_frags);
+               dev_kfree_skb(skb);
+               BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
+               return NETDEV_TX_OK;
+       }
+
        unmap_q->producer_index = unmap_prod;
        BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
        tcb->producer_index = txq_prod;
@@ -2721,6 +2783,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_OK;
 
        bna_txq_prod_indx_doorbell(tcb);
+       smp_mb();
 
        if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
                tasklet_schedule(&bnad->tx_free_tasklet);
@@ -2748,7 +2811,7 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
        return stats;
 }
 
-static void
+void
 bnad_set_rx_mode(struct net_device *netdev)
 {
        struct bnad *bnad = netdev_priv(netdev);
@@ -2787,6 +2850,9 @@ bnad_set_rx_mode(struct net_device *netdev)
                }
        }
 
+       if (bnad->rx_info[0].rx == NULL)
+               goto unlock;
+
        bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
 
        if (!netdev_mc_empty(netdev)) {
@@ -2933,18 +2999,21 @@ bnad_netpoll(struct net_device *netdev)
                bnad_isr(bnad->pcidev->irq, netdev);
                bna_intx_enable(&bnad->bna, curr_mask);
        } else {
+               /*
+                * Tx processing may happen in sending context, so no need
+                * to explicitly process completions here
+                */
+
+               /* Rx processing */
                for (i = 0; i < bnad->num_rx; i++) {
                        rx_info = &bnad->rx_info[i];
                        if (!rx_info->rx)
                                continue;
                        for (j = 0; j < bnad->num_rxp_per_rx; j++) {
                                rx_ctrl = &rx_info->rx_ctrl[j];
-                               if (rx_ctrl->ccb) {
-                                       bnad_disable_rx_irq(bnad,
-                                                           rx_ctrl->ccb);
+                               if (rx_ctrl->ccb)
                                        bnad_netif_rx_schedule_poll(bnad,
                                                            rx_ctrl->ccb);
-                               }
                        }
                }
        }
@@ -3126,7 +3195,7 @@ static int __devinit
 bnad_pci_probe(struct pci_dev *pdev,
                const struct pci_device_id *pcidev_id)
 {
-       bool    using_dac = false;
+       bool    using_dac;
        int     err;
        struct bnad *bnad;
        struct bna *bna;
@@ -3249,12 +3318,19 @@ bnad_pci_probe(struct pci_dev *pdev,
                        bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
                        err = -EIO;
        }
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+       if (err)
+               goto disable_ioceth;
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
-       if (err)
+       if (err) {
+               err = -EIO;
                goto disable_ioceth;
+       }
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
@@ -3266,6 +3342,8 @@ bnad_pci_probe(struct pci_dev *pdev,
        bnad_set_netdev_perm_addr(bnad);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
+       mutex_unlock(&bnad->conf_mutex);
+
        /* Finally, reguister with net_device layer */
        err = register_netdev(netdev);
        if (err) {
@@ -3274,6 +3352,8 @@ bnad_pci_probe(struct pci_dev *pdev,
        }
        set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
 
+       return 0;
+
 probe_success:
        mutex_unlock(&bnad->conf_mutex);
        return 0;
index 5b5451edf4979771fcd94d9b2f42641f114e73f8..1c9328d564d2894662a6488c96b27f2bc29d50ff 100644 (file)
 #define BNAD_TXQ_DEPTH         2048
 #define BNAD_RXQ_DEPTH         2048
 
-#define BNAD_MAX_TXS           1
+#define BNAD_MAX_TX            1
 #define BNAD_MAX_TXQ_PER_TX    8       /* 8 priority queues */
 #define BNAD_TXQ_NUM           1
 
-#define BNAD_MAX_RXS           1
-#define BNAD_MAX_RXPS_PER_RX   16
+#define BNAD_MAX_RX            1
+#define BNAD_MAX_RXP_PER_RX    16
 #define BNAD_MAX_RXQ_PER_RXP   2
 
 /*
  */
 struct bnad_rx_ctrl {
        struct bna_ccb *ccb;
+       struct bnad *bnad;
        unsigned long  flags;
        struct napi_struct      napi;
+       u64             rx_intr_ctr;
+       u64             rx_poll_ctr;
+       u64             rx_schedule;
+       u64             rx_keep_poll;
+       u64             rx_complete;
 };
 
 #define BNAD_RXMODE_PROMISC_DEFAULT    BNA_RXMODE_PROMISC
 
-#define BNAD_GET_TX_ID(_skb)   (0)
-
 /*
  * GLOBAL #defines (CONSTANTS)
  */
 #define BNAD_NAME                      "bna"
 #define BNAD_NAME_LEN                  64
 
-#define BNAD_VERSION                   "3.0.2.0"
+#define BNAD_VERSION                   "3.0.2.1"
 
 #define BNAD_MAILBOX_MSIX_INDEX                0
 #define BNAD_MAILBOX_MSIX_VECTORS      1
@@ -82,6 +86,10 @@ struct bnad_rx_ctrl {
 #define BNAD_MAX_Q_DEPTH               0x10000
 #define BNAD_MIN_Q_DEPTH               0x200
 
+#define BNAD_MAX_RXQ_DEPTH             (BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq)
+/* keeping MAX TX and RX Q depth equal */
+#define BNAD_MAX_TXQ_DEPTH             BNAD_MAX_RXQ_DEPTH
+
 #define BNAD_JUMBO_MTU                 9000
 
 #define BNAD_NETIF_WAKE_THRESHOLD      8
@@ -146,16 +154,26 @@ struct bnad_drv_stats {
        u64             tcpcsum_offload;
        u64             udpcsum_offload;
        u64             csum_help;
-       u64             csum_help_err;
+       u64             tx_skb_too_short;
+       u64             tx_skb_stopping;
+       u64             tx_skb_max_vectors;
+       u64             tx_skb_mss_too_long;
+       u64             tx_skb_tso_too_short;
+       u64             tx_skb_tso_prepare;
+       u64             tx_skb_non_tso_too_long;
+       u64             tx_skb_tcp_hdr;
+       u64             tx_skb_udp_hdr;
+       u64             tx_skb_csum_err;
+       u64             tx_skb_headlen_too_long;
+       u64             tx_skb_headlen_zero;
+       u64             tx_skb_frag_zero;
+       u64             tx_skb_len_mismatch;
 
        u64             hw_stats_updates;
-       u64             netif_rx_schedule;
-       u64             netif_rx_complete;
        u64             netif_rx_dropped;
 
        u64             link_toggle;
        u64             cee_toggle;
-       u64             cee_up;
 
        u64             rxp_info_alloc_failed;
        u64             mbox_intr_disabled;
@@ -190,7 +208,7 @@ struct bnad_tx_info {
 struct bnad_rx_info {
        struct bna_rx *rx; /* 1:1 between rx_info & rx */
 
-       struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
+       struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
        u32 rx_id;
 } ____cacheline_aligned;
 
@@ -234,8 +252,8 @@ struct bnad {
        struct net_device       *netdev;
 
        /* Data path */
-       struct bnad_tx_info tx_info[BNAD_MAX_TXS];
-       struct bnad_rx_info rx_info[BNAD_MAX_RXS];
+       struct bnad_tx_info tx_info[BNAD_MAX_TX];
+       struct bnad_rx_info rx_info[BNAD_MAX_RX];
 
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        /*
@@ -255,8 +273,8 @@ struct bnad {
        u8                      tx_coalescing_timeo;
        u8                      rx_coalescing_timeo;
 
-       struct bna_rx_config rx_config[BNAD_MAX_RXS];
-       struct bna_tx_config tx_config[BNAD_MAX_TXS];
+       struct bna_rx_config rx_config[BNAD_MAX_RX];
+       struct bna_tx_config tx_config[BNAD_MAX_TX];
 
        void __iomem            *bar0;  /* BAR0 address */
 
@@ -283,8 +301,8 @@ struct bnad {
        /* Control path resources, memory & irq */
        struct bna_res_info res_info[BNA_RES_T_MAX];
        struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX];
-       struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
-       struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
+       struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX];
+       struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX];
 
        struct bnad_completion bnad_completions;
 
@@ -314,6 +332,12 @@ extern u32         bnad_rxqs_per_cq;
  */
 extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
 /* Netdev entry point prototypes */
+extern void bnad_set_rx_mode(struct net_device *netdev);
+extern struct net_device_stats *bnad_get_netdev_stats(
+                               struct net_device *netdev);
+extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+extern int bnad_enable_default_bcast(struct bnad *bnad);
+extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
 extern void bnad_set_ethtool_ops(struct net_device *netdev);
 
 /* Configuration & setup */
@@ -345,15 +369,11 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
 
 #define bnad_enable_rx_irq_unsafe(_ccb)                        \
 {                                                      \
-       if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\
+       if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\
                bna_ib_coalescing_timer_set((_ccb)->i_dbell,    \
                        (_ccb)->rx_coalescing_timeo);           \
                bna_ib_ack((_ccb)->i_dbell, 0);                 \
        }                                                       \
 }
 
-#define bnad_dim_timer_running(_bnad)                          \
-       (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) &&          \
-       (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
-
 #endif /* __BNAD_H__ */
index 1c19dcea83c2486b65f4ef66f630125fa4e0096b..48422244397c250053b92b21f745abe1e7d4df55 100644 (file)
@@ -75,14 +75,25 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
        "tcpcsum_offload",
        "udpcsum_offload",
        "csum_help",
-       "csum_help_err",
+       "tx_skb_too_short",
+       "tx_skb_stopping",
+       "tx_skb_max_vectors",
+       "tx_skb_mss_too_long",
+       "tx_skb_tso_too_short",
+       "tx_skb_tso_prepare",
+       "tx_skb_non_tso_too_long",
+       "tx_skb_tcp_hdr",
+       "tx_skb_udp_hdr",
+       "tx_skb_csum_err",
+       "tx_skb_headlen_too_long",
+       "tx_skb_headlen_zero",
+       "tx_skb_frag_zero",
+       "tx_skb_len_mismatch",
        "hw_stats_updates",
-       "netif_rx_schedule",
-       "netif_rx_complete",
        "netif_rx_dropped",
 
        "link_toggle",
-       "cee_up",
+       "cee_toggle",
 
        "rxp_info_alloc_failed",
        "mbox_intr_disabled",
@@ -201,6 +212,20 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
        "rad_rx_bcast_vlan",
        "rad_rx_drops",
 
+       "rlb_rad_rx_frames",
+       "rlb_rad_rx_octets",
+       "rlb_rad_rx_vlan_frames",
+       "rlb_rad_rx_ucast",
+       "rlb_rad_rx_ucast_octets",
+       "rlb_rad_rx_ucast_vlan",
+       "rlb_rad_rx_mcast",
+       "rlb_rad_rx_mcast_octets",
+       "rlb_rad_rx_mcast_vlan",
+       "rlb_rad_rx_bcast",
+       "rlb_rad_rx_bcast_octets",
+       "rlb_rad_rx_bcast_vlan",
+       "rlb_rad_rx_drops",
+
        "fc_rx_ucast_octets",
        "fc_rx_ucast",
        "fc_rx_ucast_vlan",
@@ -321,7 +346,7 @@ bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
 {
        struct bnad *bnad = netdev_priv(netdev);
        unsigned long flags;
-       int dim_timer_del = 0;
+       int to_del = 0;
 
        if (coalesce->rx_coalesce_usecs == 0 ||
            coalesce->rx_coalesce_usecs >
@@ -348,14 +373,17 @@ bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
        } else {
                if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
                        bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
-                       dim_timer_del = bnad_dim_timer_running(bnad);
-                       if (dim_timer_del) {
+                       if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+                           test_bit(BNAD_RF_DIM_TIMER_RUNNING,
+                           &bnad->run_flags)) {
                                clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
                                                        &bnad->run_flags);
-                               spin_unlock_irqrestore(&bnad->bna_lock, flags);
-                               del_timer_sync(&bnad->dim_timer);
-                               spin_lock_irqsave(&bnad->bna_lock, flags);
+                               to_del = 1;
                        }
+                       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+                       if (to_del)
+                               del_timer_sync(&bnad->dim_timer);
+                       spin_lock_irqsave(&bnad->bna_lock, flags);
                        bnad_rx_coalescing_timeo_set(bnad);
                }
        }
@@ -390,10 +418,10 @@ bnad_get_ringparam(struct net_device *netdev,
 {
        struct bnad *bnad = netdev_priv(netdev);
 
-       ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
+       ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
        ringparam->rx_mini_max_pending = 0;
        ringparam->rx_jumbo_max_pending = 0;
-       ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;
+       ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;
 
        ringparam->rx_pending = bnad->rxq_depth;
        ringparam->rx_mini_max_pending = 0;
@@ -407,6 +435,7 @@ bnad_set_ringparam(struct net_device *netdev,
 {
        int i, current_err, err = 0;
        struct bnad *bnad = netdev_priv(netdev);
+       unsigned long flags;
 
        mutex_lock(&bnad->conf_mutex);
        if (ringparam->rx_pending == bnad->rxq_depth &&
@@ -416,13 +445,13 @@ bnad_set_ringparam(struct net_device *netdev,
        }
 
        if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
-           ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
+           ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
            !BNA_POWER_OF_2(ringparam->rx_pending)) {
                mutex_unlock(&bnad->conf_mutex);
                return -EINVAL;
        }
        if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
-           ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
+           ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
            !BNA_POWER_OF_2(ringparam->tx_pending)) {
                mutex_unlock(&bnad->conf_mutex);
                return -EINVAL;
@@ -430,6 +459,11 @@ bnad_set_ringparam(struct net_device *netdev,
 
        if (ringparam->rx_pending != bnad->rxq_depth) {
                bnad->rxq_depth = ringparam->rx_pending;
+               if (!netif_running(netdev)) {
+                       mutex_unlock(&bnad->conf_mutex);
+                       return 0;
+               }
+
                for (i = 0; i < bnad->num_rx; i++) {
                        if (!bnad->rx_info[i].rx)
                                continue;
@@ -437,10 +471,26 @@ bnad_set_ringparam(struct net_device *netdev,
                        current_err = bnad_setup_rx(bnad, i);
                        if (current_err && !err)
                                err = current_err;
+                       if (!err)
+                               bnad_restore_vlans(bnad, i);
+               }
+
+               if (!err && bnad->rx_info[0].rx) {
+                       /* restore rx configuration */
+                       bnad_enable_default_bcast(bnad);
+                       spin_lock_irqsave(&bnad->bna_lock, flags);
+                       bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+                       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+                       bnad_set_rx_mode(netdev);
                }
        }
        if (ringparam->tx_pending != bnad->txq_depth) {
                bnad->txq_depth = ringparam->tx_pending;
+               if (!netif_running(netdev)) {
+                       mutex_unlock(&bnad->conf_mutex);
+                       return 0;
+               }
+
                for (i = 0; i < bnad->num_tx; i++) {
                        if (!bnad->tx_info[i].tx)
                                continue;
@@ -578,6 +628,16 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
                                sprintf(string, "cq%d_hw_producer_index",
                                        q_num);
                                string += ETH_GSTRING_LEN;
+                               sprintf(string, "cq%d_intr", q_num);
+                               string += ETH_GSTRING_LEN;
+                               sprintf(string, "cq%d_poll", q_num);
+                               string += ETH_GSTRING_LEN;
+                               sprintf(string, "cq%d_schedule", q_num);
+                               string += ETH_GSTRING_LEN;
+                               sprintf(string, "cq%d_keep_poll", q_num);
+                               string += ETH_GSTRING_LEN;
+                               sprintf(string, "cq%d_complete", q_num);
+                               string += ETH_GSTRING_LEN;
                                q_num++;
                        }
                }
@@ -660,7 +720,7 @@ static int
 bnad_get_stats_count_locked(struct net_device *netdev)
 {
        struct bnad *bnad = netdev_priv(netdev);
-       int i, j, count, rxf_active_num = 0, txf_active_num = 0;
+       int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
        u32 bmap;
 
        bmap = bna_tx_rid_mask(&bnad->bna);
@@ -718,6 +778,17 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
                                buf[bi++] = 0; /* ccb->consumer_index */
                                buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
                                                ccb->hw_producer_index);
+
+                               buf[bi++] = bnad->rx_info[i].
+                                               rx_ctrl[j].rx_intr_ctr;
+                               buf[bi++] = bnad->rx_info[i].
+                                               rx_ctrl[j].rx_poll_ctr;
+                               buf[bi++] = bnad->rx_info[i].
+                                               rx_ctrl[j].rx_schedule;
+                               buf[bi++] = bnad->rx_info[i].
+                                               rx_ctrl[j].rx_keep_poll;
+                               buf[bi++] = bnad->rx_info[i].
+                                               rx_ctrl[j].rx_complete;
                        }
        }
        for (i = 0; i < bnad->num_rx; i++) {
index 50fce15feaccc17338be418033b41f463a70d626..cb4874210aa3d20009715b60bbf23bc0885ab8bf 100644 (file)
 
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/bitops.h>
 #include <linux/timer.h>
 #include <linux/interrupt.h>
+#include <linux/if_vlan.h>
 #include <linux/if_ether.h>
-#include <asm/page.h>
-#include <asm/io.h>
-#include <asm/string.h>
-
-#include <linux/list.h>
 
 #define bfa_sm_fault(__event)    do {                            \
-       pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \
-               __event); \
+       pr_err("SM Assertion failure: %s: %d: event = %d\n",    \
+                __FILE__, __LINE__, __event);                  \
 } while (0)
 
 extern char bfa_version[];
index c00e706ab58aadf70e76fe3a14694f6121753403..98849a1fc749995070dadd607175f55ef89d2fa0 100644 (file)
@@ -25,6 +25,7 @@ if NET_ATMEL
 config ARM_AT91_ETHER
        tristate "AT91RM9200 Ethernet support"
        depends on ARM && ARCH_AT91RM9200
+       select NET_CORE
        select MII
        ---help---
          If you wish to compile a kernel for the AT91RM9200 and enable
index e0cacf662914f552de27059a8ddb8f1bb436a6c9..e9386ef524aac19c8b9c11bd81061a4aec598c1a 100644 (file)
@@ -21,6 +21,7 @@ if NET_VENDOR_CIRRUS
 config EP93XX_ETH
        tristate "EP93xx Ethernet support"
        depends on ARM && ARCH_EP93XX
+       select NET_CORE
        select MII
        help
          This is a driver for the ethernet hardware included in EP93xx CPUs.
index 73c5d2080f24cb6eb5336b86ae2be23f834da34d..972b62b318377a8236799a4561a1c9b6d0a844e7 100644 (file)
@@ -6,6 +6,7 @@ config DM9000
        tristate "DM9000 support"
        depends on ARM || BLACKFIN || MIPS
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          Support for DM9000 chipset.
index f6af772b12c904913660b392e069705c31732aa6..1203be0436e20a9851579dd7353bc61b08defd50 100644 (file)
@@ -125,6 +125,7 @@ config WINBOND_840
        tristate "Winbond W89c840 Ethernet support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          This driver is for the Winbond W89c840 chip.  It also works with 
index 84a28a6681627648337e968c531fd3e476cb1151..b5afe218c31be3644df3c7c527a3a574b45e8320 100644 (file)
@@ -66,6 +66,7 @@ config SUNDANCE
        tristate "Sundance Alta support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          This driver is for the Sundance "Alta" chip.
index 5918c68916946acc9cf14202b0e28d823a7515c3..b8974b9e3b479cd90b90bff7235245629ab59635 100644 (file)
@@ -21,6 +21,7 @@ if NET_VENDOR_FARADAY
 config FTMAC100
        tristate "Faraday FTMAC100 10/100 Ethernet support"
        depends on ARM
+       select NET_CORE
        select MII
        ---help---
          This driver supports the FTMAC100 10/100 Ethernet controller
index be92229f2c2a52f447c96a086e4683cc0c341e68..268414d9f2cbfc84272ed6184ea34ab211cdc537 100644 (file)
@@ -1,6 +1,7 @@
 config FS_ENET
        tristate "Freescale Ethernet Driver"
        depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
+       select NET_CORE
        select MII
        select PHYLIB
 
index bba1ffcd92d1f212d5c98495c866efbfbc302f3e..8cca4a62b397ca0d0a6d3eeabaea577b9862e206 100644 (file)
@@ -1002,9 +1002,8 @@ retry_bounce:
                unsigned long dma_addr;
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-               dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
-                                       frag->page_offset, frag->size,
-                                       DMA_TO_DEVICE);
+               dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
+                                           frag->size, DMA_TO_DEVICE);
 
                if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                        goto map_failed_frags;
index e888222762694296094131d9d2cc61f544ab6799..3aff81d7989fca246fb0b1c6dc75675a7a892511 100644 (file)
@@ -5,6 +5,7 @@
 config IP1000
        tristate "IP1000 Gigabit Ethernet support"
        depends on PCI && EXPERIMENTAL
+       select NET_CORE
        select MII
        ---help---
          This driver supports IP1000 gigabit Ethernet cards.
index 4a98e83812b75c83c6150c20181af9dc9cfe1381..61029dc7fa6f82534641e03c2d432466a8a76165 100644 (file)
@@ -21,6 +21,7 @@ if NET_VENDOR_INTEL
 config E100
        tristate "Intel(R) PRO/100+ support"
        depends on PCI
+       select NET_CORE
        select MII
        ---help---
          This driver supports Intel(R) PRO/100 family of adapters.
index a869ee47dde689af20fdf9d68cfe2450ced31934..48a0a23f342f604a04ca2f9fcd8b05a216c1d633 100644 (file)
@@ -1928,8 +1928,9 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
                ctxdesc = txdesc + ((idx + i + 2) & (mask));
                ctxbi = txbi + ((idx + i + 2) & (mask));
 
-               jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
-                                frag->page_offset, frag->size, hidma);
+               jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+                               skb_frag_page(frag),
+                               frag->page_offset, frag->size, hidma);
        }
 
        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
index 1e2c9f072bfd3af99f9cb5c710f752cd4d4ba9f7..7325737fe93b86340b36507939382a75e1934957 100644 (file)
@@ -752,10 +752,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
                desc->l4i_chk = 0;
                desc->byte_cnt = this_frag->size;
-               desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
-                                            this_frag->page,
-                                            this_frag->page_offset,
-                                            this_frag->size, DMA_TO_DEVICE);
+               desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+                                                this_frag, 0,
+                                                this_frag->size,
+                                                DMA_TO_DEVICE);
        }
 }
 
index bd090dbe3ad6fc6d86aa76e720764f5ee782a51c..d10c2e15f4ed026aacdd101dd5bd82dbba985e01 100644 (file)
@@ -22,6 +22,7 @@ if NET_VENDOR_MICREL
 config ARM_KS8695_ETHER
        tristate "KS8695 Ethernet support"
        depends on ARM && ARCH_KS8695
+       select NET_CORE
        select MII
        ---help---
          If you wish to compile a kernel for the KS8695 and want to
@@ -38,6 +39,7 @@ config KS8842
 config KS8851
        tristate "Micrel KS8851 SPI"
        depends on SPI
+       select NET_CORE
        select MII
        select CRC32
        ---help---
@@ -46,6 +48,7 @@ config KS8851
 config KS8851_MLL
        tristate "Micrel KS8851 MLL"
        depends on HAS_IOMEM
+       select NET_CORE
        select MII
        ---help---
          This platform driver is for Micrel KS8851 Address/data bus
@@ -54,6 +57,7 @@ config KS8851_MLL
 config KSZ884X_PCI
        tristate "Micrel KSZ8841/2 PCI"
        depends on PCI
+       select NET_CORE
        select MII
        select CRC32
        ---help---
index 27418d31a09f52e27615c3a9051edb8a87386a8f..710c4aead146967d28101aae707b58d413399cb1 100644 (file)
@@ -4704,8 +4704,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
 
                        dma_buf->dma = pci_map_single(
                                hw_priv->pdev,
-                               page_address(this_frag->page) +
-                               this_frag->page_offset,
+                               skb_frag_address(this_frag),
                                dma_buf->len,
                                PCI_DMA_TODEVICE);
                        set_tx_buf(desc, dma_buf->dma);
index 1a1e20e97a23af42aea539c582432fa1b839a1e8..e0895e40f10adec747705c8460bf501c187d0b9b 100644 (file)
@@ -1160,9 +1160,8 @@ again:
                if (!nr_frags)
                        break;
 
-               buf = pci_map_page(dev->pci_dev, frag->page,
-                                  frag->page_offset,
-                                  frag->size, PCI_DMA_TODEVICE);
+               buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0,
+                                      frag->size, PCI_DMA_TODEVICE);
                dprintk("frag: buf=%08Lx  page=%08lx offset=%08lx\n",
                        (long long)buf, (long) page_to_pfn(frag->page),
                        frag->page_offset);
index 01182b559473cc4812909bccdcbf78cb81de08ce..334c17183095eda5ff5d0775171ecf0a46636c5a 100644 (file)
@@ -22,6 +22,7 @@ config W90P910_ETH
        tristate "Nuvoton w90p910 Ethernet support"
        depends on ARM && ARCH_W90X900
        select PHYLIB
+       select NET_CORE
        select MII
        ---help---
          Say Y here if you want to use built-in Ethernet ports
index c85709d6ff1b7b7633a31e2aea241a954045d518..7efa624272356a784b8c0372a2d2262fb4095ee0 100644 (file)
@@ -5,6 +5,7 @@
 config PCH_GBE
        tristate "OKI SEMICONDUCTOR ML7223 IOH GbE (Intel EG20T PCH)"
        depends on PCI
+       select NET_CORE
        select MII
        ---help---
          This is a gigabit ethernet driver for EG20T PCH.
index 4add1db20f1eaa3c58d059d22fa39455e329e31a..b97132d9dff0c09fdd804ad63335e10a8225343d 100644 (file)
@@ -20,6 +20,7 @@ if NET_PACKET_ENGINE
 config HAMACHI
        tristate "Packet Engines Hamachi GNIC-II support"
        depends on PCI
+       select NET_CORE
        select MII
        ---help---
          If you have a Gigabit Ethernet card of this type, say Y and read
index fad620da7c11c6f726e4300edc2efd016c877467..53220958832382603e120ccfdd5e9489624ff5ec 100644 (file)
@@ -1505,9 +1505,8 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
        for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-               map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
-                                       frag->page_offset, frag->size,
-                                       PCI_DMA_TODEVICE);
+               map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0,
+                                             frag->size, PCI_DMA_TODEVICE);
                map_size[i+1] = frag->size;
                if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
                        nfrags = i;
index de18e4753b64880c2fb6e56441f8ae94ecb54275..694130ebc75be335a2c9e7b38bdf5b8164eb14be 100644 (file)
@@ -1844,8 +1844,8 @@ netxen_map_tx_skb(struct pci_dev *pdev,
                frag = &skb_shinfo(skb)->frags[i];
                nf = &pbuf->frag_array[i+1];
 
-               map = pci_map_page(pdev, frag->page, frag->page_offset,
-                               frag->size, PCI_DMA_TODEVICE);
+               map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
+                                      PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, map))
                        goto unwind;
 
index 8cab61c08c8da9a4084342b7bc2a2a64aead318f..1871d88ee712e3d29c220d50f4e0b2a4b1946350 100644 (file)
@@ -2388,9 +2388,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
                        seg++;
                }
 
-               map = pci_map_page(qdev->pdev, frag->page,
-                                  frag->page_offset, frag->size,
-                                  PCI_DMA_TODEVICE);
+               map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size,
+                                      PCI_DMA_TODEVICE);
 
                err = pci_dma_mapping_error(qdev->pdev, map);
                if (err) {
index 690c93f76ae43fe4166735a85418ae44f1e9770e..501e16b9c2abee7007c689f257cf10bade5a453e 100644 (file)
@@ -2135,8 +2135,8 @@ qlcnic_map_tx_skb(struct pci_dev *pdev,
                frag = &skb_shinfo(skb)->frags[i];
                nf = &pbuf->frag_array[i+1];
 
-               map = pci_map_page(pdev, frag->page, frag->page_offset,
-                               frag->size, PCI_DMA_TODEVICE);
+               map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
+                                      PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, map))
                        goto unwind;
 
index 39360c485867dd23434ab0fe4efd352edf9cdcc2..ce6c6fee3089f63b2f0f900968f6f03a773aaa38 100644 (file)
@@ -1431,10 +1431,8 @@ static int ql_map_send(struct ql_adapter *qdev,
                        map_idx++;
                }
 
-               map =
-                   pci_map_page(qdev->pdev, frag->page,
-                                frag->page_offset, frag->size,
-                                PCI_DMA_TODEVICE);
+               map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size,
+                                      PCI_DMA_TODEVICE);
 
                err = pci_dma_mapping_error(qdev->pdev, map);
                if (err) {
@@ -1477,8 +1475,6 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
 {
        struct sk_buff *skb;
        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
-       struct skb_frag_struct *rx_frag;
-       int nr_frags;
        struct napi_struct *napi = &rx_ring->napi;
 
        napi->dev = qdev->ndev;
@@ -1492,12 +1488,10 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
                return;
        }
        prefetch(lbq_desc->p.pg_chunk.va);
-       rx_frag = skb_shinfo(skb)->frags;
-       nr_frags = skb_shinfo(skb)->nr_frags;
-       rx_frag += nr_frags;
-       rx_frag->page = lbq_desc->p.pg_chunk.page;
-       rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
-       rx_frag->size = length;
+       __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+                            lbq_desc->p.pg_chunk.page,
+                            lbq_desc->p.pg_chunk.offset,
+                            length);
 
        skb->len += length;
        skb->data_len += length;
index 2055f7eb2ba94f6855a37bf8a613dd77e75a757e..c8ba4b3494c17181e507af32cfcd722c1da28c68 100644 (file)
@@ -22,6 +22,7 @@ config R6040
        tristate "RDC R6040 Fast Ethernet Adapter support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        select PHYLIB
        ---help---
index d8df67ac51b9d8bf3eb99f64f96c39e12d3952d8..84083ec6e612774c1c026c48295e216db85a3bf6 100644 (file)
@@ -37,6 +37,7 @@ config 8139CP
        tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)"
        depends on PCI && EXPERIMENTAL
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          This is a driver for the Fast Ethernet PCI network cards based on
@@ -51,6 +52,7 @@ config 8139TOO
        tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          This is a driver for the Fast Ethernet PCI network cards based on
@@ -105,6 +107,7 @@ config R8169
        depends on PCI
        select FW_LOADER
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
index 1cf8c3c1328df82f0e6657b3a013b0efc06d8b19..835bbb534c5dfe96474431bdeb373063f7d16d1f 100644 (file)
@@ -5027,7 +5027,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
 
                txd = tp->TxDescArray + entry;
                len = frag->size;
-               addr = ((void *) page_address(frag->page)) + frag->page_offset;
+               addr = skb_frag_address(frag);
                mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(d, mapping))) {
                        if (net_ratelimit())
index f57ae230817b217c784895691fb1dfadd28470c7..9755b49bbefb209d26ec17066ae7eebcee7fdd00 100644 (file)
@@ -9,6 +9,7 @@ config SH_ETH
                 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
                 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7757)
        select CRC32
+       select NET_CORE
        select MII
        select MDIO_BITBANG
        select PHYLIB
index e832f46660c9edab927d7d335cd6be6a6ff7d989..c1c4bb868a3b06ba23faf5ec7dcd80d06034ed79 100644 (file)
@@ -22,6 +22,7 @@ config SGI_IOC3_ETH
        bool "SGI IOC3 Ethernet"
        depends on PCI && SGI_IP27
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          If you have a network (Ethernet) card of this type, say Y and read
index 68d052b09af1e1ee9950f3b2e0944aca7a8e86c3..f1135cc1bd48abd5b60714c459922df7f762f06a 100644 (file)
@@ -22,6 +22,7 @@ config SIS900
        tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          This is a driver for the Fast Ethernet PCI network cards based on
@@ -38,6 +39,7 @@ config SIS190
        tristate "SiS190/SiS191 gigabit ethernet support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or
index f9619285b5ef20a88d54431da5434b3edf2fc893..1854c88dfb92115c5556be3947882a36f5aa7464 100644 (file)
@@ -37,6 +37,7 @@ config SMC9194
 config SMC91X
        tristate "SMC 91C9x/91C1xxx support"
        select CRC32
+       select NET_CORE
        select MII
        depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
                    MN10300 || COLDFIRE)
@@ -56,6 +57,7 @@ config PCMCIA_SMC91C92
        tristate "SMC 91Cxx PCMCIA support"
        depends on PCMCIA
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA
@@ -68,6 +70,7 @@ config EPIC100
        tristate "SMC EtherPower II"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC,
@@ -78,6 +81,7 @@ config EPIC100
 config SMC911X
        tristate "SMSC LAN911[5678] support"
        select CRC32
+       select NET_CORE
        select MII
        depends on (ARM || SUPERH || MN10300)
        ---help---
@@ -95,6 +99,7 @@ config SMSC911X
        tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
        depends on (ARM || SUPERH || BLACKFIN || MIPS || MN10300)
        select CRC32
+       select NET_CORE
        select MII
        select PHYLIB
        ---help---
index cda61e37c357b817ccf17399b0475caf03ad6a98..8cd9ddec05a0cb9749338ae3b6e9fce251c7a923 100644 (file)
@@ -1,6 +1,7 @@
 config STMMAC_ETH
        tristate "STMicroelectronics 10/100/1000 Ethernet driver"
        depends on HAS_IOMEM
+       select NET_CORE
        select MII
        select PHYLIB
        select CRC32
@@ -11,6 +12,14 @@ config STMMAC_ETH
 
 if STMMAC_ETH
 
+config STMMAC_DEBUG_FS
+       bool "Enable monitoring via sysFS "
+       default n
+       depends on STMMAC_ETH && DEBUG_FS
+       ---help---
+         The stmmac entry in /sys reports DMA TX/RX rings
+         or (if supported) the HW cap register.
+
 config STMMAC_DA
        bool "STMMAC DMA arbitration scheme"
        default n
index 9691733ddb8e7cea628ae6091897f66a458d2759..0f23d95746b76d0f4e4cf47176538936d98f7222 100644 (file)
@@ -2,4 +2,5 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
 stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o     \
              dwmac_lib.o dwmac1000_core.o  dwmac1000_dma.o     \
-             dwmac100_core.o dwmac100_dma.o enh_desc.o  norm_desc.o $(stmmac-y)
+             dwmac100_core.o dwmac100_dma.o enh_desc.o  norm_desc.o \
+             mmc_core.o $(stmmac-y)
index 375ea193e139a4a1bd23ecf68937007402682745..22c61b2ebfa3ce5a6c73ac97abea1234b8f0b3ff 100644 (file)
@@ -29,6 +29,7 @@
 #endif
 
 #include "descs.h"
+#include "mmc.h"
 
 #undef CHIP_DEBUG_PRINT
 /* Turn-on extra printk debug for MAC core, dma and descriptors */
@@ -115,6 +116,37 @@ enum tx_dma_irq_status {
        handle_tx_rx = 3,
 };
 
+/* DMA HW capabilities */
+struct dma_features {
+       unsigned int mbps_10_100;
+       unsigned int mbps_1000;
+       unsigned int half_duplex;
+       unsigned int hash_filter;
+       unsigned int multi_addr;
+       unsigned int pcs;
+       unsigned int sma_mdio;
+       unsigned int pmt_remote_wake_up;
+       unsigned int pmt_magic_frame;
+       unsigned int rmon;
+       /* IEEE 1588-2002*/
+       unsigned int time_stamp;
+       /* IEEE 1588-2008*/
+       unsigned int atime_stamp;
+       /* 802.3az - Energy-Efficient Ethernet (EEE) */
+       unsigned int eee;
+       unsigned int av;
+       /* TX and RX csum */
+       unsigned int tx_coe;
+       unsigned int rx_coe_type1;
+       unsigned int rx_coe_type2;
+       unsigned int rxfifo_over_2048;
+       /* TX and RX number of channels */
+       unsigned int number_rx_channel;
+       unsigned int number_tx_channel;
+       /* Alternate (enhanced) DESC mode*/
+       unsigned int enh_desc;
+};
+
 /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
 #define BUF_SIZE_16KiB 16384
 #define BUF_SIZE_8KiB 8192
@@ -130,17 +162,6 @@ enum tx_dma_irq_status {
 #define MAC_ENABLE_TX          0x00000008      /* Transmitter Enable */
 #define MAC_RNABLE_RX          0x00000004      /* Receiver Enable */
 
-/* MAC Management Counters register */
-#define MMC_CONTROL            0x00000100      /* MMC Control */
-#define MMC_HIGH_INTR          0x00000104      /* MMC High Interrupt */
-#define MMC_LOW_INTR           0x00000108      /* MMC Low Interrupt */
-#define MMC_HIGH_INTR_MASK     0x0000010c      /* MMC High Interrupt Mask */
-#define MMC_LOW_INTR_MASK      0x00000110      /* MMC Low Interrupt Mask */
-
-#define MMC_CONTROL_MAX_FRM_MASK       0x0003ff8       /* Maximum Frame Size */
-#define MMC_CONTROL_MAX_FRM_SHIFT      3
-#define MMC_CONTROL_MAX_FRAME          0x7FF
-
 struct stmmac_desc_ops {
        /* DMA RX descriptor ring initialization */
        void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
@@ -198,6 +219,8 @@ struct stmmac_dma_ops {
        void (*stop_rx) (void __iomem *ioaddr);
        int (*dma_interrupt) (void __iomem *ioaddr,
                              struct stmmac_extra_stats *x);
+       /* If supported then get the optional core features */
+       unsigned int (*get_hw_feature) (void __iomem *ioaddr);
 };
 
 struct stmmac_ops {
@@ -240,6 +263,7 @@ struct mac_device_info {
        const struct stmmac_dma_ops     *dma;
        struct mii_regs mii;    /* MII register Addresses */
        struct mac_link link;
+       unsigned int synopsys_uid;
 };
 
 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
index 0f63b3c83c196c9cafefd7c0f8a386d5d044c759..b1c48b975945b951c27e1853ad3388283bcf6dbe 100644 (file)
@@ -37,11 +37,6 @@ static void dwmac1000_core_init(void __iomem *ioaddr)
        value |= GMAC_CORE_INIT;
        writel(value, ioaddr + GMAC_CONTROL);
 
-       /* STBus Bridge Configuration */
-       /*writel(0xc5608, ioaddr + 0x00007000);*/
-
-       /* Freeze MMC counters */
-       writel(0x8, ioaddr + GMAC_MMC_CTRL);
        /* Mask GMAC interrupts */
        writel(0x207, ioaddr + GMAC_INT_MASK);
 
@@ -229,10 +224,7 @@ static const struct stmmac_ops dwmac1000_ops = {
 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
 {
        struct mac_device_info *mac;
-       u32 uid = readl(ioaddr + GMAC_VERSION);
-
-       pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
-               ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
+       u32 hwid = readl(ioaddr + GMAC_VERSION);
 
        mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
        if (!mac)
@@ -246,6 +238,7 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
        mac->link.speed = GMAC_CONTROL_FES;
        mac->mii.addr = GMAC_MII_ADDR;
        mac->mii.data = GMAC_MII_DATA;
+       mac->synopsys_uid = hwid;
 
        return mac;
 }
index 3dbeea61908563e7261b455a6e333f6003cdfe3b..da66ac511c4c60df8dabe849cd02ba9ae2d76dad 100644 (file)
@@ -118,13 +118,6 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
        writel(csr6, ioaddr + DMA_CONTROL);
 }
 
-/* Not yet implemented --- no RMON module */
-static void dwmac1000_dma_diagnostic_fr(void *data,
-                 struct stmmac_extra_stats *x, void __iomem *ioaddr)
-{
-       return;
-}
-
 static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
 {
        int i;
@@ -139,11 +132,15 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
        }
 }
 
+static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr)
+{
+       return readl(ioaddr + DMA_HW_FEATURE);
+}
+
 const struct stmmac_dma_ops dwmac1000_dma_ops = {
        .init = dwmac1000_dma_init,
        .dump_regs = dwmac1000_dump_dma_regs,
        .dma_mode = dwmac1000_dma_operation_mode,
-       .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr,
        .enable_dma_transmission = dwmac_enable_dma_transmission,
        .enable_dma_irq = dwmac_enable_dma_irq,
        .disable_dma_irq = dwmac_disable_dma_irq,
@@ -152,4 +149,5 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
        .start_rx = dwmac_dma_start_rx,
        .stop_rx = dwmac_dma_stop_rx,
        .dma_interrupt = dwmac_dma_interrupt,
+       .get_hw_feature = dwmac1000_get_hw_feature,
 };
index 743a58017637b2cf6eaf05328cd6d8857b724a80..138fb8dd1e878f3f5512cd4baf19b1d197a18e2b 100644 (file)
@@ -70,17 +70,6 @@ static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
                readl(ioaddr + MAC_VLAN1));
        pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
                readl(ioaddr + MAC_VLAN2));
-       pr_info("\n\tMAC management counter registers\n");
-       pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
-               MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
-       pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
-               MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
-       pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
-               MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
-       pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
-               MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
-       pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
-               MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
 }
 
 static void dwmac100_irq_status(void __iomem *ioaddr)
@@ -199,6 +188,7 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
        mac->link.speed = 0;
        mac->mii.addr = MAC_MII_ADDR;
        mac->mii.data = MAC_MII_DATA;
+       mac->synopsys_uid = 0;
 
        return mac;
 }
index da3f5ccf83d300e09d1b7804717ad2c16e722977..437edacd602e2de94ae46dd658fd9fa23f7c1849 100644 (file)
@@ -34,6 +34,7 @@
 #define DMA_MISSED_FRAME_CTR   0x00001020      /* Missed Frame Counter */
 #define DMA_CUR_TX_BUF_ADDR    0x00001050      /* Current Host Tx Buffer */
 #define DMA_CUR_RX_BUF_ADDR    0x00001054      /* Current Host Rx Buffer */
+#define DMA_HW_FEATURE         0x00001058      /* HW Feature Register */
 
 /* DMA Control register defines */
 #define DMA_CONTROL_ST         0x00002000      /* Start/Stop Transmission */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
new file mode 100644 (file)
index 0000000..a383520
--- /dev/null
@@ -0,0 +1,131 @@
+/*******************************************************************************
+  MMC Header file
+
+  Copyright (C) 2011  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/* MMC control register */
+/* When set, all counter are reset */
+#define MMC_CNTRL_COUNTER_RESET                0x1
+/* When set, do not roll over zero
+ * after reaching the max value*/
+#define MMC_CNTRL_COUNTER_STOP_ROLLOVER        0x2
+#define MMC_CNTRL_RESET_ON_READ                0x4     /* Reset after reading */
+#define MMC_CNTRL_COUNTER_FREEZER      0x8     /* Freeze counter values to the
+                                                * current value.*/
+#define MMC_CNTRL_PRESET               0x10
+#define MMC_CNTRL_FULL_HALF_PRESET     0x20
+struct stmmac_counters {
+       unsigned int mmc_tx_octetcount_gb;
+       unsigned int mmc_tx_framecount_gb;
+       unsigned int mmc_tx_broadcastframe_g;
+       unsigned int mmc_tx_multicastframe_g;
+       unsigned int mmc_tx_64_octets_gb;
+       unsigned int mmc_tx_65_to_127_octets_gb;
+       unsigned int mmc_tx_128_to_255_octets_gb;
+       unsigned int mmc_tx_256_to_511_octets_gb;
+       unsigned int mmc_tx_512_to_1023_octets_gb;
+       unsigned int mmc_tx_1024_to_max_octets_gb;
+       unsigned int mmc_tx_unicast_gb;
+       unsigned int mmc_tx_multicast_gb;
+       unsigned int mmc_tx_broadcast_gb;
+       unsigned int mmc_tx_underflow_error;
+       unsigned int mmc_tx_singlecol_g;
+       unsigned int mmc_tx_multicol_g;
+       unsigned int mmc_tx_deferred;
+       unsigned int mmc_tx_latecol;
+       unsigned int mmc_tx_exesscol;
+       unsigned int mmc_tx_carrier_error;
+       unsigned int mmc_tx_octetcount_g;
+       unsigned int mmc_tx_framecount_g;
+       unsigned int mmc_tx_excessdef;
+       unsigned int mmc_tx_pause_frame;
+       unsigned int mmc_tx_vlan_frame_g;
+
+       /* MMC RX counter registers */
+       unsigned int mmc_rx_framecount_gb;
+       unsigned int mmc_rx_octetcount_gb;
+       unsigned int mmc_rx_octetcount_g;
+       unsigned int mmc_rx_broadcastframe_g;
+       unsigned int mmc_rx_multicastframe_g;
+       unsigned int mmc_rx_crc_errror;
+       unsigned int mmc_rx_align_error;
+       unsigned int mmc_rx_run_error;
+       unsigned int mmc_rx_jabber_error;
+       unsigned int mmc_rx_undersize_g;
+       unsigned int mmc_rx_oversize_g;
+       unsigned int mmc_rx_64_octets_gb;
+       unsigned int mmc_rx_65_to_127_octets_gb;
+       unsigned int mmc_rx_128_to_255_octets_gb;
+       unsigned int mmc_rx_256_to_511_octets_gb;
+       unsigned int mmc_rx_512_to_1023_octets_gb;
+       unsigned int mmc_rx_1024_to_max_octets_gb;
+       unsigned int mmc_rx_unicast_g;
+       unsigned int mmc_rx_length_error;
+       unsigned int mmc_rx_autofrangetype;
+       unsigned int mmc_rx_pause_frames;
+       unsigned int mmc_rx_fifo_overflow;
+       unsigned int mmc_rx_vlan_frames_gb;
+       unsigned int mmc_rx_watchdog_error;
+       /* IPC */
+       unsigned int mmc_rx_ipc_intr_mask;
+       unsigned int mmc_rx_ipc_intr;
+       /* IPv4 */
+       unsigned int mmc_rx_ipv4_gd;
+       unsigned int mmc_rx_ipv4_hderr;
+       unsigned int mmc_rx_ipv4_nopay;
+       unsigned int mmc_rx_ipv4_frag;
+       unsigned int mmc_rx_ipv4_udsbl;
+
+       unsigned int mmc_rx_ipv4_gd_octets;
+       unsigned int mmc_rx_ipv4_hderr_octets;
+       unsigned int mmc_rx_ipv4_nopay_octets;
+       unsigned int mmc_rx_ipv4_frag_octets;
+       unsigned int mmc_rx_ipv4_udsbl_octets;
+
+       /* IPV6 */
+       unsigned int mmc_rx_ipv6_gd_octets;
+       unsigned int mmc_rx_ipv6_hderr_octets;
+       unsigned int mmc_rx_ipv6_nopay_octets;
+
+       unsigned int mmc_rx_ipv6_gd;
+       unsigned int mmc_rx_ipv6_hderr;
+       unsigned int mmc_rx_ipv6_nopay;
+
+       /* Protocols */
+       unsigned int mmc_rx_udp_gd;
+       unsigned int mmc_rx_udp_err;
+       unsigned int mmc_rx_tcp_gd;
+       unsigned int mmc_rx_tcp_err;
+       unsigned int mmc_rx_icmp_gd;
+       unsigned int mmc_rx_icmp_err;
+
+       unsigned int mmc_rx_udp_gd_octets;
+       unsigned int mmc_rx_udp_err_octets;
+       unsigned int mmc_rx_tcp_gd_octets;
+       unsigned int mmc_rx_tcp_err_octets;
+       unsigned int mmc_rx_icmp_gd_octets;
+       unsigned int mmc_rx_icmp_err_octets;
+};
+
+extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
+extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
+extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
new file mode 100644 (file)
index 0000000..41e6b33
--- /dev/null
@@ -0,0 +1,265 @@
+/*******************************************************************************
+  DWMAC Management Counters
+
+  Copyright (C) 2011  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include "mmc.h"
+
+/* MAC Management Counters register offset */
+
+#define MMC_CNTRL              0x00000100      /* MMC Control */
+#define MMC_RX_INTR            0x00000104      /* MMC RX Interrupt */
+#define MMC_TX_INTR            0x00000108      /* MMC TX Interrupt */
+#define MMC_RX_INTR_MASK       0x0000010c      /* MMC Interrupt Mask */
+#define MMC_TX_INTR_MASK       0x00000110      /* MMC Interrupt Mask */
+#define MMC_DEFAUL_MASK                0xffffffff
+
+/* MMC TX counter registers */
+
+/* Note:
+ * _GB register stands for good and bad frames
+ * _G is for good only.
+ */
+#define MMC_TX_OCTETCOUNT_GB           0x00000114
+#define MMC_TX_FRAMECOUNT_GB           0x00000118
+#define MMC_TX_BROADCASTFRAME_G                0x0000011c
+#define MMC_TX_MULTICASTFRAME_G                0x00000120
+#define MMC_TX_64_OCTETS_GB            0x00000124
+#define MMC_TX_65_TO_127_OCTETS_GB     0x00000128
+#define MMC_TX_128_TO_255_OCTETS_GB    0x0000012c
+#define MMC_TX_256_TO_511_OCTETS_GB    0x00000130
+#define MMC_TX_512_TO_1023_OCTETS_GB   0x00000134
+#define MMC_TX_1024_TO_MAX_OCTETS_GB   0x00000138
+#define MMC_TX_UNICAST_GB              0x0000013c
+#define MMC_TX_MULTICAST_GB            0x00000140
+#define MMC_TX_BROADCAST_GB            0x00000144
+#define MMC_TX_UNDERFLOW_ERROR         0x00000148
+#define MMC_TX_SINGLECOL_G             0x0000014c
+#define MMC_TX_MULTICOL_G              0x00000150
+#define MMC_TX_DEFERRED                        0x00000154
+#define MMC_TX_LATECOL                 0x00000158
+#define MMC_TX_EXESSCOL                        0x0000015c
+#define MMC_TX_CARRIER_ERROR           0x00000160
+#define MMC_TX_OCTETCOUNT_G            0x00000164
+#define MMC_TX_FRAMECOUNT_G            0x00000168
+#define MMC_TX_EXCESSDEF               0x0000016c
+#define MMC_TX_PAUSE_FRAME             0x00000170
+#define MMC_TX_VLAN_FRAME_G            0x00000174
+
+/* MMC RX counter registers */
+#define MMC_RX_FRAMECOUNT_GB           0x00000180
+#define MMC_RX_OCTETCOUNT_GB           0x00000184
+#define MMC_RX_OCTETCOUNT_G            0x00000188
+#define MMC_RX_BROADCASTFRAME_G                0x0000018c
+#define MMC_RX_MULTICASTFRAME_G                0x00000190
+#define MMC_RX_CRC_ERRROR              0x00000194
+#define MMC_RX_ALIGN_ERROR             0x00000198
+#define MMC_RX_RUN_ERROR               0x0000019C
+#define MMC_RX_JABBER_ERROR            0x000001A0
+#define MMC_RX_UNDERSIZE_G             0x000001A4
+#define MMC_RX_OVERSIZE_G              0x000001A8
+#define MMC_RX_64_OCTETS_GB            0x000001AC
+#define MMC_RX_65_TO_127_OCTETS_GB     0x000001b0
+#define MMC_RX_128_TO_255_OCTETS_GB    0x000001b4
+#define MMC_RX_256_TO_511_OCTETS_GB    0x000001b8
+#define MMC_RX_512_TO_1023_OCTETS_GB   0x000001bc
+#define MMC_RX_1024_TO_MAX_OCTETS_GB   0x000001c0
+#define MMC_RX_UNICAST_G               0x000001c4
+#define MMC_RX_LENGTH_ERROR            0x000001c8
+#define MMC_RX_AUTOFRANGETYPE          0x000001cc
+#define MMC_RX_PAUSE_FRAMES            0x000001d0
+#define MMC_RX_FIFO_OVERFLOW           0x000001d4
+#define MMC_RX_VLAN_FRAMES_GB          0x000001d8
+#define MMC_RX_WATCHDOG_ERROR          0x000001dc
+/* IPC*/
+#define MMC_RX_IPC_INTR_MASK           0x00000200
+#define MMC_RX_IPC_INTR                        0x00000208
+/* IPv4*/
+#define MMC_RX_IPV4_GD                 0x00000210
+#define MMC_RX_IPV4_HDERR              0x00000214
+#define MMC_RX_IPV4_NOPAY              0x00000218
+#define MMC_RX_IPV4_FRAG               0x0000021C
+#define MMC_RX_IPV4_UDSBL              0x00000220
+
+#define MMC_RX_IPV4_GD_OCTETS          0x00000250
+#define MMC_RX_IPV4_HDERR_OCTETS       0x00000254
+#define MMC_RX_IPV4_NOPAY_OCTETS       0x00000258
+#define MMC_RX_IPV4_FRAG_OCTETS                0x0000025c
+#define MMC_RX_IPV4_UDSBL_OCTETS       0x00000260
+
+/* IPV6*/
+#define MMC_RX_IPV6_GD_OCTETS          0x00000264
+#define MMC_RX_IPV6_HDERR_OCTETS       0x00000268
+#define MMC_RX_IPV6_NOPAY_OCTETS       0x0000026c
+
+#define MMC_RX_IPV6_GD                 0x00000224
+#define MMC_RX_IPV6_HDERR              0x00000228
+#define MMC_RX_IPV6_NOPAY              0x0000022c
+
+/* Protocols*/
+#define MMC_RX_UDP_GD                  0x00000230
+#define MMC_RX_UDP_ERR                 0x00000234
+#define MMC_RX_TCP_GD                  0x00000238
+#define MMC_RX_TCP_ERR                 0x0000023c
+#define MMC_RX_ICMP_GD                 0x00000240
+#define MMC_RX_ICMP_ERR                        0x00000244
+
+#define MMC_RX_UDP_GD_OCTETS           0x00000270
+#define MMC_RX_UDP_ERR_OCTETS          0x00000274
+#define MMC_RX_TCP_GD_OCTETS           0x00000278
+#define MMC_RX_TCP_ERR_OCTETS          0x0000027c
+#define MMC_RX_ICMP_GD_OCTETS          0x00000280
+#define MMC_RX_ICMP_ERR_OCTETS         0x00000284
+
+void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
+{
+       u32 value = readl(ioaddr + MMC_CNTRL);
+
+       value |= (mode & 0x3F);
+
+       writel(value, ioaddr + MMC_CNTRL);
+
+       pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+                MMC_CNTRL, value);
+}
+
+/* To mask all interrupts. */
+void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
+{
+       writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
+       writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
+}
+
+/* This reads the MAC core counters (if actually supported).
+ * by default the MMC core is programmed to reset each
+ * counter after a read. So all the field of the mmc struct
+ * have to be incremented.
+ */
+void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
+{
+       mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB);
+       mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB);
+       mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G);
+       mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G);
+       mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB);
+       mmc->mmc_tx_65_to_127_octets_gb +=
+           readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB);
+       mmc->mmc_tx_128_to_255_octets_gb +=
+           readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB);
+       mmc->mmc_tx_256_to_511_octets_gb +=
+           readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB);
+       mmc->mmc_tx_512_to_1023_octets_gb +=
+           readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB);
+       mmc->mmc_tx_1024_to_max_octets_gb +=
+           readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
+       mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB);
+       mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB);
+       mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB);
+       mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR);
+       mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G);
+       mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G);
+       mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED);
+       mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL);
+       mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL);
+       mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR);
+       mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G);
+       mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G);
+       mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF);
+       mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME);
+       mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G);
+
+       /* MMC RX counter registers */
+       mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB);
+       mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB);
+       mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
+       mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
+       mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
+       mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR);
+       mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
+       mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
+       mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
+       mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G);
+       mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G);
+       mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB);
+       mmc->mmc_rx_65_to_127_octets_gb +=
+           readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB);
+       mmc->mmc_rx_128_to_255_octets_gb +=
+           readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB);
+       mmc->mmc_rx_256_to_511_octets_gb +=
+           readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB);
+       mmc->mmc_rx_512_to_1023_octets_gb +=
+           readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB);
+       mmc->mmc_rx_1024_to_max_octets_gb +=
+           readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
+       mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G);
+       mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR);
+       mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE);
+       mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES);
+       mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW);
+       mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB);
+       mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR);
+       /* IPC */
+       mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK);
+       mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR);
+       /* IPv4 */
+       mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD);
+       mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR);
+       mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY);
+       mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG);
+       mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL);
+
+       mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS);
+       mmc->mmc_rx_ipv4_hderr_octets +=
+           readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS);
+       mmc->mmc_rx_ipv4_nopay_octets +=
+           readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS);
+       mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS);
+       mmc->mmc_rx_ipv4_udsbl_octets +=
+           readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS);
+
+       /* IPV6 */
+       mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS);
+       mmc->mmc_rx_ipv6_hderr_octets +=
+           readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS);
+       mmc->mmc_rx_ipv6_nopay_octets +=
+           readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS);
+
+       mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD);
+       mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR);
+       mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY);
+
+       /* Protocols */
+       mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD);
+       mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR);
+       mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD);
+       mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR);
+       mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD);
+       mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR);
+
+       mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS);
+       mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS);
+       mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS);
+       mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS);
+       mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS);
+       mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS);
+}
index de1929b2641bca97baf2e20e4db03c5fd991ad00..1434bdb390d42043fb3eeceed8a52c2e6237269e 100644 (file)
@@ -20,7 +20,7 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
-#define DRV_MODULE_VERSION     "July_2011"
+#define DRV_MODULE_VERSION     "Aug_2011"
 #include <linux/stmmac.h>
 
 #include "common.h"
@@ -72,10 +72,13 @@ struct stmmac_priv {
        spinlock_t lock;
        int wolopts;
        int wolenabled;
+       int wol_irq;
 #ifdef CONFIG_STMMAC_TIMER
        struct stmmac_timer *tm;
 #endif
        struct plat_stmmacenet_data *plat;
+       struct stmmac_counters mmc;
+       struct dma_features dma_cap;
 };
 
 extern int stmmac_mdio_unregister(struct net_device *ndev);
index 7ed8fb6c2117250e7c55ceb6eef9a009e3223dae..aedff9a90ebc2fa5814224d9d1c57f2405564035 100644 (file)
@@ -46,7 +46,7 @@ struct stmmac_stats {
        { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m),       \
        offsetof(struct stmmac_priv, xstats.m)}
 
-static const struct  stmmac_stats stmmac_gstrings_stats[] = {
+static const struct stmmac_stats stmmac_gstrings_stats[] = {
        STMMAC_STAT(tx_underflow),
        STMMAC_STAT(tx_carrier),
        STMMAC_STAT(tx_losscarrier),
@@ -91,19 +91,106 @@ static const struct  stmmac_stats stmmac_gstrings_stats[] = {
 };
 #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
 
+/* HW MAC Management counters (if supported) */
+#define STMMAC_MMC_STAT(m)     \
+       { #m, FIELD_SIZEOF(struct stmmac_counters, m),  \
+       offsetof(struct stmmac_priv, mmc.m)}
+
+static const struct stmmac_stats stmmac_gstr_mmc[] = {
+       STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
+       STMMAC_MMC_STAT(mmc_tx_framecount_gb),
+       STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
+       STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
+       STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
+       STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
+       STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
+       STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
+       STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
+       STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
+       STMMAC_MMC_STAT(mmc_tx_unicast_gb),
+       STMMAC_MMC_STAT(mmc_tx_multicast_gb),
+       STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
+       STMMAC_MMC_STAT(mmc_tx_underflow_error),
+       STMMAC_MMC_STAT(mmc_tx_singlecol_g),
+       STMMAC_MMC_STAT(mmc_tx_multicol_g),
+       STMMAC_MMC_STAT(mmc_tx_deferred),
+       STMMAC_MMC_STAT(mmc_tx_latecol),
+       STMMAC_MMC_STAT(mmc_tx_exesscol),
+       STMMAC_MMC_STAT(mmc_tx_carrier_error),
+       STMMAC_MMC_STAT(mmc_tx_octetcount_g),
+       STMMAC_MMC_STAT(mmc_tx_framecount_g),
+       STMMAC_MMC_STAT(mmc_tx_excessdef),
+       STMMAC_MMC_STAT(mmc_tx_pause_frame),
+       STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+       STMMAC_MMC_STAT(mmc_rx_framecount_gb),
+       STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
+       STMMAC_MMC_STAT(mmc_rx_octetcount_g),
+       STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
+       STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
+       STMMAC_MMC_STAT(mmc_rx_crc_errror),
+       STMMAC_MMC_STAT(mmc_rx_align_error),
+       STMMAC_MMC_STAT(mmc_rx_run_error),
+       STMMAC_MMC_STAT(mmc_rx_jabber_error),
+       STMMAC_MMC_STAT(mmc_rx_undersize_g),
+       STMMAC_MMC_STAT(mmc_rx_oversize_g),
+       STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
+       STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
+       STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
+       STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
+       STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
+       STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
+       STMMAC_MMC_STAT(mmc_rx_unicast_g),
+       STMMAC_MMC_STAT(mmc_rx_length_error),
+       STMMAC_MMC_STAT(mmc_rx_autofrangetype),
+       STMMAC_MMC_STAT(mmc_rx_pause_frames),
+       STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
+       STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
+       STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+       STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
+       STMMAC_MMC_STAT(mmc_rx_ipc_intr),
+       STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
+       STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
+       STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
+       STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
+       STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
+       STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
+       STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
+       STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
+       STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
+       STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
+       STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
+       STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
+       STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
+       STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
+       STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
+       STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
+       STMMAC_MMC_STAT(mmc_rx_udp_gd),
+       STMMAC_MMC_STAT(mmc_rx_udp_err),
+       STMMAC_MMC_STAT(mmc_rx_tcp_gd),
+       STMMAC_MMC_STAT(mmc_rx_tcp_err),
+       STMMAC_MMC_STAT(mmc_rx_icmp_gd),
+       STMMAC_MMC_STAT(mmc_rx_icmp_err),
+       STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
+       STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
+       STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
+       STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
+       STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
+       STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+};
+#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_gstr_mmc)
+
 static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
                                      struct ethtool_drvinfo *info)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
-       if (!priv->plat->has_gmac)
-               strcpy(info->driver, MAC100_ETHTOOL_NAME);
-       else
+       if (priv->plat->has_gmac)
                strcpy(info->driver, GMAC_ETHTOOL_NAME);
+       else
+               strcpy(info->driver, MAC100_ETHTOOL_NAME);
 
        strcpy(info->version, DRV_MODULE_VERSION);
        info->fw_version[0] = '\0';
-       info->n_stats = STMMAC_STATS_LEN;
 }
 
 static int stmmac_ethtool_getsettings(struct net_device *dev,
@@ -252,24 +339,44 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
                                 struct ethtool_stats *dummy, u64 *data)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
-       int i;
-
-       /* Update HW stats if supported */
-       priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
-                                        priv->ioaddr);
+       int i, j = 0;
 
+       /* Update the DMA HW counters for dwmac10/100 */
+       if (!priv->plat->has_gmac)
+               priv->hw->dma->dma_diagnostic_fr(&dev->stats,
+                                                (void *) &priv->xstats,
+                                                priv->ioaddr);
+       else {
+               /* If supported, for new GMAC chips expose the MMC counters */
+               dwmac_mmc_read(priv->ioaddr, &priv->mmc);
+
+               for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
+                       char *p = (char *)priv + stmmac_gstr_mmc[i].stat_offset;
+
+                       data[j++] = (stmmac_gstr_mmc[i].sizeof_stat ==
+                                    sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+               }
+       }
        for (i = 0; i < STMMAC_STATS_LEN; i++) {
                char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
-               data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
-               sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+               data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
+                            sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
        }
 }
 
 static int stmmac_get_sset_count(struct net_device *netdev, int sset)
 {
+       struct stmmac_priv *priv = netdev_priv(netdev);
+       int len;
+
        switch (sset) {
        case ETH_SS_STATS:
-               return STMMAC_STATS_LEN;
+               len = STMMAC_STATS_LEN;
+
+               if (priv->plat->has_gmac)
+                       len += STMMAC_MMC_STATS_LEN;
+
+               return len;
        default:
                return -EOPNOTSUPP;
        }
@@ -279,9 +386,16 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
        int i;
        u8 *p = data;
+       struct stmmac_priv *priv = netdev_priv(dev);
 
        switch (stringset) {
        case ETH_SS_STATS:
+               if (priv->plat->has_gmac)
+                       for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
+                               memcpy(p, stmmac_gstr_mmc[i].stat_string,
+                                      ETH_GSTRING_LEN);
+                               p += ETH_GSTRING_LEN;
+                       }
                for (i = 0; i < STMMAC_STATS_LEN; i++) {
                        memcpy(p, stmmac_gstrings_stats[i].stat_string,
                                ETH_GSTRING_LEN);
@@ -321,10 +435,10 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        if (wol->wolopts) {
                pr_info("stmmac: wakeup enable\n");
                device_set_wakeup_enable(priv->device, 1);
-               enable_irq_wake(dev->irq);
+               enable_irq_wake(priv->wol_irq);
        } else {
                device_set_wakeup_enable(priv->device, 0);
-               disable_irq_wake(dev->irq);
+               disable_irq_wake(priv->wol_irq);
        }
 
        spin_lock_irq(&priv->lock);
index 68fb5b0593a09056dc581ae8913c5d7ac74f5c32..d0fbc5477d10e72563e8dc679544fdad2b39c056 100644 (file)
 #include <linux/slab.h>
 #include <linux/prefetch.h>
 #include "stmmac.h"
+#ifdef CONFIG_STMMAC_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif
 
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
 
@@ -748,6 +752,77 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
                stmmac_tx_err(priv);
 }
 
+static void stmmac_mmc_setup(struct stmmac_priv *priv)
+{
+       unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
+                           MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+       /* Do not manage MMC IRQ (FIXME) */
+       dwmac_mmc_intr_all_mask(priv->ioaddr);
+       dwmac_mmc_ctrl(priv->ioaddr, mode);
+       memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+}
+
+static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
+{
+       u32 hwid = priv->hw->synopsys_uid;
+
+       /* Only check a valid Synopsys ID because old MAC chips
+        * have no HW registers from which to get the ID */
+       if (likely(hwid)) {
+               u32 uid = ((hwid & 0x0000ff00) >> 8);
+               u32 synid = (hwid & 0x000000ff);
+
+               pr_info("STMMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
+                       uid, synid);
+
+               return synid;
+       }
+       return 0;
+}
+
+/* New GMAC chips support a new register to indicate the
+ * presence of optional features/functions.
+ */
+static int stmmac_get_hw_features(struct stmmac_priv *priv)
+{
+       u32 hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
+
+       if (likely(hw_cap)) {
+               priv->dma_cap.mbps_10_100 = (hw_cap & 0x1);
+               priv->dma_cap.mbps_1000 = (hw_cap & 0x2) >> 1;
+               priv->dma_cap.half_duplex = (hw_cap & 0x4) >> 2;
+               priv->dma_cap.hash_filter = (hw_cap & 0x10) >> 4;
+               priv->dma_cap.multi_addr = (hw_cap & 0x20) >> 5;
+               priv->dma_cap.pcs = (hw_cap & 0x40) >> 6;
+               priv->dma_cap.sma_mdio = (hw_cap & 0x100) >> 8;
+               priv->dma_cap.pmt_remote_wake_up = (hw_cap & 0x200) >> 9;
+               priv->dma_cap.pmt_magic_frame = (hw_cap & 0x400) >> 10;
+               priv->dma_cap.rmon = (hw_cap & 0x800) >> 11; /* MMC */
+               /* IEEE 1588-2002*/
+               priv->dma_cap.time_stamp = (hw_cap & 0x1000) >> 12;
+               /* IEEE 1588-2008*/
+               priv->dma_cap.atime_stamp = (hw_cap & 0x2000) >> 13;
+               /* 802.3az - Energy-Efficient Ethernet (EEE) */
+               priv->dma_cap.eee = (hw_cap & 0x4000) >> 14;
+               priv->dma_cap.av = (hw_cap & 0x8000) >> 15;
+               /* TX and RX csum */
+               priv->dma_cap.tx_coe = (hw_cap & 0x10000) >> 16;
+               priv->dma_cap.rx_coe_type1 = (hw_cap & 0x20000) >> 17;
+               priv->dma_cap.rx_coe_type2 = (hw_cap & 0x40000) >> 18;
+               priv->dma_cap.rxfifo_over_2048 = (hw_cap & 0x80000) >> 19;
+               /* TX and RX number of channels */
+               priv->dma_cap.number_rx_channel = (hw_cap & 0x300000) >> 20;
+               priv->dma_cap.number_tx_channel = (hw_cap & 0xc00000) >> 22;
+               /* Alternate (enhanced) DESC mode*/
+               priv->dma_cap.enh_desc = (hw_cap & 0x1000000) >> 24;
+
+       } else
+               pr_debug("\tNo HW DMA feature register supported");
+
+       return hw_cap;
+}
+
 /**
  *  stmmac_open - open entry point of the driver
  *  @dev : pointer to the device structure.
@@ -820,17 +895,16 @@ static int stmmac_open(struct net_device *dev)
        /* Initialize the MAC Core */
        priv->hw->mac->core_init(priv->ioaddr);
 
-       priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
+       stmmac_get_synopsys_id(priv);
+
+       stmmac_get_hw_features(priv);
+
        if (priv->rx_coe)
                pr_info("stmmac: Rx Checksum Offload Engine supported\n");
        if (priv->plat->tx_coe)
                pr_info("\tTX Checksum insertion supported\n");
        netdev_update_features(dev);
 
-       /* Initialise the MMC (if present) to disable all interrupts. */
-       writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
-       writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
-
        /* Request the IRQ lines */
        ret = request_irq(dev->irq, stmmac_interrupt,
                         IRQF_SHARED, dev->name, dev);
@@ -850,6 +924,8 @@ static int stmmac_open(struct net_device *dev)
        memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
        priv->xstats.threshold = tc;
 
+       stmmac_mmc_setup(priv);
+
        /* Start the ball rolling... */
        DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
        priv->hw->dma->start_tx(priv->ioaddr);
@@ -1416,6 +1492,182 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        return ret;
 }
 
+#ifdef CONFIG_STMMAC_DEBUG_FS
+static struct dentry *stmmac_fs_dir;
+static struct dentry *stmmac_rings_status;
+static struct dentry *stmmac_dma_cap;
+
+static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+{
+       struct tmp_s {
+               u64 a;
+               unsigned int b;
+               unsigned int c;
+       };
+       int i;
+       struct net_device *dev = seq->private;
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       seq_printf(seq, "=======================\n");
+       seq_printf(seq, " RX descriptor ring\n");
+       seq_printf(seq, "=======================\n");
+
+       for (i = 0; i < priv->dma_rx_size; i++) {
+               struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i);
+               seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+                          i, (unsigned int)(x->a),
+                          (unsigned int)((x->a) >> 32), x->b, x->c);
+               seq_printf(seq, "\n");
+       }
+
+       seq_printf(seq, "\n");
+       seq_printf(seq, "=======================\n");
+       seq_printf(seq, "  TX descriptor ring\n");
+       seq_printf(seq, "=======================\n");
+
+       for (i = 0; i < priv->dma_tx_size; i++) {
+               struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i);
+               seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+                          i, (unsigned int)(x->a),
+                          (unsigned int)((x->a) >> 32), x->b, x->c);
+               seq_printf(seq, "\n");
+       }
+
+       return 0;
+}
+
+static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
+}
+
+static const struct file_operations stmmac_rings_status_fops = {
+       .owner = THIS_MODULE,
+       .open = stmmac_sysfs_ring_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
+{
+       struct net_device *dev = seq->private;
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       if (!stmmac_get_hw_features(priv)) {
+               seq_printf(seq, "DMA HW features not supported\n");
+               return 0;
+       }
+
+       seq_printf(seq, "==============================\n");
+       seq_printf(seq, "\tDMA HW features\n");
+       seq_printf(seq, "==============================\n");
+
+       seq_printf(seq, "\t10/100 Mbps %s\n",
+                  (priv->dma_cap.mbps_10_100) ? "Y" : "N");
+       seq_printf(seq, "\t1000 Mbps %s\n",
+                  (priv->dma_cap.mbps_1000) ? "Y" : "N");
+       seq_printf(seq, "\tHalf duple %s\n",
+                  (priv->dma_cap.half_duplex) ? "Y" : "N");
+       seq_printf(seq, "\tHash Filter: %s\n",
+                  (priv->dma_cap.hash_filter) ? "Y" : "N");
+       seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+                  (priv->dma_cap.multi_addr) ? "Y" : "N");
+       seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n",
+                  (priv->dma_cap.pcs) ? "Y" : "N");
+       seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
+                  (priv->dma_cap.sma_mdio) ? "Y" : "N");
+       seq_printf(seq, "\tPMT Remote wake up: %s\n",
+                  (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
+       seq_printf(seq, "\tPMT Magic Frame: %s\n",
+                  (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
+       seq_printf(seq, "\tRMON module: %s\n",
+                  (priv->dma_cap.rmon) ? "Y" : "N");
+       seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
+                  (priv->dma_cap.time_stamp) ? "Y" : "N");
+       seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
+                  (priv->dma_cap.atime_stamp) ? "Y" : "N");
+       seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
+                  (priv->dma_cap.eee) ? "Y" : "N");
+       seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
+       seq_printf(seq, "\tChecksum Offload in TX: %s\n",
+                  (priv->dma_cap.tx_coe) ? "Y" : "N");
+       seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+                  (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+       seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+                  (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+       seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
+                  (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
+       seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
+                  priv->dma_cap.number_rx_channel);
+       seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
+                  priv->dma_cap.number_tx_channel);
+       seq_printf(seq, "\tEnhanced descriptors: %s\n",
+                  (priv->dma_cap.enh_desc) ? "Y" : "N");
+
+       return 0;
+}
+
+static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
+}
+
+static const struct file_operations stmmac_dma_cap_fops = {
+       .owner = THIS_MODULE,
+       .open = stmmac_sysfs_dma_cap_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+static int stmmac_init_fs(struct net_device *dev)
+{
+       /* Create debugfs entries */
+       stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+
+       if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
+               pr_err("ERROR %s, debugfs create directory failed\n",
+                      STMMAC_RESOURCE_NAME);
+
+               return -ENOMEM;
+       }
+
+       /* Entry to report DMA RX/TX rings */
+       stmmac_rings_status = debugfs_create_file("descriptors_status",
+                                          S_IRUGO, stmmac_fs_dir, dev,
+                                          &stmmac_rings_status_fops);
+
+       if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
+               pr_info("ERROR creating stmmac ring debugfs file\n");
+               debugfs_remove(stmmac_fs_dir);
+
+               return -ENOMEM;
+       }
+
+       /* Entry to report the DMA HW features */
+       stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
+                                            dev, &stmmac_dma_cap_fops);
+
+       if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
+               pr_info("ERROR creating stmmac MMC debugfs file\n");
+               debugfs_remove(stmmac_rings_status);
+               debugfs_remove(stmmac_fs_dir);
+
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void stmmac_exit_fs(void)
+{
+       debugfs_remove(stmmac_rings_status);
+       debugfs_remove(stmmac_dma_cap);
+       debugfs_remove(stmmac_fs_dir);
+}
+#endif /* CONFIG_STMMAC_DEBUG_FS */
+
 static const struct net_device_ops stmmac_netdev_ops = {
        .ndo_open = stmmac_open,
        .ndo_start_xmit = stmmac_xmit,
@@ -1519,7 +1771,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
 
        if (device_can_wakeup(priv->device)) {
                priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
-               enable_irq_wake(dev->irq);
+               enable_irq_wake(priv->wol_irq);
        }
 
        return 0;
@@ -1592,6 +1844,18 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
                pr_info("\tPMT module supported\n");
                device_set_wakeup_capable(&pdev->dev, 1);
        }
+       /*
+        * On some platforms, e.g. SPEAr, the wake-up IRQ differs from the MAC
+        * IRQ. The external wake-up IRQ can be passed through the platform
+        * code under the name "eth_wake_irq".
+        *
+        * If the wake-up interrupt is not provided by the platform, the
+        * driver falls back to using the MAC IRQ (ndev->irq).
+        */
+       priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+       if (priv->wol_irq == -ENXIO)
+               priv->wol_irq = ndev->irq;
+
 
        platform_set_drvdata(pdev, ndev);
 
@@ -1630,6 +1894,13 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
        if (ret < 0)
                goto out_unregister;
        pr_debug("registered!\n");
+
+#ifdef CONFIG_STMMAC_DEBUG_FS
+       ret = stmmac_init_fs(ndev);
+       if (ret < 0)
+               pr_warning("\tFailed debugFS registration");
+#endif
+
        return 0;
 
 out_unregister:
@@ -1682,6 +1953,10 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));
 
+#ifdef CONFIG_STMMAC_DEBUG_FS
+       stmmac_exit_fs();
+#endif
+
        free_netdev(ndev);
 
        return 0;
index 3c9ef1c196a920170a1075c66eb20ec1ba063245..cad58f26c47c476f81b0bb7087ad9016f1976ac6 100644 (file)
@@ -3290,11 +3290,8 @@ static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
                              u32 offset, u32 size)
 {
        int i = skb_shinfo(skb)->nr_frags;
-       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-       frag->page = page;
-       frag->page_offset = offset;
-       frag->size = size;
+       __skb_fill_page_desc(skb, i, page, offset, size);
 
        skb->len += size;
        skb->data_len += size;
@@ -6737,7 +6734,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                len = frag->size;
-               mapping = np->ops->map_page(np->device, frag->page,
+               mapping = np->ops->map_page(np->device, skb_frag_page(frag),
                                            frag->page_offset, len,
                                            DMA_TO_DEVICE);
 
index e5d82a53ea57af5ac68e1ac42a67cf54e02eec72..68a9ba66feba866f2da80d39f34eed63fc8082ca 100644 (file)
@@ -22,6 +22,7 @@ config VIA_RHINE
        tristate "VIA Rhine support"
        depends on PCI
        select CRC32
+       select NET_CORE
        select MII
        ---help---
          If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A),
@@ -47,6 +48,7 @@ config VIA_VELOCITY
        depends on PCI
        select CRC32
        select CRC_CCITT
+       select NET_CORE
        select MII
        ---help---
          If you have a VIA "Velocity" based network card say Y here.
index 82660672dcd951da07971cff2ce5b0b396bbc823..d275e276e742a2234dc4d42c5df8647f09b49fa5 100644 (file)
@@ -2,7 +2,7 @@
  * SuperH IrDA Driver
  *
  * Copyright (C) 2010 Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
  *
  * Based on sh_sir.c
  * Copyright (C) 2009 Renesas Solutions Corp.
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <net/irda/wrapper.h>
 #include <net/irda/irda_device.h>
@@ -144,8 +145,8 @@ struct sh_irda_xir_func {
 
 struct sh_irda_self {
        void __iomem            *membase;
-       unsigned int             irq;
-       struct clk              *clk;
+       unsigned int            irq;
+       struct platform_device  *pdev;
 
        struct net_device       *ndev;
 
@@ -264,7 +265,7 @@ static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
        return 0;
 }
 
-static int xir_get_rcv_length(struct sh_irda_self *self)
+static int sh_irda_get_rcv_length(struct sh_irda_self *self)
 {
        return RFL_MASK & sh_irda_read(self, IRRFLR);
 }
@@ -274,47 +275,47 @@ static int xir_get_rcv_length(struct sh_irda_self *self)
  *             NONE MODE
  *
  *=====================================*/
-static int xir_fre(struct sh_irda_self *self)
+static int sh_irda_xir_fre(struct sh_irda_self *self)
 {
        struct device *dev = &self->ndev->dev;
        dev_err(dev, "none mode: frame recv\n");
        return 0;
 }
 
-static int xir_trov(struct sh_irda_self *self)
+static int sh_irda_xir_trov(struct sh_irda_self *self)
 {
        struct device *dev = &self->ndev->dev;
        dev_err(dev, "none mode: buffer ram over\n");
        return 0;
 }
 
-static int xir_9(struct sh_irda_self *self)
+static int sh_irda_xir_9(struct sh_irda_self *self)
 {
        struct device *dev = &self->ndev->dev;
        dev_err(dev, "none mode: time over\n");
        return 0;
 }
 
-static int xir_8(struct sh_irda_self *self)
+static int sh_irda_xir_8(struct sh_irda_self *self)
 {
        struct device *dev = &self->ndev->dev;
        dev_err(dev, "none mode: framing error\n");
        return 0;
 }
 
-static int xir_fte(struct sh_irda_self *self)
+static int sh_irda_xir_fte(struct sh_irda_self *self)
 {
        struct device *dev = &self->ndev->dev;
        dev_err(dev, "none mode: frame transmit end\n");
        return 0;
 }
 
-static struct sh_irda_xir_func xir_func = {
-       .xir_fre        = xir_fre,
-       .xir_trov       = xir_trov,
-       .xir_9          = xir_9,
-       .xir_8          = xir_8,
-       .xir_fte        = xir_fte,
+static struct sh_irda_xir_func sh_irda_xir_func = {
+       .xir_fre        = sh_irda_xir_fre,
+       .xir_trov       = sh_irda_xir_trov,
+       .xir_9          = sh_irda_xir_9,
+       .xir_8          = sh_irda_xir_8,
+       .xir_fte        = sh_irda_xir_fte,
 };
 
 /*=====================================
@@ -323,12 +324,12 @@ static struct sh_irda_xir_func xir_func = {
  *
  * MIR/FIR are not supported now
  *=====================================*/
-static struct sh_irda_xir_func mfir_func = {
-       .xir_fre        = xir_fre,
-       .xir_trov       = xir_trov,
-       .xir_9          = xir_9,
-       .xir_8          = xir_8,
-       .xir_fte        = xir_fte,
+static struct sh_irda_xir_func sh_irda_mfir_func = {
+       .xir_fre        = sh_irda_xir_fre,
+       .xir_trov       = sh_irda_xir_trov,
+       .xir_9          = sh_irda_xir_9,
+       .xir_8          = sh_irda_xir_8,
+       .xir_fte        = sh_irda_xir_fte,
 };
 
 /*=====================================
@@ -336,12 +337,12 @@ static struct sh_irda_xir_func mfir_func = {
  *             SIR MODE
  *
  *=====================================*/
-static int sir_fre(struct sh_irda_self *self)
+static int sh_irda_sir_fre(struct sh_irda_self *self)
 {
        struct device *dev = &self->ndev->dev;
        u16 data16;
        u8  *data = (u8 *)&data16;
-       int len = xir_get_rcv_length(self);
+       int len = sh_irda_get_rcv_length(self);
        int i, j;
 
        if (len > IRDARAM_LEN)
@@ -364,7 +365,7 @@ static int sir_fre(struct sh_irda_self *self)
        return 0;
 }
 
-static int sir_trov(struct sh_irda_self *self)
+static int sh_irda_sir_trov(struct sh_irda_self *self)
 {
        struct device *dev = &self->ndev->dev;
 
@@ -373,7 +374,7 @@ static int sir_trov(struct sh_irda_self *self)
        return 0;
 }
 
-static int sir_tot(struct sh_irda_self *self)
+static int sh_irda_sir_tot(struct sh_irda_self *self)
 {
        struct device *dev = &self->ndev->dev;
 
@@ -383,7 +384,7 @@ static int sir_tot(struct sh_irda_self *self)
        return 0;
 }
 
-static int sir_fer(struct sh_irda_self *self)
+static int sh_irda_sir_fer(struct sh_irda_self *self)
 {
        struct device *dev = &self->ndev->dev;
 
@@ -392,7 +393,7 @@ static int sir_fer(struct sh_irda_self *self)
        return 0;
 }
 
-static int sir_fte(struct sh_irda_self *self)
+static int sh_irda_sir_fte(struct sh_irda_self *self)
 {
        struct device *dev = &self->ndev->dev;
 
@@ -402,12 +403,12 @@ static int sir_fte(struct sh_irda_self *self)
        return 0;
 }
 
-static struct sh_irda_xir_func sir_func = {
-       .xir_fre        = sir_fre,
-       .xir_trov       = sir_trov,
-       .xir_9          = sir_tot,
-       .xir_8          = sir_fer,
-       .xir_fte        = sir_fte,
+static struct sh_irda_xir_func sh_irda_sir_func = {
+       .xir_fre        = sh_irda_sir_fre,
+       .xir_trov       = sh_irda_sir_trov,
+       .xir_9          = sh_irda_sir_tot,
+       .xir_8          = sh_irda_sir_fer,
+       .xir_fte        = sh_irda_sir_fte,
 };
 
 static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
@@ -421,22 +422,22 @@ static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
        case SH_IRDA_SIR:
                name    = "SIR";
                data    = TMD_SIR;
-               func    = &sir_func;
+               func    = &sh_irda_sir_func;
                break;
        case SH_IRDA_MIR:
                name    = "MIR";
                data    = TMD_MIR;
-               func    = &mfir_func;
+               func    = &sh_irda_mfir_func;
                break;
        case SH_IRDA_FIR:
                name    = "FIR";
                data    = TMD_FIR;
-               func    = &mfir_func;
+               func    = &sh_irda_mfir_func;
                break;
        default:
-               name = "NONE";
-               data = 0;
-               func = &xir_func;
+               name    = "NONE";
+               data    = 0;
+               func    = &sh_irda_xir_func;
                break;
        }
 
@@ -694,7 +695,7 @@ static int sh_irda_open(struct net_device *ndev)
        struct sh_irda_self *self = netdev_priv(ndev);
        int err;
 
-       clk_enable(self->clk);
+       pm_runtime_get_sync(&self->pdev->dev);
        err = sh_irda_crc_init(self);
        if (err)
                goto open_err;
@@ -718,7 +719,7 @@ static int sh_irda_open(struct net_device *ndev)
        return 0;
 
 open_err:
-       clk_disable(self->clk);
+       pm_runtime_put_sync(&self->pdev->dev);
 
        return err;
 }
@@ -734,6 +735,7 @@ static int sh_irda_stop(struct net_device *ndev)
        }
 
        netif_stop_queue(ndev);
+       pm_runtime_put_sync(&self->pdev->dev);
 
        dev_info(&ndev->dev, "stoped\n");
 
@@ -786,11 +788,8 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
        if (err)
                goto err_mem_2;
 
-       self->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(self->clk)) {
-               dev_err(&pdev->dev, "cannot get irda clock\n");
-               goto err_mem_3;
-       }
+       self->pdev = pdev;
+       pm_runtime_enable(&pdev->dev);
 
        irda_init_max_qos_capabilies(&self->qos);
 
@@ -820,8 +819,7 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
        goto exit;
 
 err_mem_4:
-       clk_put(self->clk);
-err_mem_3:
+       pm_runtime_disable(&pdev->dev);
        sh_irda_remove_iobuf(self);
 err_mem_2:
        iounmap(self->membase);
@@ -840,7 +838,7 @@ static int __devexit sh_irda_remove(struct platform_device *pdev)
                return 0;
 
        unregister_netdev(ndev);
-       clk_put(self->clk);
+       pm_runtime_disable(&pdev->dev);
        sh_irda_remove_iobuf(self);
        iounmap(self->membase);
        free_netdev(ndev);
@@ -849,11 +847,29 @@ static int __devexit sh_irda_remove(struct platform_device *pdev)
        return 0;
 }
 
+static int sh_irda_runtime_nop(struct device *dev)
+{
+       /* Runtime PM callback shared between ->runtime_suspend()
+        * and ->runtime_resume(). Simply returns success.
+        *
+        * This driver re-initializes all registers after
+        * pm_runtime_get_sync() anyway so there is no need
+        * to save and restore registers here.
+        */
+       return 0;
+}
+
+static const struct dev_pm_ops sh_irda_pm_ops = {
+       .runtime_suspend        = sh_irda_runtime_nop,
+       .runtime_resume         = sh_irda_runtime_nop,
+};
+
 static struct platform_driver sh_irda_driver = {
-       .probe   = sh_irda_probe,
-       .remove  = __devexit_p(sh_irda_remove),
-       .driver  = {
-               .name = DRIVER_NAME,
+       .probe  = sh_irda_probe,
+       .remove = __devexit_p(sh_irda_remove),
+       .driver = {
+               .name   = DRIVER_NAME,
+               .pm     = &sh_irda_pm_ops,
        },
 };
 
@@ -870,6 +886,6 @@ static void __exit sh_irda_exit(void)
 module_init(sh_irda_init);
 module_exit(sh_irda_exit);
 
-MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
 MODULE_DESCRIPTION("SuperH IrDA driver");
 MODULE_LICENSE("GPL");
index 836e13fcb3ecce21f498bbbd822f32362e345ce0..b100c90e850791b37f585c6a779a585f73e2cb44 100644 (file)
@@ -543,7 +543,8 @@ static int macvlan_ethtool_get_settings(struct net_device *dev,
                                        struct ethtool_cmd *cmd)
 {
        const struct macvlan_dev *vlan = netdev_priv(dev);
-       return dev_ethtool_get_settings(vlan->lowerdev, cmd);
+
+       return __ethtool_get_settings(vlan->lowerdev, cmd);
 }
 
 static const struct ethtool_ops macvlan_ethtool_ops = {
index ab96c319a240070112789b6ce0ceabd176dc170f..7c3f84acfdfbb9cb8d513a34f6c124c69c30342f 100644 (file)
@@ -503,10 +503,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
                skb->truesize += len;
                atomic_add(len, &skb->sk->sk_wmem_alloc);
                while (len) {
-                       f = &skb_shinfo(skb)->frags[i];
-                       f->page = page[i];
-                       f->page_offset = base & ~PAGE_MASK;
-                       f->size = min_t(int, len, PAGE_SIZE - f->page_offset);
+                       __skb_fill_page_desc(
+                               skb, i, page[i],
+                               base & ~PAGE_MASK,
+                               min_t(int, len, PAGE_SIZE - f->page_offset));
                        skb_shinfo(skb)->nr_frags++;
                        /* increase sk_wmem_alloc */
                        base += f->size;
index 84d4608153c992556a2d14a4a6bab37160abbf1b..23357612793486a2929e4c6030a9d4afb42b9bf4 100644 (file)
@@ -68,6 +68,7 @@ config USB_KAWETH
 
 config USB_PEGASUS
        tristate "USB Pegasus/Pegasus-II based ethernet device support"
+       select NET_CORE
        select MII
        ---help---
          Say Y here if you know you have Pegasus or Pegasus-II based adapter.
@@ -84,6 +85,7 @@ config USB_PEGASUS
 config USB_RTL8150
        tristate "USB RTL8150 based ethernet device support (EXPERIMENTAL)"
        depends on EXPERIMENTAL
+       select NET_CORE
        select MII
        help
          Say Y here if you have RTL8150 based usb-ethernet adapter.
@@ -95,6 +97,7 @@ config USB_RTL8150
 
 config USB_USBNET
        tristate "Multi-purpose USB Networking Framework"
+       select NET_CORE
        select MII
        ---help---
          This driver supports several kinds of network links over USB,
index 97172f8a15b7a8eccfb0baeb514d08193752b97a..81534437373a3c1209c914db4fb2a32d45860e44 100644 (file)
@@ -3694,7 +3694,8 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
 
        for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
                frag = &skb_shinfo(skb)->frags[cnt];
-               buffer->element[element].addr = (char *)page_to_phys(frag->page)
+               buffer->element[element].addr = (char *)
+                       page_to_phys(skb_frag_page(frag))
                        + frag->page_offset;
                buffer->element[element].length = frag->size;
                buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
index 2c780a78fcbd31f9ad0d9e53d0bf7b833698d0fe..820a1840c3f755b5c90fd66922e2da5f8d729dab 100644 (file)
@@ -673,7 +673,7 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
        struct net_device *netdev = interface->netdev;
        struct ethtool_cmd ecmd;
 
-       if (!dev_ethtool_get_settings(netdev, &ecmd)) {
+       if (!__ethtool_get_settings(netdev, &ecmd)) {
                lport->link_supported_speeds &=
                        ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
                if (ecmd.supported & (SUPPORTED_1000baseT_Half |
@@ -1001,9 +1001,11 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
                        "this interface\n");
                return -EIO;
        }
+       rtnl_lock();
        mutex_lock(&bnx2fc_dev_lock);
        vn_port = bnx2fc_if_create(interface, &vport->dev, 1);
        mutex_unlock(&bnx2fc_dev_lock);
+       rtnl_unlock();
 
        if (IS_ERR(vn_port)) {
                printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
index 3416ab6738143d1b1be4e09caf362ccafdc30cbe..83aa3ac52c40754b97896f2a80cc12f34c418ac1 100644 (file)
@@ -2043,7 +2043,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
        struct net_device *netdev = fcoe_netdev(lport);
        struct ethtool_cmd ecmd;
 
-       if (!dev_ethtool_get_settings(netdev, &ecmd)) {
+       if (!__ethtool_get_settings(netdev, &ecmd)) {
                lport->link_supported_speeds &=
                        ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
                if (ecmd.supported & (SUPPORTED_1000baseT_Half |
@@ -2452,7 +2452,9 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
        }
 
        mutex_lock(&fcoe_config_mutex);
+       rtnl_lock();
        vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
+       rtnl_unlock();
        mutex_unlock(&fcoe_config_mutex);
 
        if (IS_ERR(vn_port)) {
index 3829712ccc054e129b9bc389b029367c28a17475..8571f18c38a652e86082a135d618792e51b7f4d3 100644 (file)
@@ -728,6 +728,9 @@ enum ethtool_sfeatures_retval_bits {
 /* needed by dev_disable_lro() */
 extern int __ethtool_set_flags(struct net_device *dev, u32 flags);
 
+extern int __ethtool_get_settings(struct net_device *dev,
+                                 struct ethtool_cmd *cmd);
+
 /**
  * enum ethtool_phys_id_state - indicator state for physical identification
  * @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated
index 103113a2fd187df38ee8362c6a720a2ab8c6f170..27748230aa69440e9b6d728a61a2b9ac425ac9c0 100644 (file)
 #include <linux/types.h>
 
 /* Generic MII registers. */
-
-#define MII_BMCR            0x00        /* Basic mode control register */
-#define MII_BMSR            0x01        /* Basic mode status register  */
-#define MII_PHYSID1         0x02        /* PHYS ID 1                   */
-#define MII_PHYSID2         0x03        /* PHYS ID 2                   */
-#define MII_ADVERTISE       0x04        /* Advertisement control reg   */
-#define MII_LPA             0x05        /* Link partner ability reg    */
-#define MII_EXPANSION       0x06        /* Expansion register          */
-#define MII_CTRL1000        0x09        /* 1000BASE-T control          */
-#define MII_STAT1000        0x0a        /* 1000BASE-T status           */
-#define MII_ESTATUS        0x0f        /* Extended Status */
-#define MII_DCOUNTER        0x12        /* Disconnect counter          */
-#define MII_FCSCOUNTER      0x13        /* False carrier counter       */
-#define MII_NWAYTEST        0x14        /* N-way auto-neg test reg     */
-#define MII_RERRCOUNTER     0x15        /* Receive error counter       */
-#define MII_SREVISION       0x16        /* Silicon revision            */
-#define MII_RESV1           0x17        /* Reserved...                 */
-#define MII_LBRERROR        0x18        /* Lpback, rx, bypass error    */
-#define MII_PHYADDR         0x19        /* PHY address                 */
-#define MII_RESV2           0x1a        /* Reserved...                 */
-#define MII_TPISTATUS       0x1b        /* TPI status for 10mbps       */
-#define MII_NCONFIG         0x1c        /* Network interface config    */
+#define MII_BMCR               0x00    /* Basic mode control register */
+#define MII_BMSR               0x01    /* Basic mode status register  */
+#define MII_PHYSID1            0x02    /* PHYS ID 1                   */
+#define MII_PHYSID2            0x03    /* PHYS ID 2                   */
+#define MII_ADVERTISE          0x04    /* Advertisement control reg   */
+#define MII_LPA                        0x05    /* Link partner ability reg    */
+#define MII_EXPANSION          0x06    /* Expansion register          */
+#define MII_CTRL1000           0x09    /* 1000BASE-T control          */
+#define MII_STAT1000           0x0a    /* 1000BASE-T status           */
+#define MII_ESTATUS            0x0f    /* Extended Status             */
+#define MII_DCOUNTER           0x12    /* Disconnect counter          */
+#define MII_FCSCOUNTER         0x13    /* False carrier counter       */
+#define MII_NWAYTEST           0x14    /* N-way auto-neg test reg     */
+#define MII_RERRCOUNTER                0x15    /* Receive error counter       */
+#define MII_SREVISION          0x16    /* Silicon revision            */
+#define MII_RESV1              0x17    /* Reserved...                 */
+#define MII_LBRERROR           0x18    /* Lpback, rx, bypass error    */
+#define MII_PHYADDR            0x19    /* PHY address                 */
+#define MII_RESV2              0x1a    /* Reserved...                 */
+#define MII_TPISTATUS          0x1b    /* TPI status for 10mbps       */
+#define MII_NCONFIG            0x1c    /* Network interface config    */
 
 /* Basic mode control register. */
-#define BMCR_RESV               0x003f  /* Unused...                   */
-#define BMCR_SPEED1000         0x0040  /* MSB of Speed (1000)         */
-#define BMCR_CTST               0x0080  /* Collision test              */
-#define BMCR_FULLDPLX           0x0100  /* Full duplex                 */
-#define BMCR_ANRESTART          0x0200  /* Auto negotiation restart    */
-#define BMCR_ISOLATE            0x0400  /* Disconnect DP83840 from MII */
-#define BMCR_PDOWN              0x0800  /* Powerdown the DP83840       */
-#define BMCR_ANENABLE           0x1000  /* Enable auto negotiation     */
-#define BMCR_SPEED100           0x2000  /* Select 100Mbps              */
-#define BMCR_LOOPBACK           0x4000  /* TXD loopback bits           */
-#define BMCR_RESET              0x8000  /* Reset the DP83840           */
+#define BMCR_RESV              0x003f  /* Unused...                   */
+#define BMCR_SPEED1000         0x0040  /* MSB of Speed (1000)         */
+#define BMCR_CTST              0x0080  /* Collision test              */
+#define BMCR_FULLDPLX          0x0100  /* Full duplex                 */
+#define BMCR_ANRESTART         0x0200  /* Auto negotiation restart    */
+#define BMCR_ISOLATE           0x0400  /* Isolate data paths from MII */
+#define BMCR_PDOWN             0x0800  /* Enable low power state      */
+#define BMCR_ANENABLE          0x1000  /* Enable auto negotiation     */
+#define BMCR_SPEED100          0x2000  /* Select 100Mbps              */
+#define BMCR_LOOPBACK          0x4000  /* TXD loopback bits           */
+#define BMCR_RESET             0x8000  /* Reset to default state      */
 
 /* Basic mode status register. */
-#define BMSR_ERCAP              0x0001  /* Ext-reg capability          */
-#define BMSR_JCD                0x0002  /* Jabber detected             */
-#define BMSR_LSTATUS            0x0004  /* Link status                 */
-#define BMSR_ANEGCAPABLE        0x0008  /* Able to do auto-negotiation */
-#define BMSR_RFAULT             0x0010  /* Remote fault detected       */
-#define BMSR_ANEGCOMPLETE       0x0020  /* Auto-negotiation complete   */
-#define BMSR_RESV               0x00c0  /* Unused...                   */
-#define BMSR_ESTATEN           0x0100  /* Extended Status in R15 */
-#define BMSR_100HALF2           0x0200  /* Can do 100BASE-T2 HDX */
-#define BMSR_100FULL2           0x0400  /* Can do 100BASE-T2 FDX */
-#define BMSR_10HALF             0x0800  /* Can do 10mbps, half-duplex  */
-#define BMSR_10FULL             0x1000  /* Can do 10mbps, full-duplex  */
-#define BMSR_100HALF            0x2000  /* Can do 100mbps, half-duplex */
-#define BMSR_100FULL            0x4000  /* Can do 100mbps, full-duplex */
-#define BMSR_100BASE4           0x8000  /* Can do 100mbps, 4k packets  */
+#define BMSR_ERCAP             0x0001  /* Ext-reg capability          */
+#define BMSR_JCD               0x0002  /* Jabber detected             */
+#define BMSR_LSTATUS           0x0004  /* Link status                 */
+#define BMSR_ANEGCAPABLE       0x0008  /* Able to do auto-negotiation */
+#define BMSR_RFAULT            0x0010  /* Remote fault detected       */
+#define BMSR_ANEGCOMPLETE      0x0020  /* Auto-negotiation complete   */
+#define BMSR_RESV              0x00c0  /* Unused...                   */
+#define BMSR_ESTATEN           0x0100  /* Extended Status in R15      */
+#define BMSR_100HALF2          0x0200  /* Can do 100BASE-T2 HDX       */
+#define BMSR_100FULL2          0x0400  /* Can do 100BASE-T2 FDX       */
+#define BMSR_10HALF            0x0800  /* Can do 10mbps, half-duplex  */
+#define BMSR_10FULL            0x1000  /* Can do 10mbps, full-duplex  */
+#define BMSR_100HALF           0x2000  /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL           0x4000  /* Can do 100mbps, full-duplex */
+#define BMSR_100BASE4          0x8000  /* Can do 100mbps, 4k packets  */
 
 /* Advertisement control register. */
-#define ADVERTISE_SLCT          0x001f  /* Selector bits               */
-#define ADVERTISE_CSMA          0x0001  /* Only selector supported     */
-#define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex  */
-#define ADVERTISE_1000XFULL     0x0020  /* Try for 1000BASE-X full-duplex */
-#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
-#define ADVERTISE_1000XHALF     0x0040  /* Try for 1000BASE-X half-duplex */
-#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
-#define ADVERTISE_1000XPAUSE    0x0080  /* Try for 1000BASE-X pause    */
-#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */
-#define ADVERTISE_1000XPSE_ASYM 0x0100  /* Try for 1000BASE-X asym pause */
-#define ADVERTISE_100BASE4      0x0200  /* Try for 100mbps 4k packets  */
-#define ADVERTISE_PAUSE_CAP     0x0400  /* Try for pause               */
-#define ADVERTISE_PAUSE_ASYM    0x0800  /* Try for asymetric pause     */
-#define ADVERTISE_RESV          0x1000  /* Unused...                   */
-#define ADVERTISE_RFAULT        0x2000  /* Say we can detect faults    */
-#define ADVERTISE_LPACK         0x4000  /* Ack link partners response  */
-#define ADVERTISE_NPAGE         0x8000  /* Next page bit               */
-
-#define ADVERTISE_FULL (ADVERTISE_100FULL | ADVERTISE_10FULL | \
-                       ADVERTISE_CSMA)
-#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
-                       ADVERTISE_100HALF | ADVERTISE_100FULL)
+#define ADVERTISE_SLCT         0x001f  /* Selector bits               */
+#define ADVERTISE_CSMA         0x0001  /* Only selector supported     */
+#define ADVERTISE_10HALF       0x0020  /* Try for 10mbps half-duplex  */
+#define ADVERTISE_1000XFULL    0x0020  /* Try for 1000BASE-X full-duplex */
+#define ADVERTISE_10FULL       0x0040  /* Try for 10mbps full-duplex  */
+#define ADVERTISE_1000XHALF    0x0040  /* Try for 1000BASE-X half-duplex */
+#define ADVERTISE_100HALF      0x0080  /* Try for 100mbps half-duplex */
+#define ADVERTISE_1000XPAUSE   0x0080  /* Try for 1000BASE-X pause    */
+#define ADVERTISE_100FULL      0x0100  /* Try for 100mbps full-duplex */
+#define ADVERTISE_1000XPSE_ASYM        0x0100  /* Try for 1000BASE-X asym pause */
+#define ADVERTISE_100BASE4     0x0200  /* Try for 100mbps 4k packets  */
+#define ADVERTISE_PAUSE_CAP    0x0400  /* Try for pause               */
+#define ADVERTISE_PAUSE_ASYM   0x0800  /* Try for asymetric pause     */
+#define ADVERTISE_RESV         0x1000  /* Unused...                   */
+#define ADVERTISE_RFAULT       0x2000  /* Say we can detect faults    */
+#define ADVERTISE_LPACK                0x4000  /* Ack link partners response  */
+#define ADVERTISE_NPAGE                0x8000  /* Next page bit               */
+
+#define ADVERTISE_FULL         (ADVERTISE_100FULL | ADVERTISE_10FULL | \
+                                 ADVERTISE_CSMA)
+#define ADVERTISE_ALL          (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+                                 ADVERTISE_100HALF | ADVERTISE_100FULL)
 
 /* Link partner ability register. */
-#define LPA_SLCT                0x001f  /* Same as advertise selector  */
-#define LPA_10HALF              0x0020  /* Can do 10mbps half-duplex   */
-#define LPA_1000XFULL           0x0020  /* Can do 1000BASE-X full-duplex */
-#define LPA_10FULL              0x0040  /* Can do 10mbps full-duplex   */
-#define LPA_1000XHALF           0x0040  /* Can do 1000BASE-X half-duplex */
-#define LPA_100HALF             0x0080  /* Can do 100mbps half-duplex  */
-#define LPA_1000XPAUSE          0x0080  /* Can do 1000BASE-X pause     */
-#define LPA_100FULL             0x0100  /* Can do 100mbps full-duplex  */
-#define LPA_1000XPAUSE_ASYM     0x0100  /* Can do 1000BASE-X pause asym*/
-#define LPA_100BASE4            0x0200  /* Can do 100mbps 4k packets   */
-#define LPA_PAUSE_CAP           0x0400  /* Can pause                   */
-#define LPA_PAUSE_ASYM          0x0800  /* Can pause asymetrically     */
-#define LPA_RESV                0x1000  /* Unused...                   */
-#define LPA_RFAULT              0x2000  /* Link partner faulted        */
-#define LPA_LPACK               0x4000  /* Link partner acked us       */
-#define LPA_NPAGE               0x8000  /* Next page bit               */
+#define LPA_SLCT               0x001f  /* Same as advertise selector  */
+#define LPA_10HALF             0x0020  /* Can do 10mbps half-duplex   */
+#define LPA_1000XFULL          0x0020  /* Can do 1000BASE-X full-duplex */
+#define LPA_10FULL             0x0040  /* Can do 10mbps full-duplex   */
+#define LPA_1000XHALF          0x0040  /* Can do 1000BASE-X half-duplex */
+#define LPA_100HALF            0x0080  /* Can do 100mbps half-duplex  */
+#define LPA_1000XPAUSE         0x0080  /* Can do 1000BASE-X pause     */
+#define LPA_100FULL            0x0100  /* Can do 100mbps full-duplex  */
+#define LPA_1000XPAUSE_ASYM    0x0100  /* Can do 1000BASE-X pause asym*/
+#define LPA_100BASE4           0x0200  /* Can do 100mbps 4k packets   */
+#define LPA_PAUSE_CAP          0x0400  /* Can pause                   */
+#define LPA_PAUSE_ASYM         0x0800  /* Can pause asymetrically     */
+#define LPA_RESV               0x1000  /* Unused...                   */
+#define LPA_RFAULT             0x2000  /* Link partner faulted        */
+#define LPA_LPACK              0x4000  /* Link partner acked us       */
+#define LPA_NPAGE              0x8000  /* Next page bit               */
 
 #define LPA_DUPLEX             (LPA_10FULL | LPA_100FULL)
 #define LPA_100                        (LPA_100FULL | LPA_100HALF | LPA_100BASE4)
 
 /* Expansion register for auto-negotiation. */
-#define EXPANSION_NWAY          0x0001  /* Can do N-way auto-nego      */
-#define EXPANSION_LCWP          0x0002  /* Got new RX page code word   */
-#define EXPANSION_ENABLENPAGE   0x0004  /* This enables npage words    */
-#define EXPANSION_NPCAPABLE     0x0008  /* Link partner supports npage */
-#define EXPANSION_MFAULTS       0x0010  /* Multiple faults detected    */
-#define EXPANSION_RESV          0xffe0  /* Unused...                   */
+#define EXPANSION_NWAY         0x0001  /* Can do N-way auto-nego      */
+#define EXPANSION_LCWP         0x0002  /* Got new RX page code word   */
+#define EXPANSION_ENABLENPAGE  0x0004  /* This enables npage words    */
+#define EXPANSION_NPCAPABLE    0x0008  /* Link partner supports npage */
+#define EXPANSION_MFAULTS      0x0010  /* Multiple faults detected    */
+#define EXPANSION_RESV         0xffe0  /* Unused...                   */
 
-#define ESTATUS_1000_TFULL     0x2000  /* Can do 1000BT Full */
-#define ESTATUS_1000_THALF     0x1000  /* Can do 1000BT Half */
+#define ESTATUS_1000_TFULL     0x2000  /* Can do 1000BT Full          */
+#define ESTATUS_1000_THALF     0x1000  /* Can do 1000BT Half          */
 
 /* N-way test register. */
-#define NWAYTEST_RESV1          0x00ff  /* Unused...                   */
-#define NWAYTEST_LOOPBACK       0x0100  /* Enable loopback for N-way   */
-#define NWAYTEST_RESV2          0xfe00  /* Unused...                   */
+#define NWAYTEST_RESV1         0x00ff  /* Unused...                   */
+#define NWAYTEST_LOOPBACK      0x0100  /* Enable loopback for N-way   */
+#define NWAYTEST_RESV2         0xfe00  /* Unused...                   */
 
 /* 1000BASE-T Control register */
-#define ADVERTISE_1000FULL      0x0200  /* Advertise 1000BASE-T full duplex */
-#define ADVERTISE_1000HALF      0x0100  /* Advertise 1000BASE-T half duplex */
+#define ADVERTISE_1000FULL     0x0200  /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF     0x0100  /* Advertise 1000BASE-T half duplex */
 #define CTL1000_AS_MASTER      0x0800
 #define CTL1000_ENABLE_MASTER  0x1000
 
 /* 1000BASE-T Status register */
-#define LPA_1000LOCALRXOK       0x2000  /* Link partner local receiver status */
-#define LPA_1000REMRXOK         0x1000  /* Link partner remote receiver status */
-#define LPA_1000FULL            0x0800  /* Link partner 1000BASE-T full duplex */
-#define LPA_1000HALF            0x0400  /* Link partner 1000BASE-T half duplex */
+#define LPA_1000LOCALRXOK      0x2000  /* Link partner local receiver status */
+#define LPA_1000REMRXOK                0x1000  /* Link partner remote receiver status */
+#define LPA_1000FULL           0x0800  /* Link partner 1000BASE-T full duplex */
+#define LPA_1000HALF           0x0400  /* Link partner 1000BASE-T half duplex */
 
 /* Flow control flags */
 #define FLOW_CTRL_TX           0x01
@@ -149,7 +148,7 @@ struct mii_ioctl_data {
        __u16           val_out;
 };
 
-#ifdef __KERNEL__ 
+#ifdef __KERNEL__
 
 #include <linux/if.h>
 
@@ -180,7 +179,7 @@ extern unsigned int mii_check_media (struct mii_if_info *mii,
                                     unsigned int ok_to_print,
                                     unsigned int init_media);
 extern int generic_mii_ioctl(struct mii_if_info *mii_if,
-                            struct mii_ioctl_data *mii_data, int cmd,
+                            struct mii_ioctl_data *mii_data, int cmd,
                             unsigned int *duplex_changed);
 
 
@@ -189,7 +188,6 @@ static inline struct mii_ioctl_data *if_mii(struct ifreq *rq)
        return (struct mii_ioctl_data *) &rq->ifr_ifru;
 }
 
-
 /**
  * mii_nway_result
  * @negotiated: value of MII ANAR and'd with ANLPAR
index 0a7f619f284eaa800b3a82da854472a551bfc4b2..43b32983ba104092f79dd4bb7d5db9d884134abe 100644 (file)
@@ -2589,9 +2589,6 @@ static inline int netif_is_bond_slave(struct net_device *dev)
 
 extern struct pernet_operations __net_initdata loopback_net_ops;
 
-int dev_ethtool_get_settings(struct net_device *dev,
-                            struct ethtool_cmd *cmd);
-
 static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
 {
        if (dev->features & NETIF_F_RXCSUM)
index ae8c68f30f1bc36c3ffa1c7095e85125f85f6ae7..639a4491fc0dd8fcf4e26931f2c6e8d1c7ed0566 100644 (file)
@@ -218,8 +218,12 @@ static inline int iboe_get_rate(struct net_device *dev)
 {
        struct ethtool_cmd cmd;
        u32 speed;
+       int err;
 
-       if (dev_ethtool_get_settings(dev, &cmd))
+       rtnl_lock();
+       err = __ethtool_get_settings(dev, &cmd);
+       rtnl_unlock();
+       if (err)
                return IB_RATE_PORT_CURRENT;
 
        speed = ethtool_cmd_speed(&cmd);
index eba705b92d6f562c2247f08594efc168c275386f..c8cf9391417ec9442b47f31e3c3cde891cfeaf1d 100644 (file)
@@ -610,7 +610,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
                                     struct ethtool_cmd *cmd)
 {
        const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-       return dev_ethtool_get_settings(vlan->real_dev, cmd);
+
+       return __ethtool_get_settings(vlan->real_dev, cmd);
 }
 
 static void vlan_ethtool_get_drvinfo(struct net_device *dev,
index 2cdf0070419f368738464ad5972fb4a0018f7820..043a5eb8cafc5dba2c87c941893fab0af5848c17 100644 (file)
  */
 static int port_cost(struct net_device *dev)
 {
-       if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
-               struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
-
-               if (!dev_ethtool_get_settings(dev, &ecmd)) {
-                       switch (ethtool_cmd_speed(&ecmd)) {
-                       case SPEED_10000:
-                               return 2;
-                       case SPEED_1000:
-                               return 4;
-                       case SPEED_100:
-                               return 19;
-                       case SPEED_10:
-                               return 100;
-                       }
+       struct ethtool_cmd ecmd;
+
+       if (!__ethtool_get_settings(dev, &ecmd)) {
+               switch (ethtool_cmd_speed(&ecmd)) {
+               case SPEED_10000:
+                       return 2;
+               case SPEED_1000:
+                       return 4;
+               case SPEED_100:
+                       return 19;
+               case SPEED_10:
+                       return 100;
                }
        }
 
index b2e262ed3963cafe6a8d6379a790540b6ca788f4..4b9981caf06fbefcb397cfd512d3012e9b312167 100644 (file)
@@ -4565,30 +4565,6 @@ void dev_set_rx_mode(struct net_device *dev)
        netif_addr_unlock_bh(dev);
 }
 
-/**
- *     dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
- *     @dev: device
- *     @cmd: memory area for ethtool_ops::get_settings() result
- *
- *      The cmd arg is initialized properly (cleared and
- *      ethtool_cmd::cmd field set to ETHTOOL_GSET).
- *
- *     Return device's ethtool_ops::get_settings() result value or
- *     -EOPNOTSUPP when device doesn't expose
- *     ethtool_ops::get_settings() operation.
- */
-int dev_ethtool_get_settings(struct net_device *dev,
-                            struct ethtool_cmd *cmd)
-{
-       if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
-               return -EOPNOTSUPP;
-
-       memset(cmd, 0, sizeof(struct ethtool_cmd));
-       cmd->cmd = ETHTOOL_GSET;
-       return dev->ethtool_ops->get_settings(dev, cmd);
-}
-EXPORT_SYMBOL(dev_ethtool_get_settings);
-
 /**
  *     dev_get_flags - get flags reported to userspace
  *     @dev: device
index 6cdba5fc2beddf4e8b5c02ca3907a199bb09198c..f444817071245006200ba882a2f085ee43ae3773 100644 (file)
@@ -569,15 +569,25 @@ int __ethtool_set_flags(struct net_device *dev, u32 data)
        return 0;
 }
 
-static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
+int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
-       struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
-       int err;
+       ASSERT_RTNL();
 
-       if (!dev->ethtool_ops->get_settings)
+       if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
                return -EOPNOTSUPP;
 
-       err = dev->ethtool_ops->get_settings(dev, &cmd);
+       memset(cmd, 0, sizeof(struct ethtool_cmd));
+       cmd->cmd = ETHTOOL_GSET;
+       return dev->ethtool_ops->get_settings(dev, cmd);
+}
+EXPORT_SYMBOL(__ethtool_get_settings);
+
+static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
+{
+       int err;
+       struct ethtool_cmd cmd;
+
+       err = __ethtool_get_settings(dev, &cmd);
        if (err < 0)
                return err;
 
index 357bd4ee4baa324f1402ae3d7a44c37ce4d1e46e..c3519c6d1b169a5c895efd781c7958218d7f8dc6 100644 (file)
@@ -78,8 +78,13 @@ static void rfc2863_policy(struct net_device *dev)
 
 static bool linkwatch_urgent_event(struct net_device *dev)
 {
-       return netif_running(dev) && netif_carrier_ok(dev) &&
-               qdisc_tx_changing(dev);
+       if (!netif_running(dev))
+               return false;
+
+       if (dev->ifindex != dev->iflink)
+               return true;
+
+       return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
 }
 
 
index 56e42ab7cbc6f292da4ae3502f0ab8da55d3a722..7604a635376bdf61f259e940ef042ba6d0820a10 100644 (file)
@@ -147,7 +147,7 @@ static ssize_t show_speed(struct device *dev,
 
        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
-               if (!dev_ethtool_get_settings(netdev, &cmd))
+               if (!__ethtool_get_settings(netdev, &cmd))
                        ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
        }
        rtnl_unlock();
@@ -165,7 +165,7 @@ static ssize_t show_duplex(struct device *dev,
 
        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
-               if (!dev_ethtool_get_settings(netdev, &cmd))
+               if (!__ethtool_get_settings(netdev, &cmd))
                        ret = sprintf(buf, "%s\n",
                                      cmd.duplex ? "full" : "half");
        }
index cf304cc8c8ef0cd3727b48a7ddf98be5514d416d..19d6aefe97d4b9de337d8df3848a4b988bb5a5bd 100644 (file)
@@ -479,10 +479,10 @@ static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
        int stat = NET_RX_SUCCESS;
 
        new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
-                                                               GFP_KERNEL);
+                                                               GFP_ATOMIC);
        kfree_skb(skb);
 
-       if (NULL == new)
+       if (!new)
                return -ENOMEM;
 
        skb_push(new, sizeof(struct ipv6hdr));
@@ -495,13 +495,14 @@ static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
        rcu_read_lock();
        list_for_each_entry_rcu(entry, &lowpan_devices, list)
                if (lowpan_dev_info(entry->ldev)->real_dev == new->dev) {
-                       skb = skb_copy(new, GFP_KERNEL);
-                       skb->dev = entry->ldev;
+                       skb = skb_copy(new, GFP_ATOMIC);
+                       if (!skb) {
+                               stat = -ENOMEM;
+                               break;
+                       }
 
-                       if (in_interrupt())
-                               stat = netif_rx(skb);
-                       else
-                               stat = netif_rx_ni(skb);
+                       skb->dev = entry->ldev;
+                       stat = netif_rx(skb);
                }
        rcu_read_unlock();
 
@@ -674,7 +675,7 @@ lowpan_process_data(struct sk_buff *skb)
                                                        sizeof(hdr));
        return lowpan_skb_deliver(skb, &hdr);
 drop:
-       kfree(skb);
+       kfree_skb(skb);
        return -EINVAL;
 }
 
@@ -793,8 +794,11 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
        mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
 
        entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
-       if (!entry)
+       if (!entry) {
+               dev_put(real_dev);
+               lowpan_dev_info(dev)->real_dev = NULL;
                return -ENOMEM;
+       }
 
        entry->ldev = dev;
 
@@ -813,15 +817,17 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
        struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
        struct net_device *real_dev = lowpan_dev->real_dev;
        struct lowpan_dev_record *entry;
+       struct lowpan_dev_record *tmp;
 
        ASSERT_RTNL();
 
        mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-       list_for_each_entry(entry, &lowpan_devices, list)
+       list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
                if (entry->ldev == dev) {
                        list_del(&entry->list);
                        kfree(entry);
                }
+       }
        mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
 
        mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
index 2ea3d63e1d4c6a12fd7cffe24cb53f0205e937e1..25e68f56b4ba4efc4dc0e5c8a1317a9d10a979af 100644 (file)
@@ -530,33 +530,35 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 {
        struct net_device *dev;
        unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
+       struct ethtool_cmd ecmd;
+       int err;
 
-       dev = dev_get_by_index(sock_net(&po->sk), po->ifindex);
-       if (unlikely(dev == NULL))
+       rtnl_lock();
+       dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
+       if (unlikely(!dev)) {
+               rtnl_unlock();
                return DEFAULT_PRB_RETIRE_TOV;
-
-       if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
-               struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
-
-               if (!dev->ethtool_ops->get_settings(dev, &ecmd)) {
-                       switch (ecmd.speed) {
-                       case SPEED_10000:
-                               msec = 1;
-                               div = 10000/1000;
-                               break;
-                       case SPEED_1000:
-                               msec = 1;
-                               div = 1000/1000;
-                               break;
-                       /*
-                        * If the link speed is so slow you don't really
-                        * need to worry about perf anyways
-                        */
-                       case SPEED_100:
-                       case SPEED_10:
-                       default:
-                               return DEFAULT_PRB_RETIRE_TOV;
-                       }
+       }
+       err = __ethtool_get_settings(dev, &ecmd);
+       rtnl_unlock();
+       if (!err) {
+               switch (ecmd.speed) {
+               case SPEED_10000:
+                       msec = 1;
+                       div = 10000/1000;
+                       break;
+               case SPEED_1000:
+                       msec = 1;
+                       div = 1000/1000;
+                       break;
+               /*
+                * If the link speed is so slow you don't really
+                * need to worry about perf anyways
+                */
+               case SPEED_100:
+               case SPEED_10:
+               default:
+                       return DEFAULT_PRB_RETIRE_TOV;
                }
        }
 
index ec753b3ae72ade6005cecee012ec1e2759a7fb45..4cf6dc7910e4b0c13a2baf32447bd9000feb1f19 100644 (file)
@@ -9,6 +9,7 @@ config RDS
 
 config RDS_RDMA
        tristate "RDS over Infiniband and iWARP"
+       select LLIST
        depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
        ---help---
          Allow RDS to use Infiniband and iWARP as a transport.
index 819c35a0d9cbf9fa8527ba3668e2f80ca304676e..e8fdb172adbb23940b8d2d698cd72fa216cd6ba1 100644 (file)
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/rculist.h>
+#include <linux/llist.h>
 
 #include "rds.h"
 #include "ib.h"
-#include "xlist.h"
 
 static DEFINE_PER_CPU(unsigned long, clean_list_grace);
 #define CLEAN_LIST_BUSY_BIT 0
@@ -49,7 +49,7 @@ struct rds_ib_mr {
        struct rds_ib_mr_pool   *pool;
        struct ib_fmr           *fmr;
 
-       struct xlist_head       xlist;
+       struct llist_node       llnode;
 
        /* unmap_list is for freeing */
        struct list_head        unmap_list;
@@ -71,9 +71,9 @@ struct rds_ib_mr_pool {
        atomic_t                item_count;             /* total # of MRs */
        atomic_t                dirty_count;            /* # dirty of MRs */
 
-       struct xlist_head       drop_list;              /* MRs that have reached their max_maps limit */
-       struct xlist_head       free_list;              /* unused MRs */
-       struct xlist_head       clean_list;             /* global unused & unamapped MRs */
+       struct llist_head       drop_list;              /* MRs that have reached their max_maps limit */
+       struct llist_head       free_list;              /* unused MRs */
+       struct llist_head       clean_list;             /* global unused & unmapped MRs */
        wait_queue_head_t       flush_wait;
 
        atomic_t                free_pinned;            /* memory pinned by free MRs */
@@ -220,9 +220,9 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
        if (!pool)
                return ERR_PTR(-ENOMEM);
 
-       INIT_XLIST_HEAD(&pool->free_list);
-       INIT_XLIST_HEAD(&pool->drop_list);
-       INIT_XLIST_HEAD(&pool->clean_list);
+       init_llist_head(&pool->free_list);
+       init_llist_head(&pool->drop_list);
+       init_llist_head(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        init_waitqueue_head(&pool->flush_wait);
        INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
@@ -260,26 +260,18 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
        kfree(pool);
 }
 
-static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
-                        struct rds_ib_mr **ibmr_ret)
-{
-       struct xlist_head *ibmr_xl;
-       ibmr_xl = xlist_del_head_fast(xl);
-       *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
-}
-
 static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
 {
        struct rds_ib_mr *ibmr = NULL;
-       struct xlist_head *ret;
+       struct llist_node *ret;
        unsigned long *flag;
 
        preempt_disable();
        flag = &__get_cpu_var(clean_list_grace);
        set_bit(CLEAN_LIST_BUSY_BIT, flag);
-       ret = xlist_del_head(&pool->clean_list);
+       ret = llist_del_first(&pool->clean_list);
        if (ret)
-               ibmr = list_entry(ret, struct rds_ib_mr, xlist);
+               ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
 
        clear_bit(CLEAN_LIST_BUSY_BIT, flag);
        preempt_enable();
@@ -529,46 +521,44 @@ static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int fr
 }
 
 /*
- * given an xlist of mrs, put them all into the list_head for more processing
+ * given an llist of mrs, put them all into the list_head for more processing
  */
-static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
+static void llist_append_to_list(struct llist_head *llist, struct list_head *list)
 {
        struct rds_ib_mr *ibmr;
-       struct xlist_head splice;
-       struct xlist_head *cur;
-       struct xlist_head *next;
-
-       splice.next = NULL;
-       xlist_splice(xlist, &splice);
-       cur = splice.next;
-       while (cur) {
-               next = cur->next;
-               ibmr = list_entry(cur, struct rds_ib_mr, xlist);
+       struct llist_node *node;
+       struct llist_node *next;
+
+       node = llist_del_all(llist);
+       while (node) {
+               next = node->next;
+               ibmr = llist_entry(node, struct rds_ib_mr, llnode);
                list_add_tail(&ibmr->unmap_list, list);
-               cur = next;
+               node = next;
        }
 }
 
 /*
- * this takes a list head of mrs and turns it into an xlist of clusters.
- * each cluster has an xlist of MR_CLUSTER_SIZE mrs that are ready for
- * reuse.
+ * this takes a list head of mrs and turns it into linked llist nodes
+ * of clusters.  Each cluster has linked llist nodes of
+ * MR_CLUSTER_SIZE mrs that are ready for reuse.
  */
-static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
-                               struct list_head *list, struct xlist_head *xlist,
-                               struct xlist_head **tail_ret)
+static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
+                               struct list_head *list,
+                               struct llist_node **nodes_head,
+                               struct llist_node **nodes_tail)
 {
        struct rds_ib_mr *ibmr;
-       struct xlist_head *cur_mr = xlist;
-       struct xlist_head *tail_mr = NULL;
+       struct llist_node *cur = NULL;
+       struct llist_node **next = nodes_head;
 
        list_for_each_entry(ibmr, list, unmap_list) {
-               tail_mr = &ibmr->xlist;
-               tail_mr->next = NULL;
-               cur_mr->next = tail_mr;
-               cur_mr = tail_mr;
+               cur = &ibmr->llnode;
+               *next = cur;
+               next = &cur->next;
        }
-       *tail_ret = tail_mr;
+       *next = NULL;
+       *nodes_tail = cur;
 }
 
 /*
@@ -581,8 +571,8 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                                int free_all, struct rds_ib_mr **ibmr_ret)
 {
        struct rds_ib_mr *ibmr, *next;
-       struct xlist_head clean_xlist;
-       struct xlist_head *clean_tail;
+       struct llist_node *clean_nodes;
+       struct llist_node *clean_tail;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);
        unsigned long unpinned = 0;
@@ -603,7 +593,7 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
 
                        prepare_to_wait(&pool->flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
-                       if (xlist_empty(&pool->clean_list))
+                       if (llist_empty(&pool->clean_list))
                                schedule();
 
                        ibmr = rds_ib_reuse_fmr(pool);
@@ -628,10 +618,10 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list.
         */
-       xlist_append_to_list(&pool->drop_list, &unmap_list);
-       xlist_append_to_list(&pool->free_list, &unmap_list);
+       llist_append_to_list(&pool->drop_list, &unmap_list);
+       llist_append_to_list(&pool->free_list, &unmap_list);
        if (free_all)
-               xlist_append_to_list(&pool->clean_list, &unmap_list);
+               llist_append_to_list(&pool->clean_list, &unmap_list);
 
        free_goal = rds_ib_flush_goal(pool, free_all);
 
@@ -663,22 +653,22 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
        if (!list_empty(&unmap_list)) {
                /* we have to make sure that none of the things we're about
                 * to put on the clean list would race with other cpus trying
-                * to pull items off.  The xlist would explode if we managed to
+                * to pull items off.  The llist would explode if we managed to
                 * remove something from the clean list and then add it back again
-                * while another CPU was spinning on that same item in xlist_del_head.
+                * while another CPU was spinning on that same item in llist_del_first.
                 *
-                * This is pretty unlikely, but just in case wait for an xlist grace period
+                * This is pretty unlikely, but just in case wait for an llist grace period
                 * here before adding anything back into the clean list.
                 */
                wait_clean_list_grace();
 
-               list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
+               list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
                if (ibmr_ret)
-                       refill_local(pool, &clean_xlist, ibmr_ret);
+                       *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
 
-               /* refill_local may have emptied our list */
-               if (!xlist_empty(&clean_xlist))
-                       xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
+               /* more than one entry in llist nodes */
+               if (clean_nodes->next)
+                       llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
 
        }
 
@@ -711,9 +701,9 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 
        /* Return it to the pool's free list */
        if (ibmr->remap_count >= pool->fmr_attr.max_maps)
-               xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
+               llist_add(&ibmr->llnode, &pool->drop_list);
        else
-               xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);
+               llist_add(&ibmr->llnode, &pool->free_list);
 
        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);
diff --git a/net/rds/xlist.h b/net/rds/xlist.h
deleted file mode 100644 (file)
index e6b5190..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef _LINUX_XLIST_H
-#define _LINUX_XLIST_H
-
-#include <linux/stddef.h>
-#include <linux/poison.h>
-#include <linux/prefetch.h>
-#include <asm/system.h>
-
-struct xlist_head {
-       struct xlist_head *next;
-};
-
-static inline void INIT_XLIST_HEAD(struct xlist_head *list)
-{
-       list->next = NULL;
-}
-
-static inline int xlist_empty(struct xlist_head *head)
-{
-       return head->next == NULL;
-}
-
-static inline void xlist_add(struct xlist_head *new, struct xlist_head *tail,
-                            struct xlist_head *head)
-{
-       struct xlist_head *cur;
-       struct xlist_head *check;
-
-       while (1) {
-               cur = head->next;
-               tail->next = cur;
-               check = cmpxchg(&head->next, cur, new);
-               if (check == cur)
-                       break;
-       }
-}
-
-static inline struct xlist_head *xlist_del_head(struct xlist_head *head)
-{
-       struct xlist_head *cur;
-       struct xlist_head *check;
-       struct xlist_head *next;
-
-       while (1) {
-               cur = head->next;
-               if (!cur)
-                       goto out;
-
-               next = cur->next;
-               check = cmpxchg(&head->next, cur, next);
-               if (check == cur)
-                       goto out;
-       }
-out:
-       return cur;
-}
-
-static inline struct xlist_head *xlist_del_head_fast(struct xlist_head *head)
-{
-       struct xlist_head *cur;
-
-       cur = head->next;
-       if (!cur)
-               return NULL;
-
-       head->next = cur->next;
-       return cur;
-}
-
-static inline void xlist_splice(struct xlist_head *list,
-                               struct xlist_head *head)
-{
-       struct xlist_head *cur;
-
-       WARN_ON(head->next);
-       cur = xchg(&list->next, NULL);
-       head->next = cur;
-}
-
-#endif