]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville...
authorDavid S. Miller <davem@davemloft.net>
Wed, 28 Nov 2012 22:49:16 +0000 (17:49 -0500)
committerDavid S. Miller <davem@davemloft.net>
Wed, 28 Nov 2012 22:49:16 +0000 (17:49 -0500)
John W. Linville says:

====================
This pull request is intended for the 3.8 stream.  It is a bit large
-- I guess Thanksgiving got me off track!  At least the code got to
spend some time in linux-next... :-)

This includes the usual batch of pulls for Bluetooth, NFC, and mac80211
as well as iwlwifi.  Also here is an ath6kl pull, and a new driver
in the rtlwifi family.  The brcmfmac, brcmsmac, ath9k, and mwl8k get
their usual levels of attention, and a handful of other updates tag
along as well.

For more detail on the pulls, please see below...

On Bluetooth, Gustavo says:

"Another set of patches for integration in wireless-next. There are two big sets
of changes in it: Andrei Emeltchenko and Mat Martineau added more patches
towards a full Bluetooth High Speed support and Johan Hedberg improved the
single mode support for Bluetooth dongles. Apart from that we have small fixes
and improvements."

...and:

"A few patches to 3.8. The majority of the work here is from Andrei on the High
Speed support. Other than that Johan added support for setting LE advertising
data. The rest are fixes and clean ups and small improvements like support for
a new broadcom hardware."

On mac80211, Johannes says:

"This is for mac80211, for -next (3.8). Plenty of changes, as you can see
below. Some fixes for previous changes like the export.h include, the
beacon listener fix from Ben Greear, etc. Overall, no exciting new
features, though hwsim does gain channel context support for people to
try it out and look at."

...and...:

"This one contains the mac80211-next material. Apart from a few small new
features and cleanups I have two fixes for the channel context code. The
RX_END timestamp support will probably be reworked again as Simon Barber
noted the calculations weren't really valid, but the discussions there
are still going on and it's better than what we had before."

...and:

"Please pull (see below) to get the following changes:
 * a fix & a debug aid in IBSS from Antonio,
 * mesh cleanups from Marco,
 * a few bugfixes for some of my previous patches from Arend and myself,
 * and the big initial VHT support patchset"

And on iwlwifi, Johannes says:

"In addition to the previous four patches that I'm not resending,
we have a number of cleanups, message reduction, firmware error
handling improvements (yes yes... we need to fix them instead)
and various other small things all over."

...and:

"In his quest to try to understand the current iwlwifi problems (like
stuck queues etc.) Emmanuel has first cleaned up the PCIe code, I'm
including his changes in this pull request. Other than that I only have
a small cleanup from Sachin Kamat to remove a duplicate include and a
bugfix to turn off MFP if software crypto is enabled, but this isn't
really interesting as MFP isn't supported right now anyway."

On NFC, Samuel says:

"With this one we have:

- A few HCI improvements in preparation for an upcoming HCI chipset support.
- A pn544 code cleanup after the old driver was removed.
- An LLCP improvement for notifying user space when one peer stops ACKing I
  frames."

On ath6kl, Kalle says:

"Major changes this time are firmware recover support to gracefully
handle if firmware crashes, support for changing regulatory domain and
support for new ar6004 hardware revision 1.4. Otherwise there are just
smaller fixes or cleanups from different people."

That's about it... :-)  Please let me know if there are problems!
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
46 files changed:
drivers/net/bonding/bond_main.c
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can.h
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/cc770/cc770_platform.c
drivers/net/can/flexcan.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/sja1000/Kconfig
drivers/net/can/sja1000/sja1000_of_platform.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/Kconfig
drivers/net/can/usb/Makefile
drivers/net/can/usb/kvaser_usb.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/chelsio/Kconfig
drivers/net/ethernet/dec/ewrk3.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/defines.h
drivers/net/ethernet/intel/igbvf/igbvf.h
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/qlogic/qlcnic/Makefile
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c [new file with mode: 0644]
drivers/net/usb/smsc75xx.c
net/ipv6/ip6_tunnel.c
net/ipv6/sit.c
net/sched/sch_qfq.c

index 5f5b69f37d2e50d4a6ebbe91f4c3bc1346e042e3..c8bff3e83a5916caa11e14ff4a8a6b84f14ba13a 100644 (file)
@@ -1838,7 +1838,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                 * anyway (it holds no special properties of the bond device),
                 * so we can change it without calling change_active_interface()
                 */
-               if (!bond->curr_active_slave)
+               if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
                        bond->curr_active_slave = new_slave;
 
                break;
index fcff73a73b1d9b068565207f7d46479504fc9723..14b166bdbeafce294de12810524ac827c7351886 100644 (file)
@@ -1372,6 +1372,7 @@ static const struct platform_device_id at91_can_id_table[] = {
                /* sentinel */
        }
 };
+MODULE_DEVICE_TABLE(platform, at91_can_id_table);
 
 static struct platform_driver at91_can_driver = {
        .probe = at91_can_probe,
index f2d6d258a28629e3462921d37555cc85fbdb1ccb..a3f8de962258f3c0aad56378647541134e765bd0 100644 (file)
@@ -691,3 +691,4 @@ module_platform_driver(bfin_can_driver);
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");
+MODULE_ALIAS("platform:" DRV_NAME);
index e5180dfddba54dc6918b8dbb3e2c8643316e130b..5233b8f58d773b6edb44f306f477674f05415084 100644 (file)
@@ -233,6 +233,12 @@ static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
                pm_runtime_put_sync(priv->device);
 }
 
+static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
+{
+       if (priv->raminit)
+               priv->raminit(priv, enable);
+}
+
 static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
 {
        return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
@@ -1090,6 +1096,7 @@ static int c_can_open(struct net_device *dev)
        struct c_can_priv *priv = netdev_priv(dev);
 
        c_can_pm_runtime_get_sync(priv);
+       c_can_reset_ram(priv, true);
 
        /* open the can device */
        err = open_candev(dev);
@@ -1118,6 +1125,7 @@ static int c_can_open(struct net_device *dev)
 exit_irq_fail:
        close_candev(dev);
 exit_open_fail:
+       c_can_reset_ram(priv, false);
        c_can_pm_runtime_put_sync(priv);
        return err;
 }
@@ -1131,6 +1139,8 @@ static int c_can_close(struct net_device *dev)
        c_can_stop(dev);
        free_irq(dev->irq, dev);
        close_candev(dev);
+
+       c_can_reset_ram(priv, false);
        c_can_pm_runtime_put_sync(priv);
 
        return 0;
@@ -1188,6 +1198,7 @@ int c_can_power_down(struct net_device *dev)
 
        c_can_stop(dev);
 
+       c_can_reset_ram(priv, false);
        c_can_pm_runtime_put_sync(priv);
 
        return 0;
@@ -1206,6 +1217,7 @@ int c_can_power_up(struct net_device *dev)
        WARN_ON(priv->type != BOSCH_D_CAN);
 
        c_can_pm_runtime_get_sync(priv);
+       c_can_reset_ram(priv, true);
 
        /* Clear PDR and INIT bits */
        val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
index e5ed41dafa1b94aa234749064813c587aa79c468..d2e1c21b143f4ae729edd45312de1db0a804f438 100644 (file)
@@ -169,6 +169,9 @@ struct c_can_priv {
        void *priv;             /* for board-specific data */
        u16 irqstatus;
        enum c_can_dev_id type;
+       u32 __iomem *raminit_ctrlreg;
+       unsigned int instance;
+       void (*raminit) (const struct c_can_priv *priv, bool enable);
 };
 
 struct net_device *alloc_c_can_dev(void);
index ee1416132aba2e1f9582b7911f67e94eb21b3084..0044fd859db3d193bd64a6c2cecf549c046bded3 100644 (file)
@@ -38,6 +38,8 @@
 
 #include "c_can.h"
 
+#define CAN_RAMINIT_START_MASK(i)      (1 << (i))
+
 /*
  * 16-bit c_can registers can be arranged differently in the memory
  * architecture of different implementations. For example: 16-bit
@@ -68,6 +70,18 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
        writew(val, priv->base + 2 * priv->regs[index]);
 }
 
+static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+{
+       u32 val;
+
+       val = readl(priv->raminit_ctrlreg);
+       if (enable)
+               val |= CAN_RAMINIT_START_MASK(priv->instance);
+       else
+               val &= ~CAN_RAMINIT_START_MASK(priv->instance);
+       writel(val, priv->raminit_ctrlreg);
+}
+
 static struct platform_device_id c_can_id_table[] = {
        [BOSCH_C_CAN_PLATFORM] = {
                .name = KBUILD_MODNAME,
@@ -83,12 +97,14 @@ static struct platform_device_id c_can_id_table[] = {
        }, {
        }
 };
+MODULE_DEVICE_TABLE(platform, c_can_id_table);
 
 static const struct of_device_id c_can_of_table[] = {
        { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] },
        { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] },
        { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, c_can_of_table);
 
 static int __devinit c_can_plat_probe(struct platform_device *pdev)
 {
@@ -99,7 +115,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
        const struct of_device_id *match;
        const struct platform_device_id *id;
        struct pinctrl *pinctrl;
-       struct resource *mem;
+       struct resource *mem, *res;
        int irq;
        struct clk *clk;
 
@@ -178,6 +194,18 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
                priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
                priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
                priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+
+               if (pdev->dev.of_node)
+                       priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
+               else
+                       priv->instance = pdev->id;
+
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+               priv->raminit_ctrlreg = devm_request_and_ioremap(&pdev->dev, res);
+               if (!priv->raminit_ctrlreg || priv->instance < 0)
+                       dev_info(&pdev->dev, "control memory is not used for raminit\n");
+               else
+                       priv->raminit = c_can_hw_raminit;
                break;
        default:
                ret = -EINVAL;
index 688371cda37afc51ff125efa547e819126e4ca24..3da6cbb542aeaee5516b8fa4687c5e0a4a108bab 100644 (file)
@@ -60,6 +60,7 @@
 MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
 MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
 
 #define CC770_PLATFORM_CAN_CLOCK  16000000
 
@@ -258,6 +259,7 @@ static struct of_device_id __devinitdata cc770_platform_table[] = {
        {.compatible = "intc,82527"},  /* AN82527 from Intel CP */
        {},
 };
+MODULE_DEVICE_TABLE(of, cc770_platform_table);
 
 static struct platform_driver cc770_platform_driver = {
        .driver = {
index a412bf6d73ef6b0464f5441c85df94c3ab6b4f15..9a17965e00fdf1409a5dcf50bb05b8da20a7b9fc 100644 (file)
@@ -979,11 +979,13 @@ static const struct of_device_id flexcan_of_match[] = {
        { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
        { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, flexcan_of_match);
 
 static const struct platform_device_id flexcan_id_table[] = {
        { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, },
        { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(platform, flexcan_id_table);
 
 static int __devinit flexcan_probe(struct platform_device *pdev)
 {
index 799c354083c4800cf738837587a202725322cbac..514d020642558b9daa9a5fa616fda919a9ca6971 100644 (file)
@@ -396,6 +396,7 @@ static const struct of_device_id __devinitconst mpc5xxx_can_table[] = {
        { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
        {},
 };
+MODULE_DEVICE_TABLE(of, mpc5xxx_can_table);
 
 static struct platform_driver mpc5xxx_can_driver = {
        .driver = {
index 559be87a09040803501987841c08f7b05a71157e..92f73c708a3d7c3ec80133fb4b2f05872fd11849 100644 (file)
@@ -21,7 +21,7 @@ config CAN_SJA1000_PLATFORM
 
 config CAN_SJA1000_OF_PLATFORM
        tristate "Generic OF Platform Bus based SJA1000 driver"
-       depends on PPC_OF
+       depends on OF
        ---help---
          This driver adds support for the SJA1000 chips connected to
          the OpenFirmware "platform bus" found on embedded systems with
index f2683eb6a3d588a7dff7af9f8830a79dc18206c7..e45258d4369bb0fb721f3bfbc1a09832b26e57f9 100644 (file)
@@ -42,6 +42,8 @@
 #include <linux/can/dev.h>
 
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <asm/prom.h>
 
 #include "sja1000.h"
@@ -59,13 +61,13 @@ MODULE_LICENSE("GPL v2");
 
 static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
 {
-       return in_8(priv->reg_base + reg);
+       return ioread8(priv->reg_base + reg);
 }
 
 static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
                                  int reg, u8 val)
 {
-       out_8(priv->reg_base + reg, val);
+       iowrite8(val, priv->reg_base + reg);
 }
 
 static int __devexit sja1000_ofp_remove(struct platform_device *ofdev)
index 662c5f7eb0c54af4cb3c788959109a7693798629..21619bb5b869282a2d0a2f20090d93ea35908282 100644 (file)
@@ -34,6 +34,7 @@
 
 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
 MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
+MODULE_ALIAS("platform:" DRV_NAME);
 MODULE_LICENSE("GPL v2");
 
 static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg)
index 9ded21e79db5866602706ca568dea6613bc0a727..1267b366dcfe491260095ab20389989a9efcbee2 100644 (file)
@@ -1055,3 +1055,4 @@ module_platform_driver(ti_hecc_driver);
 MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION(DRV_DESC);
+MODULE_ALIAS("platform:" DRV_NAME);
index 0a6876841c20b62ab75f49e41b33ca6c45afbf12..a4e4bee35710fa3f71b1b597c98782765603c45f 100644 (file)
@@ -13,6 +13,35 @@ config CAN_ESD_USB2
           This driver supports the CAN-USB/2 interface
           from esd electronic system design gmbh (http://www.esd.eu).
 
+config CAN_KVASER_USB
+       tristate "Kvaser CAN/USB interface"
+       ---help---
+         This driver adds support for Kvaser CAN/USB devices like Kvaser
+         Leaf Light.
+
+         The driver gives support for the following devices:
+           - Kvaser Leaf Light
+           - Kvaser Leaf Professional HS
+           - Kvaser Leaf SemiPro HS
+           - Kvaser Leaf Professional LS
+           - Kvaser Leaf Professional SWC
+           - Kvaser Leaf Professional LIN
+           - Kvaser Leaf SemiPro LS
+           - Kvaser Leaf SemiPro SWC
+           - Kvaser Memorator II HS/HS
+           - Kvaser USBcan Professional HS/HS
+           - Kvaser Leaf Light GI
+           - Kvaser Leaf Professional HS (OBD-II connector)
+           - Kvaser Memorator Professional HS/LS
+           - Kvaser Leaf Light "China"
+           - Kvaser BlackBird SemiPro
+           - Kvaser USBcan R
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called kvaser_usb.
+
 config CAN_PEAK_USB
        tristate "PEAK PCAN-USB/USB Pro interfaces"
        ---help---
index da6d1d3b2969939758b6165c67ce3278a1e3e831..80a2ee41fd61726dc43b70724d20e6c63c9b8cac 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
 obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
 obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
new file mode 100644 (file)
index 0000000..5b58a4d
--- /dev/null
@@ -0,0 +1,1627 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * Parts of this driver are based on the following:
+ *  - Kvaser linux leaf driver (version 4.78)
+ *  - CAN driver for esd CAN-USB/2
+ *
+ * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ */
+
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#define MAX_TX_URBS                    16
+#define MAX_RX_URBS                    4
+#define START_TIMEOUT                  1000 /* msecs */
+#define STOP_TIMEOUT                   1000 /* msecs */
+#define USB_SEND_TIMEOUT               1000 /* msecs */
+#define USB_RECV_TIMEOUT               1000 /* msecs */
+#define RX_BUFFER_SIZE                 3072
+#define CAN_USB_CLOCK                  8000000
+#define MAX_NET_DEVICES                        3
+
+/* Kvaser USB devices */
+#define KVASER_VENDOR_ID               0x0bfd
+#define USB_LEAF_DEVEL_PRODUCT_ID      10
+#define USB_LEAF_LITE_PRODUCT_ID       11
+#define USB_LEAF_PRO_PRODUCT_ID                12
+#define USB_LEAF_SPRO_PRODUCT_ID       14
+#define USB_LEAF_PRO_LS_PRODUCT_ID     15
+#define USB_LEAF_PRO_SWC_PRODUCT_ID    16
+#define USB_LEAF_PRO_LIN_PRODUCT_ID    17
+#define USB_LEAF_SPRO_LS_PRODUCT_ID    18
+#define USB_LEAF_SPRO_SWC_PRODUCT_ID   19
+#define USB_MEMO2_DEVEL_PRODUCT_ID     22
+#define USB_MEMO2_HSHS_PRODUCT_ID      23
+#define USB_UPRO_HSHS_PRODUCT_ID       24
+#define USB_LEAF_LITE_GI_PRODUCT_ID    25
+#define USB_LEAF_PRO_OBDII_PRODUCT_ID  26
+#define USB_MEMO2_HSLS_PRODUCT_ID      27
+#define USB_LEAF_LITE_CH_PRODUCT_ID    28
+#define USB_BLACKBIRD_SPRO_PRODUCT_ID  29
+#define USB_OEM_MERCURY_PRODUCT_ID     34
+#define USB_OEM_LEAF_PRODUCT_ID                35
+#define USB_CAN_R_PRODUCT_ID           39
+
+/* USB devices features */
+#define KVASER_HAS_SILENT_MODE         BIT(0)
+#define KVASER_HAS_TXRX_ERRORS         BIT(1)
+
+/* Message header size */
+#define MSG_HEADER_LEN                 2
+
+/* Can message flags */
+#define MSG_FLAG_ERROR_FRAME           BIT(0)
+#define MSG_FLAG_OVERRUN               BIT(1)
+#define MSG_FLAG_NERR                  BIT(2)
+#define MSG_FLAG_WAKEUP                        BIT(3)
+#define MSG_FLAG_REMOTE_FRAME          BIT(4)
+#define MSG_FLAG_RESERVED              BIT(5)
+#define MSG_FLAG_TX_ACK                        BIT(6)
+#define MSG_FLAG_TX_REQUEST            BIT(7)
+
+/* Can states */
+#define M16C_STATE_BUS_RESET           BIT(0)
+#define M16C_STATE_BUS_ERROR           BIT(4)
+#define M16C_STATE_BUS_PASSIVE         BIT(5)
+#define M16C_STATE_BUS_OFF             BIT(6)
+
+/* Can msg ids */
+#define CMD_RX_STD_MESSAGE             12
+#define CMD_TX_STD_MESSAGE             13
+#define CMD_RX_EXT_MESSAGE             14
+#define CMD_TX_EXT_MESSAGE             15
+#define CMD_SET_BUS_PARAMS             16
+#define CMD_GET_BUS_PARAMS             17
+#define CMD_GET_BUS_PARAMS_REPLY       18
+#define CMD_GET_CHIP_STATE             19
+#define CMD_CHIP_STATE_EVENT           20
+#define CMD_SET_CTRL_MODE              21
+#define CMD_GET_CTRL_MODE              22
+#define CMD_GET_CTRL_MODE_REPLY                23
+#define CMD_RESET_CHIP                 24
+#define CMD_RESET_CARD                 25
+#define CMD_START_CHIP                 26
+#define CMD_START_CHIP_REPLY           27
+#define CMD_STOP_CHIP                  28
+#define CMD_STOP_CHIP_REPLY            29
+#define CMD_GET_CARD_INFO2             32
+#define CMD_GET_CARD_INFO              34
+#define CMD_GET_CARD_INFO_REPLY                35
+#define CMD_GET_SOFTWARE_INFO          38
+#define CMD_GET_SOFTWARE_INFO_REPLY    39
+#define CMD_ERROR_EVENT                        45
+#define CMD_FLUSH_QUEUE                        48
+#define CMD_RESET_ERROR_COUNTER                49
+#define CMD_TX_ACKNOWLEDGE             50
+#define CMD_CAN_ERROR_EVENT            51
+#define CMD_USB_THROTTLE               77
+#define CMD_LOG_MESSAGE                        106
+
+/* error factors */
+#define M16C_EF_ACKE                   BIT(0)
+#define M16C_EF_CRCE                   BIT(1)
+#define M16C_EF_FORME                  BIT(2)
+#define M16C_EF_STFE                   BIT(3)
+#define M16C_EF_BITE0                  BIT(4)
+#define M16C_EF_BITE1                  BIT(5)
+#define M16C_EF_RCVE                   BIT(6)
+#define M16C_EF_TRE                    BIT(7)
+
+/* bittiming parameters */
+#define KVASER_USB_TSEG1_MIN           1
+#define KVASER_USB_TSEG1_MAX           16
+#define KVASER_USB_TSEG2_MIN           1
+#define KVASER_USB_TSEG2_MAX           8
+#define KVASER_USB_SJW_MAX             4
+#define KVASER_USB_BRP_MIN             1
+#define KVASER_USB_BRP_MAX             64
+#define KVASER_USB_BRP_INC             1
+
+/* ctrl modes */
+#define KVASER_CTRL_MODE_NORMAL                1
+#define KVASER_CTRL_MODE_SILENT                2
+#define KVASER_CTRL_MODE_SELFRECEPTION 3
+#define KVASER_CTRL_MODE_OFF           4
+
+struct kvaser_msg_simple {
+       u8 tid;
+       u8 channel;
+} __packed;
+
+struct kvaser_msg_cardinfo {
+       u8 tid;
+       u8 nchannels;
+       __le32 serial_number;
+       __le32 padding;
+       __le32 clock_resolution;
+       __le32 mfgdate;
+       u8 ean[8];
+       u8 hw_revision;
+       u8 usb_hs_mode;
+       __le16 padding2;
+} __packed;
+
+struct kvaser_msg_cardinfo2 {
+       u8 tid;
+       u8 channel;
+       u8 pcb_id[24];
+       __le32 oem_unlock_code;
+} __packed;
+
+struct kvaser_msg_softinfo {
+       u8 tid;
+       u8 channel;
+       __le32 sw_options;
+       __le32 fw_version;
+       __le16 max_outstanding_tx;
+       __le16 padding[9];
+} __packed;
+
+struct kvaser_msg_busparams {
+       u8 tid;
+       u8 channel;
+       __le32 bitrate;
+       u8 tseg1;
+       u8 tseg2;
+       u8 sjw;
+       u8 no_samp;
+} __packed;
+
+struct kvaser_msg_tx_can {
+       u8 channel;
+       u8 tid;
+       u8 msg[14];
+       u8 padding;
+       u8 flags;
+} __packed;
+
+struct kvaser_msg_rx_can {
+       u8 channel;
+       u8 flag;
+       __le16 time[3];
+       u8 msg[14];
+} __packed;
+
+struct kvaser_msg_chip_state_event {
+       u8 tid;
+       u8 channel;
+       __le16 time[3];
+       u8 tx_errors_count;
+       u8 rx_errors_count;
+       u8 status;
+       u8 padding[3];
+} __packed;
+
+struct kvaser_msg_tx_acknowledge {
+       u8 channel;
+       u8 tid;
+       __le16 time[3];
+       u8 flags;
+       u8 time_offset;
+} __packed;
+
+struct kvaser_msg_error_event {
+       u8 tid;
+       u8 flags;
+       __le16 time[3];
+       u8 channel;
+       u8 padding;
+       u8 tx_errors_count;
+       u8 rx_errors_count;
+       u8 status;
+       u8 error_factor;
+} __packed;
+
+struct kvaser_msg_ctrl_mode {
+       u8 tid;
+       u8 channel;
+       u8 ctrl_mode;
+       u8 padding[3];
+} __packed;
+
+struct kvaser_msg_flush_queue {
+       u8 tid;
+       u8 channel;
+       u8 flags;
+       u8 padding[3];
+} __packed;
+
+struct kvaser_msg_log_message {
+       u8 channel;
+       u8 flags;
+       __le16 time[3];
+       u8 dlc;
+       u8 time_offset;
+       __le32 id;
+       u8 data[8];
+} __packed;
+
+struct kvaser_msg {
+       u8 len;
+       u8 id;
+       union   {
+               struct kvaser_msg_simple simple;
+               struct kvaser_msg_cardinfo cardinfo;
+               struct kvaser_msg_cardinfo2 cardinfo2;
+               struct kvaser_msg_softinfo softinfo;
+               struct kvaser_msg_busparams busparams;
+               struct kvaser_msg_tx_can tx_can;
+               struct kvaser_msg_rx_can rx_can;
+               struct kvaser_msg_chip_state_event chip_state_event;
+               struct kvaser_msg_tx_acknowledge tx_acknowledge;
+               struct kvaser_msg_error_event error_event;
+               struct kvaser_msg_ctrl_mode ctrl_mode;
+               struct kvaser_msg_flush_queue flush_queue;
+               struct kvaser_msg_log_message log_message;
+       } u;
+} __packed;
+
+struct kvaser_usb_tx_urb_context {
+       struct kvaser_usb_net_priv *priv;
+       u32 echo_index;
+       int dlc;
+};
+
+struct kvaser_usb {
+       struct usb_device *udev;
+       struct kvaser_usb_net_priv *nets[MAX_NET_DEVICES];
+
+       struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+       struct usb_anchor rx_submitted;
+
+       u32 fw_version;
+       unsigned int nchannels;
+
+       bool rxinitdone;
+       void *rxbuf[MAX_RX_URBS];
+       dma_addr_t rxbuf_dma[MAX_RX_URBS];
+};
+
+struct kvaser_usb_net_priv {
+       struct can_priv can;
+
+       atomic_t active_tx_urbs;
+       struct usb_anchor tx_submitted;
+       struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
+
+       struct completion start_comp, stop_comp;
+
+       struct kvaser_usb *dev;
+       struct net_device *netdev;
+       int channel;
+
+       struct can_berr_counter bec;
+};
+
+static const struct usb_device_id kvaser_usb_table[] = {
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS |
+                              KVASER_HAS_SILENT_MODE },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS |
+                              KVASER_HAS_SILENT_MODE },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS |
+                              KVASER_HAS_SILENT_MODE },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS |
+                              KVASER_HAS_SILENT_MODE },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS |
+                              KVASER_HAS_SILENT_MODE },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS |
+                              KVASER_HAS_SILENT_MODE },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS |
+                              KVASER_HAS_SILENT_MODE },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS |
+                              KVASER_HAS_SILENT_MODE },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS |
+                              KVASER_HAS_SILENT_MODE },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS |
+                              KVASER_HAS_SILENT_MODE },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { }
+};
+MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
+
+static inline int kvaser_usb_send_msg(const struct kvaser_usb *dev,
+                                     struct kvaser_msg *msg)
+{
+       int actual_len;
+
+       return usb_bulk_msg(dev->udev,
+                           usb_sndbulkpipe(dev->udev,
+                                       dev->bulk_out->bEndpointAddress),
+                           msg, msg->len, &actual_len,
+                           USB_SEND_TIMEOUT);
+}
+
+static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
+                              struct kvaser_msg *msg)
+{
+       struct kvaser_msg *tmp;
+       void *buf;
+       int actual_len;
+       int err;
+       int pos = 0;
+
+       buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       err = usb_bulk_msg(dev->udev,
+                          usb_rcvbulkpipe(dev->udev,
+                                          dev->bulk_in->bEndpointAddress),
+                          buf, RX_BUFFER_SIZE, &actual_len,
+                          USB_RECV_TIMEOUT);
+       if (err < 0)
+               goto end;
+
+       while (pos <= actual_len - MSG_HEADER_LEN) {
+               tmp = buf + pos;
+
+               if (!tmp->len)
+                       break;
+
+               if (pos + tmp->len > actual_len) {
+                       dev_err(dev->udev->dev.parent, "Format error\n");
+                       break;
+               }
+
+               if (tmp->id == id) {
+                       memcpy(msg, tmp, tmp->len);
+                       goto end;
+               }
+
+               pos += tmp->len;
+       }
+
+       err = -EINVAL;
+
+end:
+       kfree(buf);
+
+       return err;
+}
+
+/* Build and synchronously send a "simple" command, i.e. one carrying
+ * only a command id and a channel number (tid fixed to 0xff).
+ */
+static int kvaser_usb_send_simple_msg(const struct kvaser_usb *dev,
+                                     u8 msg_id, int channel)
+{
+       struct kvaser_msg *cmd;
+       int ret;
+
+       cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
+       cmd->id = msg_id;
+       cmd->u.simple.tid = 0xff;
+       cmd->u.simple.channel = channel;
+
+       ret = kvaser_usb_send_msg(dev, cmd);
+       kfree(cmd);
+
+       return ret;
+}
+
+/* Query the device's firmware version and cache it in dev->fw_version. */
+static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
+{
+       int ret;
+       struct kvaser_msg reply;
+
+       ret = kvaser_usb_send_simple_msg(dev, CMD_GET_SOFTWARE_INFO, 0);
+       if (ret)
+               return ret;
+
+       ret = kvaser_usb_wait_msg(dev, CMD_GET_SOFTWARE_INFO_REPLY, &reply);
+       if (ret)
+               return ret;
+
+       dev->fw_version = le32_to_cpu(reply.u.softinfo.fw_version);
+       return 0;
+}
+
+/* Query the device's channel count and cache it in dev->nchannels. */
+static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
+{
+       int ret;
+       struct kvaser_msg reply;
+
+       ret = kvaser_usb_send_simple_msg(dev, CMD_GET_CARD_INFO, 0);
+       if (ret)
+               return ret;
+
+       ret = kvaser_usb_wait_msg(dev, CMD_GET_CARD_INFO_REPLY, &reply);
+       if (ret)
+               return ret;
+
+       dev->nchannels = reply.u.cardinfo.nchannels;
+       return 0;
+}
+
+/* CMD_TX_ACKNOWLEDGE handler: the device confirmed sending a frame.
+ * Looks up the tx context by transaction id, loops the echoed skb back
+ * to the stack, updates tx statistics, and frees the context so the
+ * queue can move again.  Also synthesizes a CAN_ERR_RESTARTED frame
+ * when an ack arrives while the state still says bus-off.
+ */
+static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
+                                     const struct kvaser_msg *msg)
+{
+       struct net_device_stats *stats;
+       struct kvaser_usb_tx_urb_context *context;
+       struct kvaser_usb_net_priv *priv;
+       struct sk_buff *skb;
+       struct can_frame *cf;
+       u8 channel = msg->u.tx_acknowledge.channel;
+       u8 tid = msg->u.tx_acknowledge.tid;
+
+       if (channel >= dev->nchannels) {
+               dev_err(dev->udev->dev.parent,
+                       "Invalid channel number (%d)\n", channel);
+               return;
+       }
+
+       priv = dev->nets[channel];
+
+       if (!netif_device_present(priv->netdev))
+               return;
+
+       stats = &priv->netdev->stats;
+
+       /* tid was set to the context's echo_index in start_xmit(); the
+        * modulo is defensive against out-of-range ids from the device.
+        */
+       context = &priv->tx_contexts[tid % MAX_TX_URBS];
+
+       /* Sometimes the state change doesn't come after a bus-off event */
+       if (priv->can.restart_ms &&
+           (priv->can.state >= CAN_STATE_BUS_OFF)) {
+               skb = alloc_can_err_skb(priv->netdev, &cf);
+               if (skb) {
+                       cf->can_id |= CAN_ERR_RESTARTED;
+                       netif_rx(skb);
+
+                       stats->rx_packets++;
+                       stats->rx_bytes += cf->can_dlc;
+               } else {
+                       netdev_err(priv->netdev,
+                                  "No memory left for err_skb\n");
+               }
+
+               priv->can.can_stats.restarts++;
+               netif_carrier_on(priv->netdev);
+
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+       }
+
+       stats->tx_packets++;
+       stats->tx_bytes += context->dlc;
+       /* Hand the skb stored by can_put_echo_skb() back to the stack. */
+       can_get_echo_skb(priv->netdev, context->echo_index);
+
+       /* echo_index == MAX_TX_URBS marks the context as free again. */
+       context->echo_index = MAX_TX_URBS;
+       atomic_dec(&priv->active_tx_urbs);
+
+       netif_wake_queue(priv->netdev);
+}
+
+/* Completion handler for fire-and-forget simple commands: release the
+ * transfer buffer and log any non-zero URB status.  urb->context must
+ * be the struct net_device the command was sent for.
+ */
+static void kvaser_usb_simple_msg_callback(struct urb *urb)
+{
+       struct net_device *netdev = urb->context;
+       int status = urb->status;
+
+       kfree(urb->transfer_buffer);
+
+       if (status)
+               netdev_warn(netdev, "urb status received: %d\n", status);
+}
+
+/* Asynchronously send a "simple" command (id + channel) from atomic
+ * context (used on error-handling paths such as bus-off).  The URB
+ * completion handler frees the transfer buffer.
+ *
+ * Returns 0 on successful submission, negative errno otherwise.
+ */
+static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
+                                      u8 msg_id)
+{
+       struct kvaser_usb *dev = priv->dev;
+       struct net_device *netdev = priv->netdev;
+       struct kvaser_msg *msg;
+       struct urb *urb;
+       void *buf;
+       int err;
+
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!urb) {
+               netdev_err(netdev, "No memory left for URBs\n");
+               return -ENOMEM;
+       }
+
+       buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
+       if (!buf) {
+               netdev_err(netdev, "No memory left for USB buffer\n");
+               usb_free_urb(urb);
+               return -ENOMEM;
+       }
+
+       msg = (struct kvaser_msg *)buf;
+       msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
+       msg->id = msg_id;
+       msg->u.simple.channel = priv->channel;
+
+       /* Bug fix: the completion handler dereferences urb->context as a
+        * struct net_device *, so pass priv->netdev rather than priv --
+        * otherwise the warning path in the callback reads garbage.
+        */
+       usb_fill_bulk_urb(urb, dev->udev,
+                         usb_sndbulkpipe(dev->udev,
+                                         dev->bulk_out->bEndpointAddress),
+                         buf, msg->len,
+                         kvaser_usb_simple_msg_callback, priv->netdev);
+       usb_anchor_urb(urb, &priv->tx_submitted);
+
+       err = usb_submit_urb(urb, GFP_ATOMIC);
+       if (err) {
+               netdev_err(netdev, "Error transmitting URB\n");
+               usb_unanchor_urb(urb);
+               usb_free_urb(urb);
+               kfree(buf);
+               return err;
+       }
+
+       /* Submitted: drop our reference; the anchor/USB core keep it alive. */
+       usb_free_urb(urb);
+
+       return 0;
+}
+
+/* Kill every in-flight tx URB for a channel and recycle its contexts. */
+static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+{
+       int idx;
+
+       usb_kill_anchored_urbs(&priv->tx_submitted);
+       atomic_set(&priv->active_tx_urbs, 0);
+
+       /* echo_index == MAX_TX_URBS marks a context as free. */
+       for (idx = 0; idx < MAX_TX_URBS; idx++)
+               priv->tx_contexts[idx].echo_index = MAX_TX_URBS;
+}
+
+/* Translate a device error/state message into a CAN error frame and a
+ * possible priv->can.state transition.  Three message layouts carry the
+ * same information (status byte, tx/rx error counters, error factor):
+ * CMD_CAN_ERROR_EVENT, CMD_LOG_MESSAGE and CMD_CHIP_STATE_EVENT.
+ */
+static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+                               const struct kvaser_msg *msg)
+{
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       struct net_device_stats *stats;
+       struct kvaser_usb_net_priv *priv;
+       unsigned int new_state;
+       u8 channel, status, txerr, rxerr, error_factor;
+
+       /* Decode the fields according to the message layout. */
+       switch (msg->id) {
+       case CMD_CAN_ERROR_EVENT:
+               channel = msg->u.error_event.channel;
+               status =  msg->u.error_event.status;
+               txerr = msg->u.error_event.tx_errors_count;
+               rxerr = msg->u.error_event.rx_errors_count;
+               error_factor = msg->u.error_event.error_factor;
+               break;
+       case CMD_LOG_MESSAGE:
+               channel = msg->u.log_message.channel;
+               status = msg->u.log_message.data[0];
+               txerr = msg->u.log_message.data[2];
+               rxerr = msg->u.log_message.data[3];
+               error_factor = msg->u.log_message.data[1];
+               break;
+       case CMD_CHIP_STATE_EVENT:
+               channel = msg->u.chip_state_event.channel;
+               status =  msg->u.chip_state_event.status;
+               txerr = msg->u.chip_state_event.tx_errors_count;
+               rxerr = msg->u.chip_state_event.rx_errors_count;
+               error_factor = 0;
+               break;
+       default:
+               dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
+                       msg->id);
+               return;
+       }
+
+       if (channel >= dev->nchannels) {
+               dev_err(dev->udev->dev.parent,
+                       "Invalid channel number (%d)\n", channel);
+               return;
+       }
+
+       priv = dev->nets[channel];
+       stats = &priv->netdev->stats;
+
+       /* Controller reset in progress: drop pending tx, report nothing. */
+       if (status & M16C_STATE_BUS_RESET) {
+               kvaser_usb_unlink_tx_urbs(priv);
+               return;
+       }
+
+       skb = alloc_can_err_skb(priv->netdev, &cf);
+       if (!skb) {
+               stats->rx_dropped++;
+               return;
+       }
+
+       new_state = priv->can.state;
+
+       netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
+
+       /* Map the M16C status bits onto CAN error states/frames. */
+       if (status & M16C_STATE_BUS_OFF) {
+               cf->can_id |= CAN_ERR_BUSOFF;
+
+               priv->can.can_stats.bus_off++;
+               /* Without auto-restart the chip is stopped explicitly. */
+               if (!priv->can.restart_ms)
+                       kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
+
+               netif_carrier_off(priv->netdev);
+
+               new_state = CAN_STATE_BUS_OFF;
+       } else if (status & M16C_STATE_BUS_PASSIVE) {
+               /* Report the transition into error-passive only once. */
+               if (priv->can.state != CAN_STATE_ERROR_PASSIVE) {
+                       cf->can_id |= CAN_ERR_CRTL;
+
+                       if (txerr || rxerr)
+                               cf->data[1] = (txerr > rxerr)
+                                               ? CAN_ERR_CRTL_TX_PASSIVE
+                                               : CAN_ERR_CRTL_RX_PASSIVE;
+                       else
+                               cf->data[1] = CAN_ERR_CRTL_TX_PASSIVE |
+                                             CAN_ERR_CRTL_RX_PASSIVE;
+
+                       priv->can.can_stats.error_passive++;
+               }
+
+               new_state = CAN_STATE_ERROR_PASSIVE;
+       }
+
+       /* NOTE(review): exact compare (==), not a bit test -- only fires
+        * when BUS_ERROR is the sole status bit set; confirm intended.
+        */
+       if (status == M16C_STATE_BUS_ERROR) {
+               if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
+                   ((txerr >= 96) || (rxerr >= 96))) {
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[1] = (txerr > rxerr)
+                                       ? CAN_ERR_CRTL_TX_WARNING
+                                       : CAN_ERR_CRTL_RX_WARNING;
+
+                       priv->can.can_stats.error_warning++;
+                       new_state = CAN_STATE_ERROR_WARNING;
+               } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+                       cf->can_id |= CAN_ERR_PROT;
+                       cf->data[2] = CAN_ERR_PROT_ACTIVE;
+
+                       new_state = CAN_STATE_ERROR_ACTIVE;
+               }
+       }
+
+       /* All status bits clear: the controller is back to error-active. */
+       if (!status) {
+               cf->can_id |= CAN_ERR_PROT;
+               cf->data[2] = CAN_ERR_PROT_ACTIVE;
+
+               new_state = CAN_STATE_ERROR_ACTIVE;
+       }
+
+       /* Auto-restart: leaving bus-off counts as a restart. */
+       if (priv->can.restart_ms &&
+           (priv->can.state >= CAN_STATE_BUS_OFF) &&
+           (new_state < CAN_STATE_BUS_OFF)) {
+               cf->can_id |= CAN_ERR_RESTARTED;
+               netif_carrier_on(priv->netdev);
+
+               priv->can.can_stats.restarts++;
+       }
+
+       /* Decode the contributing bus-error factors into the frame. */
+       if (error_factor) {
+               priv->can.can_stats.bus_error++;
+               stats->rx_errors++;
+
+               cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+
+               if (error_factor & M16C_EF_ACKE)
+                       cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
+               if (error_factor & M16C_EF_CRCE)
+                       cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+                                       CAN_ERR_PROT_LOC_CRC_DEL);
+               if (error_factor & M16C_EF_FORME)
+                       cf->data[2] |= CAN_ERR_PROT_FORM;
+               if (error_factor & M16C_EF_STFE)
+                       cf->data[2] |= CAN_ERR_PROT_STUFF;
+               if (error_factor & M16C_EF_BITE0)
+                       cf->data[2] |= CAN_ERR_PROT_BIT0;
+               if (error_factor & M16C_EF_BITE1)
+                       cf->data[2] |= CAN_ERR_PROT_BIT1;
+               if (error_factor & M16C_EF_TRE)
+                       cf->data[2] |= CAN_ERR_PROT_TX;
+       }
+
+       cf->data[6] = txerr;
+       cf->data[7] = rxerr;
+
+       /* Cache the counters for do_get_berr_counter(). */
+       priv->bec.txerr = txerr;
+       priv->bec.rxerr = rxerr;
+
+       priv->can.state = new_state;
+
+       netif_rx(skb);
+
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+}
+
+static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
+                                 const struct kvaser_msg *msg)
+{
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       struct net_device_stats *stats = &priv->netdev->stats;
+
+       if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
+                                        MSG_FLAG_NERR)) {
+               netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n",
+                          msg->u.rx_can.flag);
+
+               stats->rx_errors++;
+               return;
+       }
+
+       if (msg->u.rx_can.flag & MSG_FLAG_OVERRUN) {
+               skb = alloc_can_err_skb(priv->netdev, &cf);
+               if (!skb) {
+                       stats->rx_dropped++;
+                       return;
+               }
+
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+               stats->rx_over_errors++;
+               stats->rx_errors++;
+
+               netif_rx(skb);
+
+               stats->rx_packets++;
+               stats->rx_bytes += cf->can_dlc;
+       }
+}
+
+/* Deliver one received CAN frame (CMD_RX_STD_MESSAGE or
+ * CMD_RX_EXT_MESSAGE) to the networking stack.  Frames carrying
+ * error/NERR/overrun flags are diverted to kvaser_usb_rx_can_err().
+ */
+static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
+                                 const struct kvaser_msg *msg)
+{
+       struct kvaser_usb_net_priv *priv;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       struct net_device_stats *stats;
+       u8 channel = msg->u.rx_can.channel;
+
+       if (channel >= dev->nchannels) {
+               dev_err(dev->udev->dev.parent,
+                       "Invalid channel number (%d)\n", channel);
+               return;
+       }
+
+       priv = dev->nets[channel];
+       stats = &priv->netdev->stats;
+
+       if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR |
+                                 MSG_FLAG_OVERRUN)) {
+               kvaser_usb_rx_can_err(priv, msg);
+               return;
+       } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) {
+               netdev_warn(priv->netdev,
+                           "Unhandled frame (flags: 0x%02x)",
+                           msg->u.rx_can.flag);
+               return;
+       }
+
+       skb = alloc_can_skb(priv->netdev, &cf);
+       if (!skb) {
+               /* Bug fix: this is the receive path, so account the
+                * drop as rx_dropped (was stats->tx_dropped++).
+                */
+               stats->rx_dropped++;
+               return;
+       }
+
+       /* The 11-bit id is split across msg[0]/msg[1] (5 + 6 bits). */
+       cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
+                    (msg->u.rx_can.msg[1] & 0x3f);
+       cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+
+       if (msg->id == CMD_RX_EXT_MESSAGE) {
+               /* Extended frame: the 29-bit id continues in msg[2..4]. */
+               cf->can_id <<= 18;
+               cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
+                             ((msg->u.rx_can.msg[3] & 0xff) << 6) |
+                             (msg->u.rx_can.msg[4] & 0x3f);
+               cf->can_id |= CAN_EFF_FLAG;
+       }
+
+       if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
+               cf->can_id |= CAN_RTR_FLAG;
+       else
+               memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc);
+
+       netif_rx(skb);
+
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+}
+
+/* CMD_START_CHIP_REPLY handler.
+ *
+ * A start can be requested synchronously by kvaser_usb_start_chip()
+ * (which waits on start_comp) or asynchronously via
+ * kvaser_usb_simple_msg_async(..., CMD_START_CHIP) on a restart.  If
+ * start_comp is already completed and the queue is stopped, this reply
+ * presumably belongs to such a restart, so only wake the queue;
+ * otherwise start the queue and signal the waiter.
+ */
+static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
+                                       const struct kvaser_msg *msg)
+{
+       struct kvaser_usb_net_priv *priv;
+       u8 channel = msg->u.simple.channel;
+
+       if (channel >= dev->nchannels) {
+               dev_err(dev->udev->dev.parent,
+                       "Invalid channel number (%d)\n", channel);
+               return;
+       }
+
+       priv = dev->nets[channel];
+
+       if (completion_done(&priv->start_comp) &&
+           netif_queue_stopped(priv->netdev)) {
+               netif_wake_queue(priv->netdev);
+       } else {
+               netif_start_queue(priv->netdev);
+               complete(&priv->start_comp);
+       }
+}
+
+/* CMD_STOP_CHIP_REPLY: wake the waiter in kvaser_usb_stop_chip(). */
+static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev,
+                                      const struct kvaser_msg *msg)
+{
+       u8 channel = msg->u.simple.channel;
+
+       if (channel >= dev->nchannels) {
+               dev_err(dev->udev->dev.parent,
+                       "Invalid channel number (%d)\n", channel);
+               return;
+       }
+
+       complete(&dev->nets[channel]->stop_comp);
+}
+
+/* Demultiplex one device message to its handler. */
+static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
+                                     const struct kvaser_msg *msg)
+{
+       u8 id = msg->id;
+
+       if (id == CMD_START_CHIP_REPLY) {
+               kvaser_usb_start_chip_reply(dev, msg);
+       } else if (id == CMD_STOP_CHIP_REPLY) {
+               kvaser_usb_stop_chip_reply(dev, msg);
+       } else if (id == CMD_RX_STD_MESSAGE || id == CMD_RX_EXT_MESSAGE) {
+               kvaser_usb_rx_can_msg(dev, msg);
+       } else if (id == CMD_CHIP_STATE_EVENT || id == CMD_CAN_ERROR_EVENT) {
+               kvaser_usb_rx_error(dev, msg);
+       } else if (id == CMD_LOG_MESSAGE) {
+               /* Log records matter only when flagged as error frames. */
+               if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME)
+                       kvaser_usb_rx_error(dev, msg);
+       } else if (id == CMD_TX_ACKNOWLEDGE) {
+               kvaser_usb_tx_acknowledge(dev, msg);
+       } else {
+               dev_warn(dev->udev->dev.parent,
+                        "Unhandled message (%d)\n", msg->id);
+       }
+}
+
+/* Completion handler for the shared rx bulk URBs.  Splits the transfer
+ * into consecutive variable-length messages, dispatches each one, then
+ * resubmits the URB.  -ENOENT/-ESHUTDOWN mean the URB was killed (the
+ * device is going away): bail out without resubmitting.
+ */
+static void kvaser_usb_read_bulk_callback(struct urb *urb)
+{
+       struct kvaser_usb *dev = urb->context;
+       struct kvaser_msg *msg;
+       int pos = 0;
+       int err, i;
+
+       switch (urb->status) {
+       case 0:
+               break;
+       case -ENOENT:
+       case -ESHUTDOWN:
+               return;
+       default:
+               dev_info(dev->udev->dev.parent, "Rx URB aborted (%d)\n",
+                        urb->status);
+               goto resubmit_urb;
+       }
+
+       /* Same walk as kvaser_usb_wait_msg(): step by each msg's len.
+        * NOTE(review): urb->actual_length is unsigned; a transfer
+        * shorter than MSG_HEADER_LEN would make the subtraction wrap --
+        * confirm the device's minimum transfer size.
+        */
+       while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+               msg = urb->transfer_buffer + pos;
+
+               if (!msg->len)
+                       break;
+
+               if (pos + msg->len > urb->actual_length) {
+                       dev_err(dev->udev->dev.parent, "Format error\n");
+                       break;
+               }
+
+               kvaser_usb_handle_message(dev, msg);
+
+               pos += msg->len;
+       }
+
+resubmit_urb:
+       usb_fill_bulk_urb(urb, dev->udev,
+                         usb_rcvbulkpipe(dev->udev,
+                                         dev->bulk_in->bEndpointAddress),
+                         urb->transfer_buffer, RX_BUFFER_SIZE,
+                         kvaser_usb_read_bulk_callback, dev);
+
+       err = usb_submit_urb(urb, GFP_ATOMIC);
+       if (err == -ENODEV) {
+               /* Device is gone: detach all netdevs from the stack. */
+               for (i = 0; i < dev->nchannels; i++) {
+                       if (!dev->nets[i])
+                               continue;
+
+                       netif_device_detach(dev->nets[i]->netdev);
+               }
+       } else if (err) {
+               dev_err(dev->udev->dev.parent,
+                       "Failed resubmitting read bulk urb: %d\n", err);
+       }
+
+       return;
+}
+
+/* Allocate, fill and submit the pool of MAX_RX_URBS receive URBs with
+ * coherent DMA buffers, anchored on dev->rx_submitted.  Runs once;
+ * later calls are no-ops via dev->rxinitdone.
+ *
+ * Partial success is tolerated: if at least one URB was submitted the
+ * function returns 0 (with a performance warning); only a complete
+ * failure propagates the error.
+ */
+static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
+{
+       int i, err = 0;
+
+       if (dev->rxinitdone)
+               return 0;
+
+       for (i = 0; i < MAX_RX_URBS; i++) {
+               struct urb *urb = NULL;
+               u8 *buf = NULL;
+               dma_addr_t buf_dma;
+
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!urb) {
+                       dev_warn(dev->udev->dev.parent,
+                                "No memory left for URBs\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE,
+                                        GFP_KERNEL, &buf_dma);
+               if (!buf) {
+                       dev_warn(dev->udev->dev.parent,
+                                "No memory left for USB buffer\n");
+                       usb_free_urb(urb);
+                       err = -ENOMEM;
+                       break;
+               }
+
+               usb_fill_bulk_urb(urb, dev->udev,
+                                 usb_rcvbulkpipe(dev->udev,
+                                         dev->bulk_in->bEndpointAddress),
+                                 buf, RX_BUFFER_SIZE,
+                                 kvaser_usb_read_bulk_callback,
+                                 dev);
+               urb->transfer_dma = buf_dma;
+               urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+               usb_anchor_urb(urb, &dev->rx_submitted);
+
+               err = usb_submit_urb(urb, GFP_KERNEL);
+               if (err) {
+                       usb_unanchor_urb(urb);
+                       usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
+                                         buf_dma);
+                       usb_free_urb(urb);
+                       break;
+               }
+
+               /* Remember the buffer for the final usb_free_coherent()
+                * in kvaser_usb_unlink_all_urbs().
+                */
+               dev->rxbuf[i] = buf;
+               dev->rxbuf_dma[i] = buf_dma;
+
+               /* Drop our reference; the anchor keeps the URB alive. */
+               usb_free_urb(urb);
+       }
+
+       if (i == 0) {
+               dev_warn(dev->udev->dev.parent,
+                        "Cannot setup read URBs, error %d\n", err);
+               return err;
+       } else if (i < MAX_RX_URBS) {
+               dev_warn(dev->udev->dev.parent,
+                        "RX performances may be slow\n");
+       }
+
+       dev->rxinitdone = true;
+
+       return 0;
+}
+
+/* Program the channel's controller mode (silent vs. normal) according
+ * to the configured CAN_CTRLMODE_LISTENONLY flag.
+ */
+static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+{
+       int ret;
+       struct kvaser_msg *cmd;
+
+       cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->id = CMD_SET_CTRL_MODE;
+       cmd->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_ctrl_mode);
+       cmd->u.ctrl_mode.tid = 0xff;
+       cmd->u.ctrl_mode.channel = priv->channel;
+       cmd->u.ctrl_mode.ctrl_mode =
+               (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) ?
+               KVASER_CTRL_MODE_SILENT : KVASER_CTRL_MODE_NORMAL;
+
+       ret = kvaser_usb_send_msg(priv->dev, cmd);
+       kfree(cmd);
+
+       return ret;
+}
+
+/* Ask the device to start the channel's CAN controller and wait for
+ * the CMD_START_CHIP_REPLY completion, bounded by START_TIMEOUT ms.
+ */
+static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv)
+{
+       unsigned long timeout = msecs_to_jiffies(START_TIMEOUT);
+       int ret;
+
+       init_completion(&priv->start_comp);
+
+       ret = kvaser_usb_send_simple_msg(priv->dev, CMD_START_CHIP,
+                                        priv->channel);
+       if (ret)
+               return ret;
+
+       return wait_for_completion_timeout(&priv->start_comp, timeout) ?
+              0 : -ETIMEDOUT;
+}
+
+/* ndo_open: bring a channel up -- rx URBs, controller mode, chip start. */
+static int kvaser_usb_open(struct net_device *netdev)
+{
+       struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+       struct kvaser_usb *dev = priv->dev;
+       int ret;
+
+       ret = open_candev(netdev);
+       if (ret)
+               return ret;
+
+       ret = kvaser_usb_setup_rx_urbs(dev);
+       if (ret)
+               goto err_close;
+
+       ret = kvaser_usb_set_opt_mode(priv);
+       if (ret)
+               goto err_close;
+
+       ret = kvaser_usb_start_chip(priv);
+       if (ret) {
+               netdev_warn(netdev, "Cannot start device, error %d\n", ret);
+               goto err_close;
+       }
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       return 0;
+
+err_close:
+       close_candev(netdev);
+       return ret;
+}
+
+/* Tear down every outstanding URB: the shared rx pool for the device,
+ * then the tx URBs of each channel that was set up.
+ */
+static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
+{
+       int ch;
+
+       usb_kill_anchored_urbs(&dev->rx_submitted);
+
+       for (ch = 0; ch < MAX_RX_URBS; ch++)
+               usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+                                 dev->rxbuf[ch], dev->rxbuf_dma[ch]);
+
+       for (ch = 0; ch < MAX_NET_DEVICES; ch++) {
+               if (dev->nets[ch])
+                       kvaser_usb_unlink_tx_urbs(dev->nets[ch]);
+       }
+}
+
+/* Ask the device to stop the channel's CAN controller and wait for
+ * CMD_STOP_CHIP_REPLY, bounded by STOP_TIMEOUT ms.
+ */
+static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv)
+{
+       unsigned long timeout = msecs_to_jiffies(STOP_TIMEOUT);
+       int ret;
+
+       init_completion(&priv->stop_comp);
+
+       ret = kvaser_usb_send_simple_msg(priv->dev, CMD_STOP_CHIP,
+                                        priv->channel);
+       if (ret)
+               return ret;
+
+       return wait_for_completion_timeout(&priv->stop_comp, timeout) ?
+              0 : -ETIMEDOUT;
+}
+
+/* Tell the device to drop any queued-but-unsent frames for a channel. */
+static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv)
+{
+       int ret;
+       struct kvaser_msg *cmd;
+
+       cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->id = CMD_FLUSH_QUEUE;
+       cmd->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_flush_queue);
+       cmd->u.flush_queue.channel = priv->channel;
+       cmd->u.flush_queue.flags = 0x00;
+
+       ret = kvaser_usb_send_msg(priv->dev, cmd);
+       kfree(cmd);
+
+       return ret;
+}
+
+/* ndo_stop: quiesce the channel.  Failures of the individual teardown
+ * steps are logged but do not abort the close; it always returns 0.
+ */
+static int kvaser_usb_close(struct net_device *netdev)
+{
+       struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+       struct kvaser_usb *dev = priv->dev;
+       int err;
+
+       netif_stop_queue(netdev);
+
+       err = kvaser_usb_flush_queue(priv);
+       if (err)
+               netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
+
+       /* Bug fix: the warning used to print a stale err left over from
+        * the flush step; report the reset command's own return value.
+        */
+       err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+       if (err)
+               netdev_warn(netdev, "Cannot reset card, error %d\n", err);
+
+       err = kvaser_usb_stop_chip(priv);
+       if (err)
+               netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+
+       priv->can.state = CAN_STATE_STOPPED;
+       close_candev(priv->netdev);
+
+       return 0;
+}
+
+/* Tx URB completion: free the per-URB transfer buffer and log aborted
+ * URBs while the device is still present.
+ */
+static void kvaser_usb_write_bulk_callback(struct urb *urb)
+{
+       struct kvaser_usb_tx_urb_context *ctx = urb->context;
+       int status = urb->status;
+       struct net_device *netdev;
+
+       if (WARN_ON(!ctx))
+               return;
+
+       netdev = ctx->priv->netdev;
+
+       /* The buffer was kmalloc'ed per URB in start_xmit(). */
+       kfree(urb->transfer_buffer);
+
+       if (status && netif_device_present(netdev))
+               netdev_info(netdev, "Tx URB aborted (%d)\n", status);
+}
+
+/* ndo_start_xmit: encode one CAN frame into a kvaser_msg and submit it
+ * on the bulk-out endpoint.  Flow control: each in-flight frame holds
+ * one of MAX_TX_URBS tx contexts; the queue is stopped when the last
+ * context is taken and restarted from kvaser_usb_tx_acknowledge().
+ */
+static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+                                        struct net_device *netdev)
+{
+       struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+       struct kvaser_usb *dev = priv->dev;
+       struct net_device_stats *stats = &netdev->stats;
+       struct can_frame *cf = (struct can_frame *)skb->data;
+       struct kvaser_usb_tx_urb_context *context = NULL;
+       struct urb *urb;
+       void *buf;
+       struct kvaser_msg *msg;
+       int i, err;
+       int ret = NETDEV_TX_OK;
+
+       if (can_dropped_invalid_skb(netdev, skb))
+               return NETDEV_TX_OK;
+
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!urb) {
+               netdev_err(netdev, "No memory left for URBs\n");
+               stats->tx_dropped++;
+               goto nourbmem;
+       }
+
+       buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
+       if (!buf) {
+               netdev_err(netdev, "No memory left for USB buffer\n");
+               stats->tx_dropped++;
+               goto nobufmem;
+       }
+
+       msg = buf;
+       msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can);
+       msg->u.tx_can.flags = 0;
+       msg->u.tx_can.channel = priv->channel;
+
+       /* Pack the CAN id into the device's split-byte layout: five
+        * bytes for a 29-bit extended id, two bytes for an 11-bit one
+        * (mirrors the decode in kvaser_usb_rx_can_msg()).
+        */
+       if (cf->can_id & CAN_EFF_FLAG) {
+               msg->id = CMD_TX_EXT_MESSAGE;
+               msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f;
+               msg->u.tx_can.msg[1] = (cf->can_id >> 18) & 0x3f;
+               msg->u.tx_can.msg[2] = (cf->can_id >> 14) & 0x0f;
+               msg->u.tx_can.msg[3] = (cf->can_id >> 6) & 0xff;
+               msg->u.tx_can.msg[4] = cf->can_id & 0x3f;
+       } else {
+               msg->id = CMD_TX_STD_MESSAGE;
+               msg->u.tx_can.msg[0] = (cf->can_id >> 6) & 0x1f;
+               msg->u.tx_can.msg[1] = cf->can_id & 0x3f;
+       }
+
+       msg->u.tx_can.msg[5] = cf->can_dlc;
+       memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc);
+
+       if (cf->can_id & CAN_RTR_FLAG)
+               msg->u.tx_can.flags |= MSG_FLAG_REMOTE_FRAME;
+
+       /* Claim a free tx context (echo_index == MAX_TX_URBS is free).
+        * NOTE(review): this scan and the echo_index update are not
+        * explicitly locked against the completion path -- presumably
+        * serialized by the stack's tx locking; confirm.
+        */
+       for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
+               if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+                       context = &priv->tx_contexts[i];
+                       break;
+               }
+       }
+
+       if (!context) {
+               netdev_warn(netdev, "cannot find free context\n");
+               ret =  NETDEV_TX_BUSY;
+               goto releasebuf;
+       }
+
+       context->priv = priv;
+       context->echo_index = i;
+       context->dlc = cf->can_dlc;
+
+       /* tid lets tx_acknowledge() find this context again. */
+       msg->u.tx_can.tid = context->echo_index;
+
+       usb_fill_bulk_urb(urb, dev->udev,
+                         usb_sndbulkpipe(dev->udev,
+                                         dev->bulk_out->bEndpointAddress),
+                         buf, msg->len,
+                         kvaser_usb_write_bulk_callback, context);
+       usb_anchor_urb(urb, &priv->tx_submitted);
+
+       /* Stash the skb so tx_acknowledge() can loop it back. */
+       can_put_echo_skb(skb, netdev, context->echo_index);
+
+       atomic_inc(&priv->active_tx_urbs);
+
+       if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
+               netif_stop_queue(netdev);
+
+       err = usb_submit_urb(urb, GFP_ATOMIC);
+       if (unlikely(err)) {
+               /* Undo the echo-skb bookkeeping; the skb is now owned
+                * (and freed) by can_free_echo_skb().
+                */
+               can_free_echo_skb(netdev, context->echo_index);
+
+               skb = NULL; /* set to NULL to avoid double free in
+                            * dev_kfree_skb(skb) */
+
+               atomic_dec(&priv->active_tx_urbs);
+               usb_unanchor_urb(urb);
+
+               stats->tx_dropped++;
+
+               if (err == -ENODEV)
+                       netif_device_detach(netdev);
+               else
+                       netdev_warn(netdev, "Failed tx_urb %d\n", err);
+
+               goto releasebuf;
+       }
+
+       /* Drop our reference; the anchor/USB core keep the URB alive,
+        * and the completion callback frees buf.
+        */
+       usb_free_urb(urb);
+
+       return NETDEV_TX_OK;
+
+releasebuf:
+       kfree(buf);
+nobufmem:
+       usb_free_urb(urb);
+nourbmem:
+       dev_kfree_skb(skb);
+       return ret;
+}
+
+/* net_device callbacks for one Kvaser CAN channel. */
+static const struct net_device_ops kvaser_usb_netdev_ops = {
+       .ndo_open = kvaser_usb_open,
+       .ndo_stop = kvaser_usb_close,
+       .ndo_start_xmit = kvaser_usb_start_xmit,
+};
+
+/* Bit-timing limits advertised to the CAN core; the bounds come from
+ * the KVASER_USB_* defines elsewhere in this file.
+ */
+static const struct can_bittiming_const kvaser_usb_bittiming_const = {
+       .name = "kvaser_usb",
+       .tseg1_min = KVASER_USB_TSEG1_MIN,
+       .tseg1_max = KVASER_USB_TSEG1_MAX,
+       .tseg2_min = KVASER_USB_TSEG2_MIN,
+       .tseg2_max = KVASER_USB_TSEG2_MAX,
+       .sjw_max = KVASER_USB_SJW_MAX,
+       .brp_min = KVASER_USB_BRP_MIN,
+       .brp_max = KVASER_USB_BRP_MAX,
+       .brp_inc = KVASER_USB_BRP_INC,
+};
+
+/* do_set_bittiming: push the computed bus parameters to the device. */
+static int kvaser_usb_set_bittiming(struct net_device *netdev)
+{
+       struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+       struct can_bittiming *bt = &priv->can.bittiming;
+       struct kvaser_usb *dev = priv->dev;
+       struct kvaser_msg *cmd;
+       int ret;
+
+       cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->id = CMD_SET_BUS_PARAMS;
+       cmd->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_busparams);
+       cmd->u.busparams.channel = priv->channel;
+       cmd->u.busparams.tid = 0xff;
+       cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
+       cmd->u.busparams.sjw = bt->sjw;
+       cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
+       cmd->u.busparams.tseg2 = bt->phase_seg2;
+       /* Triple sampling when requested, single otherwise. */
+       cmd->u.busparams.no_samp =
+               (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 3 : 1;
+
+       ret = kvaser_usb_send_msg(dev, cmd);
+       kfree(cmd);
+
+       return ret;
+}
+
+/* do_set_mode: only CAN_MODE_START (bus-off restart) is supported. */
+static int kvaser_usb_set_mode(struct net_device *netdev,
+                              enum can_mode mode)
+{
+       struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+
+       if (mode != CAN_MODE_START)
+               return -EOPNOTSUPP;
+
+       return kvaser_usb_simple_msg_async(priv, CMD_START_CHIP);
+}
+
+/* do_get_berr_counter: report the counters last cached by rx_error(). */
+static int kvaser_usb_get_berr_counter(const struct net_device *netdev,
+                                      struct can_berr_counter *bec)
+{
+       struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+
+       memcpy(bec, &priv->bec, sizeof(*bec));
+
+       return 0;
+}
+
+/* Unregister all channel netdevs, release their URBs, then free the
+ * candev allocations.
+ */
+static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+{
+       int ch;
+
+       /* Pass 1: detach every registered netdev from the stack. */
+       for (ch = 0; ch < dev->nchannels; ch++) {
+               if (dev->nets[ch])
+                       unregister_netdev(dev->nets[ch]->netdev);
+       }
+
+       kvaser_usb_unlink_all_urbs(dev);
+
+       /* Pass 2: release the per-channel candev memory. */
+       for (ch = 0; ch < dev->nchannels; ch++) {
+               if (dev->nets[ch])
+                       free_candev(dev->nets[ch]->netdev);
+       }
+}
+
+/* Allocate, initialize and register the CAN net device for one
+ * channel of the adapter.
+ *
+ * Device capabilities come from the USB id table's driver_info flags:
+ * KVASER_HAS_TXRX_ERRORS enables the berr-counter callback and
+ * KVASER_HAS_SILENT_MODE advertises listen-only support.
+ *
+ * Returns 0 on success or a negative errno; on failure nothing is
+ * left registered and dev->nets[channel] is reset to NULL.
+ */
+static int kvaser_usb_init_one(struct usb_interface *intf,
+                              const struct usb_device_id *id, int channel)
+{
+       struct kvaser_usb *dev = usb_get_intfdata(intf);
+       struct net_device *netdev;
+       struct kvaser_usb_net_priv *priv;
+       int i, err;
+
+       netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+       if (!netdev) {
+               dev_err(&intf->dev, "Cannot alloc candev\n");
+               return -ENOMEM;
+       }
+
+       priv = netdev_priv(netdev);
+
+       init_completion(&priv->start_comp);
+       init_completion(&priv->stop_comp);
+
+       init_usb_anchor(&priv->tx_submitted);
+       atomic_set(&priv->active_tx_urbs, 0);
+
+       /* Mark every tx context as free (MAX_TX_URBS == "unused") */
+       for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
+               priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+
+       priv->dev = dev;
+       priv->netdev = netdev;
+       priv->channel = channel;
+
+       priv->can.state = CAN_STATE_STOPPED;
+       priv->can.clock.freq = CAN_USB_CLOCK;
+       priv->can.bittiming_const = &kvaser_usb_bittiming_const;
+       priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
+       priv->can.do_set_mode = kvaser_usb_set_mode;
+       if (id->driver_info & KVASER_HAS_TXRX_ERRORS)
+               priv->can.do_get_berr_counter = kvaser_usb_get_berr_counter;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+       if (id->driver_info & KVASER_HAS_SILENT_MODE)
+               priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+
+       /* Local echo of sent frames (required for CAN drivers) */
+       netdev->flags |= IFF_ECHO;
+
+       netdev->netdev_ops = &kvaser_usb_netdev_ops;
+
+       SET_NETDEV_DEV(netdev, &intf->dev);
+
+       dev->nets[channel] = priv;
+
+       err = register_candev(netdev);
+       if (err) {
+               dev_err(&intf->dev, "Failed to register can device\n");
+               free_candev(netdev);
+               dev->nets[channel] = NULL;
+               return err;
+       }
+
+       netdev_dbg(netdev, "device registered\n");
+
+       return 0;
+}
+
+/* Locate the bulk-in and bulk-out endpoints of altsetting 0.
+ *
+ * Note: *in / *out are only written when a matching endpoint is
+ * found; the caller must pre-initialize them (probe relies on the
+ * zero-allocated device structure) and check for NULL afterwards.
+ * If several endpoints match, the last one found wins.
+ */
+static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
+                                    struct usb_endpoint_descriptor **in,
+                                    struct usb_endpoint_descriptor **out)
+{
+       const struct usb_host_interface *iface_desc;
+       struct usb_endpoint_descriptor *endpoint;
+       int i;
+
+       iface_desc = &intf->altsetting[0];
+
+       for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+               endpoint = &iface_desc->endpoint[i].desc;
+
+               if (usb_endpoint_is_bulk_in(endpoint))
+                       *in = endpoint;
+
+               if (usb_endpoint_is_bulk_out(endpoint))
+                       *out = endpoint;
+       }
+}
+
+/* USB probe: set up adapter-wide state and register one CAN net
+ * device per channel reported by the card.
+ *
+ * The device structure is devm-allocated, so it is released
+ * automatically on probe failure or disconnect.  Returns 0 on
+ * success or a negative errno.
+ */
+static int kvaser_usb_probe(struct usb_interface *intf,
+                           const struct usb_device_id *id)
+{
+       struct kvaser_usb *dev;
+       int err = -ENOMEM;
+       int i;
+
+       dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
+       if (!dev->bulk_in || !dev->bulk_out) {
+               dev_err(&intf->dev, "Cannot get usb endpoint(s)");
+               return err;
+       }
+
+       dev->udev = interface_to_usbdev(intf);
+
+       init_usb_anchor(&dev->rx_submitted);
+
+       usb_set_intfdata(intf, dev);
+
+       /* Best-effort reset of every possible channel; return values
+        * are deliberately ignored here - TODO(review): confirm reset
+        * failures are harmless at this point.
+        */
+       for (i = 0; i < MAX_NET_DEVICES; i++)
+               kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+
+       err = kvaser_usb_get_software_info(dev);
+       if (err) {
+               dev_err(&intf->dev,
+                       "Cannot get software infos, error %d\n", err);
+               return err;
+       }
+
+       /* Also learns dev->nchannels, used by the loop below */
+       err = kvaser_usb_get_card_info(dev);
+       if (err) {
+               dev_err(&intf->dev,
+                       "Cannot get card infos, error %d\n", err);
+               return err;
+       }
+
+       dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+               ((dev->fw_version >> 24) & 0xff),
+               ((dev->fw_version >> 16) & 0xff),
+               (dev->fw_version & 0xffff));
+
+       /* Register every channel; on any failure, unwind them all */
+       for (i = 0; i < dev->nchannels; i++) {
+               err = kvaser_usb_init_one(intf, id, i);
+               if (err) {
+                       kvaser_usb_remove_interfaces(dev);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+/* USB disconnect: clear the interface data, then tear down every
+ * channel net device.  A NULL intfdata (probe never attached a
+ * device) makes this a no-op.
+ */
+static void kvaser_usb_disconnect(struct usb_interface *intf)
+{
+       struct kvaser_usb *dev = usb_get_intfdata(intf);
+
+       usb_set_intfdata(intf, NULL);
+
+       if (!dev)
+               return;
+
+       kvaser_usb_remove_interfaces(dev);
+}
+
+/* USB driver glue and module metadata */
+static struct usb_driver kvaser_usb_driver = {
+       .name = "kvaser_usb",
+       .probe = kvaser_usb_probe,
+       .disconnect = kvaser_usb_disconnect,
+       .id_table = kvaser_usb_table,
+};
+
+module_usb_driver(kvaser_usb_driver);
+
+MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
+MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
+MODULE_LICENSE("GPL v2");
index 19517b34d1441a9d177a4f179d97e2f7446e060f..641d8847c32675179a01c9995d88bb2bbcff2de6 100644 (file)
@@ -936,7 +936,6 @@ struct bnx2x_port {
 
        /* used to synchronize phy accesses */
        struct mutex            phy_mutex;
-       int                     need_hw_lock;
 
        u32                     port_stx;
 
index 54d522da1aa77b0087471ffa7bebdc496b4c90bf..8779ac1f89a24a7eca0a4a86facd8dea2956f2f0 100644 (file)
@@ -948,14 +948,12 @@ void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 {
        mutex_lock(&bp->port.phy_mutex);
 
-       if (bp->port.need_hw_lock)
-               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 }
 
 void bnx2x_release_phy_lock(struct bnx2x *bp)
 {
-       if (bp->port.need_hw_lock)
-               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 
        mutex_unlock(&bp->port.phy_mutex);
 }
@@ -2248,7 +2246,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                         DRV_PULSE_SEQ_MASK);
                BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
 
-               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+                                            DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
                if (!load_code) {
                        BNX2X_ERR("MCP response failure, aborting\n");
                        rc = -EBUSY;
index c40c0253e105ac433ebc7d6a7483e2f67f6c2609..e05f981398be844bc090a0ac5883a4214c27e708 100644 (file)
@@ -2660,20 +2660,25 @@ static int bnx2x_set_phys_id(struct net_device *dev,
                return 1;       /* cycle on/off once per second */
 
        case ETHTOOL_ID_ON:
+               bnx2x_acquire_phy_lock(bp);
                bnx2x_set_led(&bp->link_params, &bp->link_vars,
                              LED_MODE_ON, SPEED_1000);
+               bnx2x_release_phy_lock(bp);
                break;
 
        case ETHTOOL_ID_OFF:
+               bnx2x_acquire_phy_lock(bp);
                bnx2x_set_led(&bp->link_params, &bp->link_vars,
                              LED_MODE_FRONT_PANEL_OFF, 0);
-
+               bnx2x_release_phy_lock(bp);
                break;
 
        case ETHTOOL_ID_INACTIVE:
+               bnx2x_acquire_phy_lock(bp);
                bnx2x_set_led(&bp->link_params, &bp->link_vars,
                              LED_MODE_OPER,
                              bp->link_vars.line_speed);
+               bnx2x_release_phy_lock(bp);
        }
 
        return 0;
index 7eaa74b78a5b10f1d4fa5aa073eef15b46f846e6..1504e0a0f12a5fe6cb4cbf211fc04b3912d59117 100644 (file)
@@ -695,6 +695,7 @@ struct port_hw_cfg {                    /* port 0: 0x12c  port 1: 0x2bc */
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE    0x00000e00
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722       0x00000f00
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616      0x00001000
+               #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834      0x00001100
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE       0x0000fd00
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN      0x0000ff00
 
@@ -751,6 +752,7 @@ struct port_hw_cfg {                    /* port 0: 0x12c  port 1: 0x2bc */
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE     0x00000e00
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722        0x00000f00
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616       0x00001000
+               #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834       0x00001100
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC      0x0000fc00
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE        0x0000fd00
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN       0x0000ff00
@@ -1246,6 +1248,7 @@ struct drv_func_mb {
        #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED        0xa2000000
        #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED        0x00070002
        #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED   0x00070014
+       #define REQ_BC_VER_4_MT_SUPPORTED               0x00070201
        #define REQ_BC_VER_4_PFC_STATS_SUPPORTED        0x00070201
        #define REQ_BC_VER_4_FCOE_FEATURES              0x00070209
 
@@ -2159,6 +2162,16 @@ struct shmem2_region {
        #define SHMEM_EEE_TIME_OUTPUT_BIT          0x80000000
 
        u32 sizeof_port_stats;
+
+       /* Link Flap Avoidance */
+       u32 lfa_host_addr[PORT_MAX];
+       u32 reserved1;
+
+       u32 reserved2;                          /* Offset 0x148 */
+       u32 reserved3;                          /* Offset 0x14C */
+       u32 reserved4;                          /* Offset 0x150 */
+       u32 link_attr_sync[PORT_MAX];           /* Offset 0x154 */
+       #define LINK_ATTR_SYNC_KR2_ENABLE       (1<<0)
 };
 
 
index c98da25f22eb2b659f97d36f43ccd140d98c47cc..3e7d8246c5015603b6de071e20014ae0db69463d 100644 (file)
 #define        GP_STATUS_10G_XFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
 #define        GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
 #define        GP_STATUS_10G_SFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
+#define        GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2
 #define LINK_10THD             LINK_STATUS_SPEED_AND_DUPLEX_10THD
 #define LINK_10TFD             LINK_STATUS_SPEED_AND_DUPLEX_10TFD
 #define LINK_100TXHD           LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
@@ -1440,30 +1441,47 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
 /******************************************************************/
 /*                     MAC/PBF section                           */
 /******************************************************************/
-static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
+static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id,
+                              u32 emac_base)
 {
-       u32 mode, emac_base;
+       u32 new_mode, cur_mode;
+       u32 clc_cnt;
        /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
+       cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
 
-       if (CHIP_IS_E2(bp))
-               emac_base = GRCBASE_EMAC0;
-       else
-               emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-       mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
-       mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
-                 EMAC_MDIO_MODE_CLOCK_CNT);
        if (USES_WARPCORE(bp))
-               mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+               clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
        else
-               mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+               clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
 
-       mode |= (EMAC_MDIO_MODE_CLAUSE_45);
-       REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode);
+       if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) &&
+           (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45)))
+               return;
+
+       new_mode = cur_mode &
+               ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
+       new_mode |= clc_cnt;
+       new_mode |= (EMAC_MDIO_MODE_CLAUSE_45);
 
+       DP(NETIF_MSG_LINK, "Changing emac_mode from 0x%x to 0x%x\n",
+          cur_mode, new_mode);
+       REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode);
        udelay(40);
 }
+
+/* Program the MDIO clock settings (via bnx2x_set_mdio_clk) on the
+ * EMAC base of every configured phy, so each phy's MDC/MDIO
+ * interface gets the expected clause-45 mode and clock count.
+ */
+static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp,
+                                       struct link_params *params)
+{
+       u8 phy_index;
+       /* Set mdio clock per phy */
+       for (phy_index = INT_PHY; phy_index < params->num_phys;
+             phy_index++)
+               bnx2x_set_mdio_clk(bp, params->chip_id,
+                                  params->phy[phy_index].mdio_ctrl);
+}
+
 static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
 {
        u32 port4mode_ovwr_val;
@@ -1508,7 +1526,8 @@ static void bnx2x_emac_init(struct link_params *params,
                }
                timeout--;
        } while (val & EMAC_MODE_RESET);
-       bnx2x_set_mdio_clk(bp, params->chip_id, port);
+
+       bnx2x_set_mdio_emac_per_phy(bp, params);
        /* Set mac address */
        val = ((params->mac_addr[0] << 8) |
                params->mac_addr[1]);
@@ -1664,7 +1683,10 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
         * ports of the path
         */
 
-       if ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) &&
+       if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) ||
+            (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) ||
+            (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) &&
+           is_port4mode &&
            (REG_RD(bp, MISC_REG_RESET_REG_2) &
             MISC_REGISTERS_RESET_REG_2_XMAC)) {
                DP(NETIF_MSG_LINK,
@@ -1760,6 +1782,18 @@ static int bnx2x_xmac_enable(struct link_params *params,
         */
        REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
 
+       /* When XMAC is in XLGMII mode, disable sending idles for fault
+        * detection.
+        */
+       if (!(params->phy[INT_PHY].flags & FLAGS_TX_ERROR_CHECK)) {
+               REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL,
+                      (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE |
+                       XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE));
+               REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+               REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+                      XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+                      XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+       }
        /* Set Max packet size */
        REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
 
@@ -1780,6 +1814,12 @@ static int bnx2x_xmac_enable(struct link_params *params,
        /* Enable TX and RX */
        val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
 
+       /* Set MAC in XLGMII mode for dual-mode */
+       if ((vars->line_speed == SPEED_20000) &&
+           (params->phy[INT_PHY].supported &
+            SUPPORTED_20000baseKR2_Full))
+               val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB;
+
        /* Check loopback mode */
        if (lb)
                val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
@@ -2096,6 +2136,16 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
                        port_mb[params->port].link_status), link_status);
 }
 
+/* Publish the driver's link attributes to the management firmware:
+ * write link_attr into the per-port link_attr_sync word in shmem2,
+ * but only when the firmware's shmem2 is new enough to contain that
+ * field (SHMEM2_HAS check).
+ */
+static void bnx2x_update_link_attr(struct link_params *params, u32 link_attr)
+{
+       struct bnx2x *bp = params->bp;
+
+       if (SHMEM2_HAS(bp, link_attr_sync))
+               REG_WR(bp, params->shmem2_base +
+                      offsetof(struct shmem2_region,
+                               link_attr_sync[params->port]), link_attr);
+}
+
 static void bnx2x_update_pfc_nig(struct link_params *params,
                struct link_vars *vars,
                struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2126,7 +2176,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
                if (CHIP_IS_E3(bp))
                        ppp_enable = 0;
                else
-               ppp_enable = 1;
+                       ppp_enable = 1;
                xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
                                     NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
                xcm_out_en = 0;
@@ -2247,7 +2297,6 @@ int bnx2x_update_pfc(struct link_params *params,
        return bnx2x_status;
 }
 
-
 static int bnx2x_bmac1_enable(struct link_params *params,
                              struct link_vars *vars,
                              u8 is_lb)
@@ -2651,6 +2700,13 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
        u32 val;
        u16 i;
        int rc = 0;
+       u32 chip_id;
+       if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
+               chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+                         ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+               bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
+       }
+
        if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
                bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
                              EMAC_MDIO_STATUS_10MB);
@@ -2719,6 +2775,13 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
        u32 tmp;
        u8 i;
        int rc = 0;
+       u32 chip_id;
+       if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
+               chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+                         ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+               bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
+       }
+
        if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
                bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
                              EMAC_MDIO_STATUS_10MB);
@@ -3147,6 +3210,15 @@ static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
 }
 
+/* Clause-45 read-modify-write helper: clear the bits that are zero
+ * in and_val (reg &= and_val).  Companion of
+ * bnx2x_cl45_read_or_write(), which sets bits instead.
+ */
+static void bnx2x_cl45_read_and_write(struct bnx2x *bp,
+                                     struct bnx2x_phy *phy,
+                                     u8 devad, u16 reg, u16 and_val)
+{
+       u16 val;
+       bnx2x_cl45_read(bp, phy, devad, reg, &val);
+       bnx2x_cl45_write(bp, phy, devad, reg, val & and_val);
+}
+
 int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
                   u8 devad, u16 reg, u16 *ret_val)
 {
@@ -3551,6 +3623,44 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
  * init configuration, and set/clear SGMII flag. Internal
  * phy init is done purely in phy_init stage.
  */
+/* Enable 20G-KR2 auto-negotiation on the warpcore.
+ *
+ * Programs the CL82 TX/RX alignment markers and the CL73 next-page
+ * registers from the table below, then records KR2 as enabled in the
+ * shmem2 link attributes so the KR2 work-around logic is armed.
+ */
+static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
+                                        struct link_params *params,
+                                        struct link_vars *vars)
+{
+       struct bnx2x *bp = params->bp;
+       u16 i;
+       static struct bnx2x_reg_set reg_set[] = {
+               /* Step 1 - Program the TX/RX alignment markers */
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537},
+               /* Step 2 - Configure the NP registers */
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620}
+       };
+       DP(NETIF_MSG_LINK, "Enabling 20G-KR2\n");
+
+       bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+                                MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6));
+
+       for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+               bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+                                reg_set[i].val);
+
+       /* Start KR2 work-around timer which handles BCM8073 link-partner */
+       vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+       bnx2x_update_link_attr(params, vars->link_attr_sync);
+}
 
 static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
                                               struct link_params *params)
@@ -3564,6 +3674,21 @@ static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
                                 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
 }
 
+/* Restart clause-73 auto-negotiation on the leading lane only:
+ * point the AER at that lane, write the AN enable/restart value
+ * (0x1200) to MII control, then restore the AER mapping.
+ */
+static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
+                                        struct link_params *params)
+{
+       /* Restart autoneg on the leading lane only */
+       struct bnx2x *bp = params->bp;
+       u16 lane = bnx2x_get_warpcore_lane(phy, params);
+       CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+                         MDIO_AER_BLOCK_AER_REG, lane);
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+                        MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+
+       /* Restore AER */
+       bnx2x_set_aer_mmd(params, phy);
+}
+
 static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
                                        struct link_params *params,
                                        struct link_vars *vars) {
@@ -3576,7 +3701,9 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
                {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
                {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
                /* Disable Autoneg: re-enable it after adv is done. */
-               {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}
+               {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0},
+               {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
        };
        DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
        /* Set to default registers that may be overriden by 10G force */
@@ -3585,11 +3712,11 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
                                 reg_set[i].val);
 
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-               MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
-       cl72_ctrl &= 0xf8ff;
+                       MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
+       cl72_ctrl &= 0x08ff;
        cl72_ctrl |= 0x3800;
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-               MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
+                        MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
 
        /* Check adding advertisement for 1G KX */
        if (((vars->line_speed == SPEED_AUTO_NEG) &&
@@ -3624,6 +3751,16 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
                     ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
                      (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
                      (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+       /* Configure the next lane if dual mode */
+       if (phy->flags & FLAGS_WC_DUAL_MODE)
+               bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                                MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
+                                ((0x02 <<
+                                MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+                                 (0x06 <<
+                                  MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+                                 (0x09 <<
+                               MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
                         0x03f0);
@@ -3670,10 +3807,26 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                        MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
 
-       /* Enable Autoneg */
-       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
-                        MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+       if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+            (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
+           (phy->req_line_speed == SPEED_20000)) {
+
+               CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+                                 MDIO_AER_BLOCK_AER_REG, lane);
 
+               bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+                                        MDIO_WC_REG_RX1_PCI_CTRL + (0x10*lane),
+                                        (1<<11));
+
+               bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                                MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7);
+               bnx2x_set_aer_mmd(params, phy);
+
+               bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+       }
+
+       /* Enable Autoneg: only on the main lane */
+       bnx2x_warpcore_restart_AN_KR(phy, params);
 }
 
 static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
@@ -3692,9 +3845,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
                {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
                {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
                /* Leave cl72 training enable, needed for KR */
-               {MDIO_PMA_DEVAD,
-               MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
-               0x2}
+               {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
        };
 
        for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
@@ -3764,27 +3915,21 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
 
        /* Disable 100FX Enable and Auto-Detect */
-       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_FX100_CTRL1, &val);
-       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                        MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+                                 MDIO_WC_REG_FX100_CTRL1, 0xFFFA);
 
        /* Disable 100FX Idle detect */
        bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
                                 MDIO_WC_REG_FX100_CTRL3, 0x0080);
 
        /* Set Block address to Remote PHY & Clear forced_speed[5] */
-       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_DIGITAL4_MISC3, &val);
-       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                        MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+                                 MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F);
 
        /* Turn off auto-detect & fiber mode */
-       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
-       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                        MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
-                        (val & 0xFFEE));
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+                                 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+                                 0xFFEE);
 
        /* Set filter_force_link, disable_false_link and parallel_detect */
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3846,22 +3991,65 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
                         MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
 
        /* Release tx_fifo_reset */
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+                                 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+                                 0xFFFE);
+       /* Release rxSeqStart */
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+                                 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF);
+}
+
+static void bnx2x_warpcore_set_20G_force_KR2(struct bnx2x_phy *phy,
+                                            struct link_params *params)
+{
+       u16 val;
+       struct bnx2x *bp = params->bp;
+       /* Set global registers, so set AER lane to 0 */
+       CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+                         MDIO_AER_BLOCK_AER_REG, 0);
+
+       /* Disable sequencer */
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+                                 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1<<13));
+
+       bnx2x_set_aer_mmd(params, phy);
+
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD,
+                                 MDIO_WC_REG_PMD_KR_CONTROL, ~(1<<1));
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+                        MDIO_AN_REG_CTRL, 0);
+       /* Turn off CL73 */
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+                       MDIO_WC_REG_CL73_USERB0_CTRL, &val);
+       val &= ~(1<<5);
+       val |= (1<<6);
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                        MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE);
+                        MDIO_WC_REG_CL73_USERB0_CTRL, val);
+
+       /* Set 20G KR2 force speed */
+       bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+                                MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f);
+
+       bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+                                MDIO_WC_REG_DIGITAL4_MISC3, (1<<7));
 
-       /* Release rxSeqStart */
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+                       MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val);
+       val &= ~(3<<14);
+       val |= (1<<15);
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                        MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF));
-}
+                        MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val);
+       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                        MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A);
 
-static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp,
-                                      struct bnx2x_phy *phy)
-{
-       DP(NETIF_MSG_LINK, "KR2 still not supported !!!\n");
+       /* Enable sequencer (over lane 0) */
+       CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+                         MDIO_AER_BLOCK_AER_REG, 0);
+
+       bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+                                MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1<<13));
+
+       bnx2x_set_aer_mmd(params, phy);
 }
 
 static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
@@ -3931,20 +4119,16 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
        u16 val16, digctrl_kx1, digctrl_kx2;
 
        /* Clear XFI clock comp in non-10G single lane mode. */
-       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_RX66_CONTROL, &val16);
-       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                        MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+                                 MDIO_WC_REG_RX66_CONTROL, ~(3<<13));
 
        bnx2x_warpcore_set_lpi_passthrough(phy, params);
 
        if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
                /* SGMII Autoneg */
-               bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                               MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
-               bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                                MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
-                                val16 | 0x1000);
+               bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+                                        MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+                                        0x1000);
                DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
        } else {
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4086,7 +4270,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
                if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
                    (cfg_pin > PIN_CFG_GPIO3_P1)) {
                        DP(NETIF_MSG_LINK,
-                          "ERROR: Invalid cfg pin %x for module detect indication\n",
+                          "No cfg pin %x for module detect indication\n",
                           cfg_pin);
                        return -EINVAL;
                }
@@ -4097,7 +4281,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
                *gpio_num = MISC_REGISTERS_GPIO_3;
                *gpio_port = port;
        }
-       DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port);
+
        return 0;
 }
 
@@ -4120,7 +4304,7 @@ static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
                return 0;
 }
 static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
-                                       struct link_params *params)
+                                    struct link_params *params)
 {
        u16 gp2_status_reg0, lane;
        struct bnx2x *bp = params->bp;
@@ -4134,8 +4318,8 @@ static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
 }
 
 static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
-                                      struct link_params *params,
-                                      struct link_vars *vars)
+                                         struct link_params *params,
+                                         struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
        u32 serdes_net_if;
@@ -4163,7 +4347,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
                case PORT_HW_CFG_NET_SERDES_IF_KR:
                        /* Do we get link yet? */
                        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1,
-                                                               &gp_status1);
+                                       &gp_status1);
                        lnkup = (gp_status1 >> (8+lane)) & 0x1;/* 1G */
                                /*10G KR*/
                        lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
@@ -4215,6 +4399,27 @@ static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
        }
 }
 
+static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
+                                        struct bnx2x_phy *phy,
+                                        u8 tx_en)
+{
+       struct bnx2x *bp = params->bp;
+       u32 cfg_pin;
+       u8 port = params->port;
+
+       cfg_pin = REG_RD(bp, params->shmem_base +
+                        offsetof(struct shmem_region,
+                                 dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+               PORT_HW_CFG_E3_TX_LASER_MASK;
+       /* Set the !tx_en since this pin is DISABLE_TX_LASER */
+       DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
+
+       /* For 20G, the expected pin to be used is 3 pins after the current */
+       bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
+       if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+               bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
+}
+
 static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                                       struct link_params *params,
                                       struct link_vars *vars)
@@ -4275,9 +4480,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                        break;
 
                case PORT_HW_CFG_NET_SERDES_IF_SFI:
-                       /* Issue Module detection */
+                       /* Issue Module detection if module is plugged, or
+                        * enabled transmitter to avoid current leakage in case
+                        * no module is connected
+                        */
                        if (bnx2x_is_sfp_module_plugged(phy, params))
                                bnx2x_sfp_module_detection(phy, params);
+                       else
+                               bnx2x_sfp_e3_set_transmitter(params, phy, 1);
 
                        bnx2x_warpcore_config_sfi(phy, params);
                        break;
@@ -4293,16 +4503,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
 
                        bnx2x_sfp_module_detection(phy, params);
                        break;
-
                case PORT_HW_CFG_NET_SERDES_IF_KR2:
-                       if (vars->line_speed != SPEED_20000) {
-                               DP(NETIF_MSG_LINK, "Speed not supported yet\n");
-                               return;
+                       if (!params->loopback_mode) {
+                               bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+                       } else {
+                               DP(NETIF_MSG_LINK, "Setting KR 20G-Force\n");
+                               bnx2x_warpcore_set_20G_force_KR2(phy, params);
                        }
-                       DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
-                       bnx2x_warpcore_set_20G_KR2(bp, phy);
                        break;
-
                default:
                        DP(NETIF_MSG_LINK,
                           "Unsupported Serdes Net Interface 0x%x\n",
@@ -4316,68 +4524,35 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Exit config init\n");
 }
 
-static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
-                                        struct bnx2x_phy *phy,
-                                        u8 tx_en)
-{
-       struct bnx2x *bp = params->bp;
-       u32 cfg_pin;
-       u8 port = params->port;
-
-       cfg_pin = REG_RD(bp, params->shmem_base +
-                               offsetof(struct shmem_region,
-                               dev_info.port_hw_config[port].e3_sfp_ctrl)) &
-                               PORT_HW_CFG_TX_LASER_MASK;
-       /* Set the !tx_en since this pin is DISABLE_TX_LASER */
-       DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
-       /* For 20G, the expected pin to be used is 3 pins after the current */
-
-       bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
-       if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
-               bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
-}
-
 static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
                                      struct link_params *params)
 {
        struct bnx2x *bp = params->bp;
        u16 val16, lane;
        bnx2x_sfp_e3_set_transmitter(params, phy, 0);
-       bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+       bnx2x_set_mdio_emac_per_phy(bp, params);
        bnx2x_set_aer_mmd(params, phy);
        /* Global register */
        bnx2x_warpcore_reset_lane(bp, phy, 1);
 
        /* Clear loopback settings (if any) */
        /* 10G & 20G */
-       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
-       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                        MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 &
-                        0xBFFF);
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+                                 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF);
 
-       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
-       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+                                 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe);
 
        /* Update those 1-copy registers */
        CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
                          MDIO_AER_BLOCK_AER_REG, 0);
        /* Enable 1G MDIO (1-copy) */
-       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
-                       &val16);
-       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                        MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
-                        val16 & ~0x10);
-
-       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
-       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                        MDIO_WC_REG_XGXSBLK1_LANECTRL2,
-                        val16 & 0xff00);
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+                                 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+                                 ~0x10);
 
+       bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+                                 MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00);
        lane = bnx2x_get_warpcore_lane(phy, params);
        /* Disable CL36 PCS Tx */
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4413,8 +4588,9 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
                       params->loopback_mode, phy->req_line_speed);
 
-       if (phy->req_line_speed < SPEED_10000) {
-               /* 10/100/1000 */
+       if (phy->req_line_speed < SPEED_10000 ||
+           phy->supported & SUPPORTED_20000baseKR2_Full) {
+               /* 10/100/1000/20G-KR2 */
 
                /* Update those 1-copy registers */
                CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
@@ -4427,18 +4603,20 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
                lane = bnx2x_get_warpcore_lane(phy, params);
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                                MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+               val16 |= (1<<lane);
+               if (phy->flags & FLAGS_WC_DUAL_MODE)
+                       val16 |= (2<<lane);
                bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                               MDIO_WC_REG_XGXSBLK1_LANECTRL2,
-                               val16 | (1<<lane));
+                                MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+                                val16);
 
                /* Switch back to 4-copy registers */
                bnx2x_set_aer_mmd(params, phy);
        } else {
-               /* 10G & 20G */
+               /* 10G / 20G-DXGXS */
                bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
                                         MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
                                         0x4000);
-
                bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
                                         MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
        }
@@ -4603,6 +4781,10 @@ void bnx2x_link_status_update(struct link_params *params,
                params->feature_config_flags &=
                                        ~FEATURE_CONFIG_PFC_ENABLED;
 
+       if (SHMEM2_HAS(bp, link_attr_sync))
+               vars->link_attr_sync = SHMEM2_RD(bp,
+                                                link_attr_sync[params->port]);
+
        DP(NETIF_MSG_LINK, "link_status 0x%x  phy_link_up %x int_mask 0x%x\n",
                 vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
        DP(NETIF_MSG_LINK, "line_speed %x  duplex %x  flow_ctrl 0x%x\n",
@@ -5332,6 +5514,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
                        vars->link_status |= LINK_10GTFD;
                        break;
                case GP_STATUS_20G_DXGXS:
+               case GP_STATUS_20G_KR2:
                        vars->line_speed = SPEED_20000;
                        vars->link_status |= LINK_20GTFD;
                        break;
@@ -5439,7 +5622,15 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
        int rc = 0;
        lane = bnx2x_get_warpcore_lane(phy, params);
        /* Read gp_status */
-       if (phy->req_line_speed > SPEED_10000) {
+       if ((params->loopback_mode) &&
+           (phy->flags & FLAGS_WC_DUAL_MODE)) {
+               bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+                               MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+               bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+                               MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+               link_up &= 0x1;
+       } else if ((phy->req_line_speed > SPEED_10000) &&
+               (phy->supported & SUPPORTED_20000baseMLD2_Full)) {
                u16 temp_link_up;
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                                1, &temp_link_up);
@@ -5452,12 +5643,22 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
                        bnx2x_ext_phy_resolve_fc(phy, params, vars);
        } else {
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                               MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
+                               MDIO_WC_REG_GP2_STATUS_GP_2_1,
+                               &gp_status1);
                DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
-               /* Check for either KR or generic link up. */
-               gp_status1 = ((gp_status1 >> 8) & 0xf) |
-                       ((gp_status1 >> 12) & 0xf);
-               link_up = gp_status1 & (1 << lane);
+               /* Check for either KR, 1G, or AN up. */
+               link_up = ((gp_status1 >> 8) |
+                          (gp_status1 >> 12) |
+                          (gp_status1)) &
+                       (1 << lane);
+               if (phy->supported & SUPPORTED_20000baseKR2_Full) {
+                       u16 an_link;
+                       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+                                       MDIO_AN_REG_STATUS, &an_link);
+                       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+                                       MDIO_AN_REG_STATUS, &an_link);
+                       link_up |= (an_link & (1<<2));
+               }
                if (link_up && SINGLE_MEDIA_DIRECT(params)) {
                        u16 pd, gp_status4;
                        if (phy->req_line_speed == SPEED_AUTO_NEG) {
@@ -5522,7 +5723,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
        if ((lane & 1) == 0)
                gp_speed <<= 8;
        gp_speed &= 0x3f00;
-
+       link_up = !!link_up;
 
        rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
                                         duplex);
@@ -6683,7 +6884,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                } else if (prev_line_speed != vars->line_speed) {
                        REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
                               0);
-                        usleep_range(1000, 2000);
+                       usleep_range(1000, 2000);
                }
        }
 
@@ -6753,7 +6954,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
 {
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
                       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-        usleep_range(1000, 2000);
+       usleep_range(1000, 2000);
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
                       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 }
@@ -6894,7 +7095,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
 
-                usleep_range(1000, 2000);
+               usleep_range(1000, 2000);
        } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
                        ((fw_msgout & 0xff) != 0x03 && (phy->type ==
                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
@@ -7604,13 +7805,12 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
                        return 0;
-                usleep_range(1000, 2000);
+               usleep_range(1000, 2000);
        }
        return -EINVAL;
 }
 
 static void bnx2x_warpcore_power_module(struct link_params *params,
-                                       struct bnx2x_phy *phy,
                                        u8 power)
 {
        u32 pin_cfg;
@@ -7652,10 +7852,10 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        addr32 = addr & (~0x3);
        do {
                if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
-                       bnx2x_warpcore_power_module(params, phy, 0);
+                       bnx2x_warpcore_power_module(params, 0);
                        /* Note that 100us are not enough here */
                        usleep_range(1000, 2000);
-                       bnx2x_warpcore_power_module(params, phy, 1);
+                       bnx2x_warpcore_power_module(params, 1);
                }
                rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
                                    data_array);
@@ -7715,7 +7915,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        /* Wait appropriate time for two-wire command to finish before
         * polling the status register
         */
-        usleep_range(1000, 2000);
+       usleep_range(1000, 2000);
 
        /* Wait up to 500us for command complete status */
        for (i = 0; i < 100; i++) {
@@ -7751,7 +7951,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
                        return 0;
-                usleep_range(1000, 2000);
+               usleep_range(1000, 2000);
        }
 
        return -EINVAL;
@@ -7786,9 +7986,8 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        u32 sync_offset = 0, phy_idx, media_types;
-       u8 val[2], check_limiting_mode = 0;
+       u8 gport, val[2], check_limiting_mode = 0;
        *edc_mode = EDC_MODE_LIMITING;
-
        phy->media_type = ETH_PHY_UNSPECIFIED;
        /* First check for copper cable */
        if (bnx2x_read_sfp_module_eeprom(phy,
@@ -7843,8 +8042,15 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                               SFP_EEPROM_COMP_CODE_LR_MASK |
                               SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
                        DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+                       gport = params->port;
                        phy->media_type = ETH_PHY_SFP_1G_FIBER;
                        phy->req_line_speed = SPEED_1000;
+                       if (!CHIP_IS_E1x(bp))
+                               gport = BP_PATH(bp) + (params->port << 1);
+                       netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps."
+                             " Current SFP module in port %d is not"
+                             " compliant with 10G Ethernet\n",
+                        gport);
                } else {
                        int idx, cfg_idx = 0;
                        DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8241,7 +8447,7 @@ static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
                                    struct link_params *params)
 {
        struct bnx2x *bp = params->bp;
-       bnx2x_warpcore_power_module(params, phy, 0);
+       bnx2x_warpcore_power_module(params, 0);
        /* Put Warpcore in low power mode */
        REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
 
@@ -8264,7 +8470,7 @@ static void bnx2x_power_sfp_module(struct link_params *params,
                bnx2x_8727_power_module(params->bp, phy, power);
                break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-               bnx2x_warpcore_power_module(params, phy, power);
+               bnx2x_warpcore_power_module(params, power);
                break;
        default:
                break;
@@ -8337,7 +8543,8 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
        u32 val = REG_RD(bp, params->shmem_base +
                             offsetof(struct shmem_region, dev_info.
                                     port_feature_config[params->port].config));
-
+       /* Enabled transmitter by default */
+       bnx2x_sfp_set_transmitter(params, phy, 1);
        DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
                 params->port);
        /* Power up module */
@@ -8370,14 +8577,12 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
         */
        bnx2x_set_limiting_mode(params, phy, edc_mode);
 
-       /* Enable transmit for this module if the module is approved, or
-        * if unapproved modules should also enable the Tx laser
+       /* Disable transmit for this module if the module is not approved, and
+        * laser needs to be disabled.
         */
-       if (rc == 0 ||
-           (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
-           PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-               bnx2x_sfp_set_transmitter(params, phy, 1);
-       else
+       if ((rc) &&
+           ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+            PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER))
                bnx2x_sfp_set_transmitter(params, phy, 0);
 
        return rc;
@@ -8389,11 +8594,13 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
        struct bnx2x_phy *phy;
        u32 gpio_val;
        u8 gpio_num, gpio_port;
-       if (CHIP_IS_E3(bp))
+       if (CHIP_IS_E3(bp)) {
                phy = &params->phy[INT_PHY];
-       else
+               /* Always enable TX laser,will be disabled in case of fault */
+               bnx2x_sfp_set_transmitter(params, phy, 1);
+       } else {
                phy = &params->phy[EXT_PHY1];
-
+       }
        if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
                                      params->port, &gpio_num, &gpio_port) ==
            -EINVAL) {
@@ -8409,7 +8616,7 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
 
        /* Call the handling function in case module is detected */
        if (gpio_val == 0) {
-               bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+               bnx2x_set_mdio_emac_per_phy(bp, params);
                bnx2x_set_aer_mmd(params, phy);
 
                bnx2x_power_sfp_module(params, phy, 1);
@@ -8438,10 +8645,6 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
                        DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
                }
        } else {
-               u32 val = REG_RD(bp, params->shmem_base +
-                                offsetof(struct shmem_region, dev_info.
-                                         port_feature_config[params->port].
-                                         config));
                bnx2x_set_gpio_int(bp, gpio_num,
                                   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
                                   gpio_port);
@@ -8449,10 +8652,6 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
                 * Disable transmit for this module
                 */
                phy->media_type = ETH_PHY_NOT_PRESENT;
-               if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
-                    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
-                   CHIP_IS_E3(bp))
-                       bnx2x_sfp_set_transmitter(params, phy, 0);
        }
 }
 
@@ -9192,6 +9391,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                        bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+                       bnx2x_8727_power_module(params->bp, phy, 0);
                        return 0;
                }
        } /* Over current check */
@@ -9296,20 +9496,28 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
                                            struct bnx2x *bp,
                                            u8 port)
 {
-       u16 val, fw_ver1, fw_ver2, cnt;
+       u16 val, fw_ver2, cnt, i;
+       static struct bnx2x_reg_set reg_set[] = {
+               {MDIO_PMA_DEVAD, 0xA819, 0x0014},
+               {MDIO_PMA_DEVAD, 0xA81A, 0xc200},
+               {MDIO_PMA_DEVAD, 0xA81B, 0x0000},
+               {MDIO_PMA_DEVAD, 0xA81C, 0x0300},
+               {MDIO_PMA_DEVAD, 0xA817, 0x0009}
+       };
+       u16 fw_ver1;
 
-       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
                bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
                                phy->ver_addr);
        } else {
                /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
                /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
-               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
-               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
-               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+               for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set);
+                     i++)
+                       bnx2x_cl45_write(bp, phy, reg_set[i].devad,
+                                        reg_set[i].reg, reg_set[i].val);
 
                for (cnt = 0; cnt < 100; cnt++) {
                        bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
@@ -9357,8 +9565,16 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
                                struct bnx2x_phy *phy)
 {
-       u16 val, offset;
-
+       u16 val, offset, i;
+       static struct bnx2x_reg_set reg_set[] = {
+               {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
+               {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
+               {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
+               {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
+               {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+                       MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
+               {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
+       };
        /* PHYC_CTL_LED_CTL */
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD,
@@ -9370,49 +9586,20 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
                         MDIO_PMA_DEVAD,
                         MDIO_PMA_REG_8481_LINK_SIGNAL, val);
 
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_8481_LED1_MASK,
-                        0x80);
-
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_8481_LED2_MASK,
-                        0x18);
-
-       /* Select activity source by Tx and Rx, as suggested by PHY AE */
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_8481_LED3_MASK,
-                        0x0006);
-
-       /* Select the closest activity blink rate to that in 10/100/1000 */
-       bnx2x_cl45_write(bp, phy,
-                       MDIO_PMA_DEVAD,
-                       MDIO_PMA_REG_8481_LED3_BLINK,
-                       0);
-
-       /* Configure the blink rate to ~15.9 Hz */
-       bnx2x_cl45_write(bp, phy,
-                       MDIO_PMA_DEVAD,
-                       MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
-                       MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ);
+       for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+               bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+                                reg_set[i].val);
 
-       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
                offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
        else
                offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
 
-       bnx2x_cl45_read(bp, phy,
-                       MDIO_PMA_DEVAD, offset, &val);
-       val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, offset, val);
-
-       /* 'Interrupt Mask' */
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_AN_DEVAD,
-                        0xFFFB, 0xFFFD);
+       /* stretch_en for LED3*/
+       bnx2x_cl45_read_or_write(bp, phy,
+                                MDIO_PMA_DEVAD, offset,
+                                MDIO_PMA_REG_84823_LED3_STRETCH_EN);
 }
 
 static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
@@ -9422,7 +9609,8 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        switch (action) {
        case PHY_INIT:
-               if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+               if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+                   (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
                        /* Save spirom version */
                        bnx2x_save_848xx_spirom_version(phy, bp, params->port);
                }
@@ -9443,7 +9631,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                                       struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
-       u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
+       u16 autoneg_val, an_1000_val, an_10_100_val;
 
        bnx2x_848xx_specific_func(phy, params, PHY_INIT);
        bnx2x_cl45_write(bp, phy,
@@ -9542,11 +9730,12 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
        if (phy->req_duplex == DUPLEX_FULL)
                autoneg_val |= (1<<8);
 
-       /* Always write this if this is not 84833.
-        * For 84833, write it only when it's a forced speed.
+       /* Always write this if this is not 84833/4.
+        * For 84833/4, write it only when it's a forced speed.
         */
-       if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-               ((autoneg_val & (1<<12)) == 0))
+       if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+            (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) ||
+           ((autoneg_val & (1<<12)) == 0))
                bnx2x_cl45_write(bp, phy,
                         MDIO_AN_DEVAD,
                         MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
@@ -9558,14 +9747,11 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                        DP(NETIF_MSG_LINK, "Advertising 10G\n");
                        /* Restart autoneg for 10G*/
 
-                       bnx2x_cl45_read(bp, phy,
-                                       MDIO_AN_DEVAD,
-                                       MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
-                                       &an_10g_val);
-                       bnx2x_cl45_write(bp, phy,
-                                        MDIO_AN_DEVAD,
-                                        MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
-                                        an_10g_val | 0x1000);
+                       bnx2x_cl45_read_or_write(
+                               bp, phy,
+                               MDIO_AN_DEVAD,
+                               MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+                               0x1000);
                        bnx2x_cl45_write(bp, phy,
                                         MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
                                         0x3200);
@@ -9598,9 +9784,8 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
 #define PHY84833_CMDHDLR_WAIT 300
 #define PHY84833_CMDHDLR_MAX_ARGS 5
 static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
-                                  struct link_params *params,
-                  u16 fw_cmd,
-                  u16 cmd_args[], int argc)
+                               struct link_params *params, u16 fw_cmd,
+                               u16 cmd_args[], int argc)
 {
        int idx;
        u16 val;
@@ -9614,7 +9799,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
                                MDIO_84833_CMD_HDLR_STATUS, &val);
                if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
                        break;
-                usleep_range(1000, 2000);
+               usleep_range(1000, 2000);
        }
        if (idx >= PHY84833_CMDHDLR_WAIT) {
                DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
@@ -9635,7 +9820,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
                if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
                        (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
                        break;
-                usleep_range(1000, 2000);
+               usleep_range(1000, 2000);
        }
        if ((idx >= PHY84833_CMDHDLR_WAIT) ||
                (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
@@ -9654,7 +9839,6 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
        return 0;
 }
 
-
 static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
                                   struct link_params *params,
                                   struct link_vars *vars)
@@ -9802,11 +9986,11 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u8 port, initialize = 1;
        u16 val;
-       u32 actual_phy_selection, cms_enable;
+       u32 actual_phy_selection;
        u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
        int rc = 0;
 
-        usleep_range(1000, 2000);
+       usleep_range(1000, 2000);
 
        if (!(CHIP_IS_E1x(bp)))
                port = BP_PATH(bp);
@@ -9828,7 +10012,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
        /* Wait for GPHY to come out of reset */
        msleep(50);
-       if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+       if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+           (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
                /* BCM84823 requires that XGXS links up first @ 10G for normal
                 * behavior.
                 */
@@ -9884,7 +10069,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
                   params->multi_phy_config, val);
 
-       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
                bnx2x_84833_pair_swap_cfg(phy, params, vars);
 
                /* Keep AutogrEEEn disabled. */
@@ -9904,7 +10090,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                bnx2x_save_848xx_spirom_version(phy, bp, params->port);
        /* 84833 PHY has a better feature and doesn't need to support this. */
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
-               cms_enable = REG_RD(bp, params->shmem_base +
+               u32 cms_enable = REG_RD(bp, params->shmem_base +
                        offsetof(struct shmem_region,
                        dev_info.port_hw_config[params->port].default_cfg)) &
                        PORT_HW_CFG_ENABLE_CMS_MASK;
@@ -9933,7 +10119,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                        return rc;
                }
 
-               if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) &&
+               if ((phy->req_duplex == DUPLEX_FULL) &&
                    (params->eee_mode & EEE_MODE_ADV_LPI) &&
                    (bnx2x_eee_calc_timer(params) ||
                     !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
@@ -9948,15 +10134,13 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
        }
 
-       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
                /* Bring PHY out of super isolate mode as the final step. */
-               bnx2x_cl45_read(bp, phy,
-                               MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
-               val &= ~MDIO_84833_SUPER_ISOLATE;
-               bnx2x_cl45_write(bp, phy,
-                               MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+               bnx2x_cl45_read_and_write(bp, phy,
+                                         MDIO_CTL_DEVAD,
+                                         MDIO_84833_TOP_CFG_XGPHY_STRAP1,
+                                         (u16)~MDIO_84833_SUPER_ISOLATE);
        }
        return rc;
 }
@@ -10090,7 +10274,6 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
        return link_up;
 }
 
-
 static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
 {
        int status = 0;
@@ -10962,7 +11145,7 @@ static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
 /*                     STATIC PHY DECLARATION                    */
 /******************************************************************/
 
-static struct bnx2x_phy phy_null = {
+static const struct bnx2x_phy phy_null = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
        .addr           = 0,
        .def_md_devad   = 0,
@@ -10988,7 +11171,7 @@ static struct bnx2x_phy phy_null = {
        .phy_specific_func = (phy_specific_func_t)NULL
 };
 
-static struct bnx2x_phy phy_serdes = {
+static const struct bnx2x_phy phy_serdes = {
        .type           = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
        .addr           = 0xff,
        .def_md_devad   = 0,
@@ -11023,7 +11206,7 @@ static struct bnx2x_phy phy_serdes = {
        .phy_specific_func = (phy_specific_func_t)NULL
 };
 
-static struct bnx2x_phy phy_xgxs = {
+static const struct bnx2x_phy phy_xgxs = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
        .addr           = 0xff,
        .def_md_devad   = 0,
@@ -11058,12 +11241,11 @@ static struct bnx2x_phy phy_xgxs = {
        .set_link_led   = (set_link_led_t)NULL,
        .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
 };
-static struct bnx2x_phy phy_warpcore = {
+static const struct bnx2x_phy phy_warpcore = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = (FLAGS_HW_LOCK_REQUIRED |
-                          FLAGS_TX_ERROR_CHECK),
+       .flags          = FLAGS_TX_ERROR_CHECK,
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -11097,7 +11279,7 @@ static struct bnx2x_phy phy_warpcore = {
 };
 
 
-static struct bnx2x_phy phy_7101 = {
+static const struct bnx2x_phy phy_7101 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
        .addr           = 0xff,
        .def_md_devad   = 0,
@@ -11126,11 +11308,11 @@ static struct bnx2x_phy phy_7101 = {
        .set_link_led   = (set_link_led_t)bnx2x_7101_set_link_led,
        .phy_specific_func = (phy_specific_func_t)NULL
 };
-static struct bnx2x_phy phy_8073 = {
+static const struct bnx2x_phy phy_8073 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = FLAGS_HW_LOCK_REQUIRED,
+       .flags          = 0,
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -11157,7 +11339,7 @@ static struct bnx2x_phy phy_8073 = {
        .set_link_led   = (set_link_led_t)NULL,
        .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
 };
-static struct bnx2x_phy phy_8705 = {
+static const struct bnx2x_phy phy_8705 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
        .addr           = 0xff,
        .def_md_devad   = 0,
@@ -11185,7 +11367,7 @@ static struct bnx2x_phy phy_8705 = {
        .set_link_led   = (set_link_led_t)NULL,
        .phy_specific_func = (phy_specific_func_t)NULL
 };
-static struct bnx2x_phy phy_8706 = {
+static const struct bnx2x_phy phy_8706 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
        .addr           = 0xff,
        .def_md_devad   = 0,
@@ -11215,12 +11397,11 @@ static struct bnx2x_phy phy_8706 = {
        .phy_specific_func = (phy_specific_func_t)NULL
 };
 
-static struct bnx2x_phy phy_8726 = {
+static const struct bnx2x_phy phy_8726 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
        .addr           = 0xff,
        .def_md_devad   = 0,
-       .flags          = (FLAGS_HW_LOCK_REQUIRED |
-                          FLAGS_INIT_XGXS_FIRST |
+       .flags          = (FLAGS_INIT_XGXS_FIRST |
                           FLAGS_TX_ERROR_CHECK),
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
@@ -11248,7 +11429,7 @@ static struct bnx2x_phy phy_8726 = {
        .phy_specific_func = (phy_specific_func_t)NULL
 };
 
-static struct bnx2x_phy phy_8727 = {
+static const struct bnx2x_phy phy_8727 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
        .addr           = 0xff,
        .def_md_devad   = 0,
@@ -11278,7 +11459,7 @@ static struct bnx2x_phy phy_8727 = {
        .set_link_led   = (set_link_led_t)bnx2x_8727_set_link_led,
        .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
 };
-static struct bnx2x_phy phy_8481 = {
+static const struct bnx2x_phy phy_8481 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
        .addr           = 0xff,
        .def_md_devad   = 0,
@@ -11314,7 +11495,7 @@ static struct bnx2x_phy phy_8481 = {
        .phy_specific_func = (phy_specific_func_t)NULL
 };
 
-static struct bnx2x_phy phy_84823 = {
+static const struct bnx2x_phy phy_84823 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
        .addr           = 0xff,
        .def_md_devad   = 0,
@@ -11351,7 +11532,7 @@ static struct bnx2x_phy phy_84823 = {
        .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
-static struct bnx2x_phy phy_84833 = {
+static const struct bnx2x_phy phy_84833 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
        .addr           = 0xff,
        .def_md_devad   = 0,
@@ -11386,7 +11567,41 @@ static struct bnx2x_phy phy_84833 = {
        .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
-static struct bnx2x_phy phy_54618se = {
+static const struct bnx2x_phy phy_84834 = {
+       .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834,
+       .addr           = 0xff,
+       .def_md_devad   = 0,
+       .flags          = FLAGS_FAN_FAILURE_DET_REQ |
+                           FLAGS_REARM_LATCH_SIGNAL,
+       .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+       .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+       .mdio_ctrl      = 0,
+       .supported      = (SUPPORTED_100baseT_Half |
+                          SUPPORTED_100baseT_Full |
+                          SUPPORTED_1000baseT_Full |
+                          SUPPORTED_10000baseT_Full |
+                          SUPPORTED_TP |
+                          SUPPORTED_Autoneg |
+                          SUPPORTED_Pause |
+                          SUPPORTED_Asym_Pause),
+       .media_type     = ETH_PHY_BASE_T,
+       .ver_addr       = 0,
+       .req_flow_ctrl  = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+       .config_init    = (config_init_t)bnx2x_848x3_config_init,
+       .read_status    = (read_status_t)bnx2x_848xx_read_status,
+       .link_reset     = (link_reset_t)bnx2x_848x3_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+       .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
+       .hw_reset       = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+       .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
+       .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
+static const struct bnx2x_phy phy_54618se = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
        .addr           = 0xff,
        .def_md_devad   = 0,
@@ -11564,9 +11779,11 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
                        phy->media_type = ETH_PHY_KR;
                        phy->flags |= FLAGS_WC_DUAL_MODE;
                        phy->supported &= (SUPPORTED_20000baseKR2_Full |
+                                          SUPPORTED_Autoneg |
                                           SUPPORTED_FIBRE |
                                           SUPPORTED_Pause |
                                           SUPPORTED_Asym_Pause);
+                       phy->flags &= ~FLAGS_TX_ERROR_CHECK;
                        break;
                default:
                        DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
@@ -11665,6 +11882,9 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
                *phy = phy_84833;
                break;
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+               *phy = phy_84834;
+               break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
                *phy = phy_54618se;
@@ -11721,9 +11941,10 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
        }
        phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
-       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+       if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+            (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) &&
            (phy->ver_addr)) {
-               /* Remove 100Mb link supported for BCM84833 when phy fw
+               /* Remove 100Mb link supported for BCM84833/4 when phy fw
                 * version lower than or equal to 1.39
                 */
                u32 raw_ver = REG_RD(bp, phy->ver_addr);
@@ -11733,12 +11954,6 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
                                            SUPPORTED_100baseT_Full);
        }
 
-       /* In case mdc/mdio_access of the external phy is different than the
-        * mdc/mdio access of the XGXS, a HW lock must be taken in each access
-        * to prevent one port interfere with another port's CL45 operations.
-        */
-       if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
-               phy->flags |= FLAGS_HW_LOCK_REQUIRED;
        DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
                   phy_type, port, phy_index);
        DP(NETIF_MSG_LINK, "             addr=0x%x, mdio_ctl=0x%x\n",
@@ -11863,7 +12078,6 @@ u32 bnx2x_phy_selection(struct link_params *params)
        return return_cfg;
 }
 
-
 int bnx2x_phy_probe(struct link_params *params)
 {
        u8 phy_index, actual_phy_idx;
@@ -11907,6 +12121,10 @@ int bnx2x_phy_probe(struct link_params *params)
                    FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
                        phy->flags &= ~FLAGS_TX_ERROR_CHECK;
 
+               if (!(params->feature_config_flags &
+                     FEATURE_CONFIG_MT_SUPPORT))
+                       phy->flags |= FLAGS_MDC_MDIO_WA_G;
+
                sync_offset = params->shmem_base +
                        offsetof(struct shmem_region,
                        dev_info.port_hw_config[params->port].media_type);
@@ -12018,13 +12236,17 @@ static void bnx2x_init_xgxs_loopback(struct link_params *params,
                                     struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
-               vars->link_up = 1;
-               vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-               vars->duplex = DUPLEX_FULL;
+       struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
+       vars->link_up = 1;
+       vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+       vars->duplex = DUPLEX_FULL;
        if (params->req_line_speed[0] == SPEED_1000)
-                       vars->line_speed = SPEED_1000;
+               vars->line_speed = SPEED_1000;
+       else if ((params->req_line_speed[0] == SPEED_20000) ||
+                (int_phy->flags & FLAGS_WC_DUAL_MODE))
+               vars->line_speed = SPEED_20000;
        else
-                       vars->line_speed = SPEED_10000;
+               vars->line_speed = SPEED_10000;
 
        if (!USES_WARPCORE(bp))
                bnx2x_xgxs_deassert(params);
@@ -12044,24 +12266,20 @@ static void bnx2x_init_xgxs_loopback(struct link_params *params,
                        bnx2x_bmac_enable(params, vars, 0, 1);
        }
 
-               if (params->loopback_mode == LOOPBACK_XGXS) {
-                       /* set 10G XGXS loopback */
-                       params->phy[INT_PHY].config_loopback(
-                               &params->phy[INT_PHY],
-                               params);
-
-               } else {
-                       /* set external phy loopback */
-                       u8 phy_index;
-                       for (phy_index = EXT_PHY1;
-                             phy_index < params->num_phys; phy_index++) {
-                               if (params->phy[phy_index].config_loopback)
-                                       params->phy[phy_index].config_loopback(
-                                               &params->phy[phy_index],
-                                               params);
-                       }
-               }
-               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+       if (params->loopback_mode == LOOPBACK_XGXS) {
+               /* Set 10G XGXS loopback */
+               int_phy->config_loopback(int_phy, params);
+       } else {
+               /* Set external phy loopback */
+               u8 phy_index;
+               for (phy_index = EXT_PHY1;
+                     phy_index < params->num_phys; phy_index++)
+                       if (params->phy[phy_index].config_loopback)
+                               params->phy[phy_index].config_loopback(
+                                       &params->phy[phy_index],
+                                       params);
+       }
+       REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 
        bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
 }
@@ -12071,7 +12289,7 @@ void bnx2x_set_rx_filter(struct link_params *params, u8 en)
        struct bnx2x *bp = params->bp;
        u8 val = en * 0x1F;
 
-       /* Open the gate between the NIG to the BRB */
+       /* Open / close the gate between the NIG and the BRB */
        if (!CHIP_IS_E1x(bp))
                val |= en * 0x20;
        REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
@@ -12345,7 +12563,7 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
         * Hold it as vars low
         */
         /* Clear link led */
-       bnx2x_set_mdio_clk(bp, params->chip_id, port);
+       bnx2x_set_mdio_emac_per_phy(bp, params);
        bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
 
        if (reset_ext_phy) {
@@ -12696,7 +12914,7 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
        /* Initiate PHY reset*/
        bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
                       port);
-        usleep_range(1000, 2000);
+       usleep_range(1000, 2000);
        bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
                       port);
 
@@ -12784,7 +13002,8 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
 }
 
 static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
-                                              struct bnx2x_phy *phy)
+                                   struct bnx2x_phy *phy,
+                                   u8 port)
 {
        u16 val, cnt;
        /* Wait for FW completing its initialization. */
@@ -12794,7 +13013,7 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
                                MDIO_PMA_REG_CTRL, &val);
                if (!(val & (1<<15)))
                        break;
-                usleep_range(1000, 2000);
+               usleep_range(1000, 2000);
        }
        if (cnt >= 1500) {
                DP(NETIF_MSG_LINK, "84833 reset timeout\n");
@@ -12811,26 +13030,28 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
                         MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
 
        /* Save spirom version */
-       bnx2x_save_848xx_spirom_version(phy, bp, PORT_0);
+       bnx2x_save_848xx_spirom_version(phy, bp, port);
        return 0;
 }
 
 int bnx2x_pre_init_phy(struct bnx2x *bp,
                                  u32 shmem_base,
                                  u32 shmem2_base,
-                                 u32 chip_id)
+                                 u32 chip_id,
+                                 u8 port)
 {
        int rc = 0;
        struct bnx2x_phy phy;
-       bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
        if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
-                              PORT_0, &phy)) {
+                              port, &phy) != 0) {
                DP(NETIF_MSG_LINK, "populate_phy failed\n");
                return -EINVAL;
        }
+       bnx2x_set_mdio_clk(bp, chip_id, phy.mdio_ctrl);
        switch (phy.type) {
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
-               rc = bnx2x_84833_pre_init_phy(bp, &phy);
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+               rc = bnx2x_84833_pre_init_phy(bp, &phy, port);
                break;
        default:
                break;
@@ -12867,6 +13088,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                                                phy_index, chip_id);
                break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
                /* GPIO3's are linked, and so both need to be toggled
                 * to obtain required 2us pulse.
                 */
@@ -12898,8 +13120,9 @@ int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
        u32 phy_ver, val;
        u8 phy_index = 0;
        u32 ext_phy_type, ext_phy_config;
-       bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
-       bnx2x_set_mdio_clk(bp, chip_id, PORT_1);
+
+       bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC0);
+       bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC1);
        DP(NETIF_MSG_LINK, "Begin common phy init\n");
        if (CHIP_IS_E3(bp)) {
                /* Enable EPIO */
@@ -12960,6 +13183,7 @@ static void bnx2x_check_over_curr(struct link_params *params,
                                            " error.\n",
                         params->port);
                        vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
+                       bnx2x_warpcore_power_module(params, 0);
                }
        } else
                vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
@@ -13139,6 +13363,108 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
                }
        }
 }
+static void bnx2x_disable_kr2(struct link_params *params,
+                             struct link_vars *vars,
+                             struct bnx2x_phy *phy)
+{
+       struct bnx2x *bp = params->bp;
+       int i;
+       static struct bnx2x_reg_set reg_set[] = {
+               /* Step 1 - Program the TX/RX alignment markers */
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+       };
+       DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
+
+       for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+               bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+                                reg_set[i].val);
+       vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+       bnx2x_update_link_attr(params, vars->link_attr_sync);
+
+       /* Restart AN on leading lane */
+       bnx2x_warpcore_restart_AN_KR(phy, params);
+}
+
+static void bnx2x_kr2_recovery(struct link_params *params,
+                              struct link_vars *vars,
+                              struct bnx2x_phy *phy)
+{
+       struct bnx2x *bp = params->bp;
+       DP(NETIF_MSG_LINK, "KR2 recovery\n");
+       bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+       bnx2x_warpcore_restart_AN_KR(phy, params);
+}
+
+static void bnx2x_check_kr2_wa(struct link_params *params,
+                              struct link_vars *vars,
+                              struct bnx2x_phy *phy)
+{
+       struct bnx2x *bp = params->bp;
+       u16 base_page, next_page, not_kr2_device, lane;
+       int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+
+       if (!sigdet) {
+               if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+                       bnx2x_kr2_recovery(params, vars, phy);
+               return;
+       }
+
+       lane = bnx2x_get_warpcore_lane(phy, params);
+       CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+                         MDIO_AER_BLOCK_AER_REG, lane);
+       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+                       MDIO_AN_REG_LP_AUTO_NEG, &base_page);
+       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+                       MDIO_AN_REG_LP_AUTO_NEG2, &next_page);
+       bnx2x_set_aer_mmd(params, phy);
+
+       /* CL73 has not begun yet */
+       if (base_page == 0) {
+               if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+                       bnx2x_kr2_recovery(params, vars, phy);
+               return;
+       }
+
+       /* In case NP bit is not set in the BasePage, or it is set,
+        * but only KX is advertised, declare this link partner as non-KR2
+        * device.
+        */
+       not_kr2_device = (((base_page & 0x8000) == 0) ||
+                         (((base_page & 0x8000) &&
+                           ((next_page & 0xe0) == 0x2))));
+
+       /* In case KR2 is already disabled, check if we need to re-enable it */
+       if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+               if (!not_kr2_device) {
+                       DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
+                                      next_page);
+                       bnx2x_kr2_recovery(params, vars, phy);
+               }
+               return;
+       }
+       /* KR2 is enabled, but not KR2 device */
+       if (not_kr2_device) {
+               /* Disable KR2 on both lanes */
+               DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
+               bnx2x_disable_kr2(params, vars, phy);
+               return;
+       }
+}
+
 void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
 {
        u16 phy_idx;
@@ -13156,6 +13482,9 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
        if (CHIP_IS_E3(bp)) {
                struct bnx2x_phy *phy = &params->phy[INT_PHY];
                bnx2x_set_aer_mmd(params, phy);
+               if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
+                   (phy->speed_cap_mask & SPEED_20000))
+                       bnx2x_check_kr2_wa(params, vars, phy);
                bnx2x_check_over_curr(params, vars);
                if (vars->rx_tx_asic_rst)
                        bnx2x_warpcore_config_runtime(phy, params, vars);
@@ -13176,27 +13505,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
                                bnx2x_update_mng(params, vars->link_status);
                        }
                }
-
        }
-
-}
-
-u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
-{
-       u8 phy_index;
-       struct bnx2x_phy phy;
-       for (phy_index = INT_PHY; phy_index < MAX_PHYS;
-             phy_index++) {
-               if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
-                                      0, &phy) != 0) {
-                       DP(NETIF_MSG_LINK, "populate phy failed\n");
-                       return 0;
-               }
-
-               if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
-                       return 1;
-       }
-       return 0;
 }
 
 u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
index ba981ced628b95f622cec9fe4229e433f3674762..181c5ce490bc5a94f176345bb0f73a132b8a8582 100644 (file)
@@ -139,8 +139,6 @@ struct bnx2x_phy {
        u8 addr;
        u8 def_md_devad;
        u16 flags;
-       /* Require HW lock */
-#define FLAGS_HW_LOCK_REQUIRED         (1<<0)
        /* No Over-Current detection */
 #define FLAGS_NOC                      (1<<1)
        /* Fan failure detection required */
@@ -156,6 +154,7 @@ struct bnx2x_phy {
 #define FLAGS_MDC_MDIO_WA_B0           (1<<10)
 #define FLAGS_TX_ERROR_CHECK           (1<<12)
 #define FLAGS_EEE                      (1<<13)
+#define FLAGS_MDC_MDIO_WA_G            (1<<15)
 
        /* preemphasis values for the rx side */
        u16 rx_preemphasis[4];
@@ -267,6 +266,8 @@ struct link_params {
 #define FEATURE_CONFIG_AUTOGREEEN_ENABLED                      (1<<9)
 #define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED             (1<<10)
 #define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET                (1<<11)
+#define FEATURE_CONFIG_MT_SUPPORT                      (1<<13)
+
        /* Will be populated during common init */
        struct bnx2x_phy phy[MAX_PHYS];
 
@@ -347,6 +348,8 @@ struct link_vars {
        u8 rx_tx_asic_rst;
        u8 turn_to_run_wc_rt;
        u16 rsrv2;
+       /* The same definitions as the shmem2 parameter */
+       u32 link_attr_sync;
 };
 
 /***********************************************************/
@@ -418,10 +421,6 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 
 void bnx2x_hw_reset_phy(struct link_params *params);
 
-/* Checks if HW lock is required for this phy/board type */
-u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
-                         u32 shmem2_base);
-
 /* Check swap bit and adjust PHY order */
 u32 bnx2x_phy_selection(struct link_params *params);
 
@@ -460,9 +459,6 @@ struct bnx2x_nig_brb_pfc_port_params {
        u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
        u32 llfc_high_priority_classes;
        u32 llfc_low_priority_classes;
-       /* BRB */
-       u32 cos0_pauseable;
-       u32 cos1_pauseable;
 };
 
 
index 54b8c1f19d39aeb256e947fc2703fa0475a5f6e3..b4659c4b6fbd8616858f3471a1033f0e16bab155 100644 (file)
@@ -6267,6 +6267,10 @@ void bnx2x_pf_disable(struct bnx2x *bp)
 static void bnx2x__common_init_phy(struct bnx2x *bp)
 {
        u32 shmem_base[2], shmem2_base[2];
+       /* Avoid common init in case MFW supports LFA */
+       if (SHMEM2_RD(bp, size) >
+           (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+               return;
        shmem_base[0] =  bp->common.shmem_base;
        shmem2_base[0] = bp->common.shmem2_base;
        if (!CHIP_IS_E1x(bp)) {
@@ -9862,6 +9866,14 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 
        bp->link_params.shmem_base = bp->common.shmem_base;
        bp->link_params.shmem2_base = bp->common.shmem2_base;
+       if (SHMEM2_RD(bp, size) >
+           (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+               bp->link_params.lfa_base =
+               REG_RD(bp, bp->common.shmem2_base +
+                      (u32)offsetof(struct shmem2_region,
+                                    lfa_host_addr[BP_PORT(bp)]));
+       else
+               bp->link_params.lfa_base = 0;
        BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
                       bp->common.shmem_base, bp->common.shmem2_base);
 
@@ -9909,6 +9921,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
                FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
+       bp->link_params.feature_config_flags |=
+               (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
+               FEATURE_CONFIG_MT_SUPPORT : 0;
+
        bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
                        BC_SUPPORTS_PFC_STATS : 0;
 
@@ -10360,17 +10377,6 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
                bp->mdio.prtad =
                        XGXS_EXT_PHY_ADDR(ext_phy_config);
 
-       /*
-        * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
-        * In MF mode, it is set to cover self test cases
-        */
-       if (IS_MF(bp))
-               bp->port.need_hw_lock = 1;
-       else
-               bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
-                                                       bp->common.shmem_base,
-                                                       bp->common.shmem2_base);
-
        /* Configure link feature according to nvram value */
        eee_mode = (((SHMEM_RD(bp, dev_info.
                      port_feature_config[port].eee_power_mode)) &
index 7d93adb57f3122cd10e0f329adad4fa1987d808d..f8d432af256332fa1d0cdab3256ca3914079ba55 100644 (file)
 #define XMAC_CTRL_REG_RX_EN                                     (0x1<<1)
 #define XMAC_CTRL_REG_SOFT_RESET                                (0x1<<6)
 #define XMAC_CTRL_REG_TX_EN                                     (0x1<<0)
+#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB                          (0x1<<7)
 #define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN                                 (0x1<<18)
 #define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN                                 (0x1<<17)
 #define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON                      (0x1<<1)
 #define XMAC_REG_PAUSE_CTRL                                     0x68
 #define XMAC_REG_PFC_CTRL                                       0x70
 #define XMAC_REG_PFC_CTRL_HI                                    0x74
+#define XMAC_REG_RX_LSS_CTRL                                    0x50
 #define XMAC_REG_RX_LSS_STATUS                                  0x58
 /* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
  * CRC in strip mode */
 #define XMAC_REG_RX_MAX_SIZE                                    0x40
 #define XMAC_REG_TX_CTRL                                        0x20
+#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE                (0x1<<0)
+#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE               (0x1<<1)
 /* [RW 16] Indirect access to the XX table of the XX protection mechanism.
    The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] -
    header pointer. */
 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI     0x1B00
 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS   0x1E00
 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI     0x1F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2     0x3900
 
 
 #define MDIO_REG_BANK_10G_PARALLEL_DETECT              0x8130
@@ -7062,7 +7067,8 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2      0x12
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY    0x4000
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ                0x8000
-#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150  0x96
+#define MDIO_WC_REG_PCS_STATUS2                                0x0021
+#define MDIO_WC_REG_PMD_KR_CONTROL                     0x0096
 #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL               0x8000
 #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1              0x800e
 #define MDIO_WC_REG_XGXSBLK1_DESKEW                    0x8010
@@ -7094,6 +7100,7 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_PAR_DET_10G_STATUS                 0x8130
 #define MDIO_WC_REG_PAR_DET_10G_CTRL                   0x8131
 #define MDIO_WC_REG_XGXS_X2_CONTROL2                   0x8141
+#define MDIO_WC_REG_XGXS_X2_CONTROL3                   0x8142
 #define MDIO_WC_REG_XGXS_RX_LN_SWAP1                   0x816B
 #define MDIO_WC_REG_XGXS_TX_LN_SWAP1                   0x8169
 #define MDIO_WC_REG_GP2_STATUS_GP_2_0                  0x81d0
@@ -7128,6 +7135,7 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET         0x0a
 #define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK           0x7c00
 #define MDIO_WC_REG_TX_FIR_TAP_ENABLE          0x8000
+#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP                0x82e2
 #define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL     0x82e3
 #define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL       0x82e6
 #define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL       0x82e7
@@ -7145,9 +7153,16 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_DIGITAL4_MISC5                     0x833e
 #define MDIO_WC_REG_DIGITAL5_MISC6                     0x8345
 #define MDIO_WC_REG_DIGITAL5_MISC7                     0x8349
+#define MDIO_WC_REG_DIGITAL5_LINK_STATUS               0x834d
 #define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED              0x834e
 #define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL          0x8350
 #define MDIO_WC_REG_CL49_USERB0_CTRL                   0x8368
+#define MDIO_WC_REG_CL73_USERB0_CTRL                   0x8370
+#define MDIO_WC_REG_CL73_USERB0_USTAT                  0x8371
+#define MDIO_WC_REG_CL73_BAM_CTRL1                     0x8372
+#define MDIO_WC_REG_CL73_BAM_CTRL2                     0x8373
+#define MDIO_WC_REG_CL73_BAM_CTRL3                     0x8374
+#define MDIO_WC_REG_CL73_BAM_CODE_FIELD                        0x837b
 #define MDIO_WC_REG_EEE_COMBO_CONTROL0                 0x8390
 #define MDIO_WC_REG_TX66_CONTROL                       0x83b0
 #define MDIO_WC_REG_RX66_CONTROL                       0x83c0
@@ -7161,7 +7176,17 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_RX66_SCW3_MASK                     0x83c9
 #define MDIO_WC_REG_FX100_CTRL1                                0x8400
 #define MDIO_WC_REG_FX100_CTRL3                                0x8402
-
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5               0x8436
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6               0x8437
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7               0x8438
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9               0x8439
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10              0x843a
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11              0x843b
+#define MDIO_WC_REG_ETA_CL73_OUI1                      0x8453
+#define MDIO_WC_REG_ETA_CL73_OUI2                      0x8454
+#define MDIO_WC_REG_ETA_CL73_OUI3                      0x8455
+#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE               0x8456
+#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE                        0x8457
 #define MDIO_WC_REG_MICROBLK_CMD                       0xffc2
 #define MDIO_WC_REG_MICROBLK_DL_STATUS                 0xffc5
 #define MDIO_WC_REG_MICROBLK_CMD3                      0xffcc
index a71c0f317a93dc13610913e4ea1793d3188686b5..d40c994a4f6a2c807965e44eaf81f2be0053fb50 100644 (file)
@@ -48,7 +48,7 @@ config CHELSIO_T1_1G
 
 config CHELSIO_T3
        tristate "Chelsio Communications T3 10Gb Ethernet support"
-       depends on PCI
+       depends on PCI && INET
        select FW_LOADER
        select MDIO
        ---help---
index 17ae8c61968017aa6aa6a7477ac062f8dba79669..9f992b95eddc1b13209082890f13d1d9a8248964 100644 (file)
@@ -1910,9 +1910,8 @@ static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S];
 static int ndevs;
 static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, };
 
-/* '21' below should really be 'MAX_NUM_EWRK3S' */
 module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
+module_param_array(irq, byte, NULL, 0);
 MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)");
 MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)");
 
index adef536c1586f7552f09bccb92e35b57e09b8bc5..0661e9379583ec215b644e6f4fed4d2076498b7c 100644 (file)
@@ -1675,24 +1675,6 @@ static inline int events_get(struct be_eq_obj *eqo)
        return num;
 }
 
-static int event_handle(struct be_eq_obj *eqo)
-{
-       bool rearm = false;
-       int num = events_get(eqo);
-
-       /* Deal with any spurious interrupts that come without events */
-       if (!num)
-               rearm = true;
-
-       if (num || msix_enabled(eqo->adapter))
-               be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
-
-       if (num)
-               napi_schedule(&eqo->napi);
-
-       return num;
-}
-
 /* Leaves the EQ is disarmed state */
 static void be_eq_clean(struct be_eq_obj *eqo)
 {
@@ -2014,15 +1996,23 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
 
 static irqreturn_t be_intx(int irq, void *dev)
 {
-       struct be_adapter *adapter = dev;
-       int num_evts;
+       struct be_eq_obj *eqo = dev;
+       struct be_adapter *adapter = eqo->adapter;
+       int num_evts = 0;
 
-       /* With INTx only one EQ is used */
-       num_evts = event_handle(&adapter->eq_obj[0]);
-       if (num_evts)
-               return IRQ_HANDLED;
-       else
-               return IRQ_NONE;
+       /* On Lancer, clear-intr bit of the EQ DB does not work.
+        * INTx is de-asserted only on notifying num evts.
+        */
+       if (lancer_chip(adapter))
+               num_evts = events_get(eqo);
+
+       /* The EQ-notify may not de-assert INTx rightaway, causing
+        * the ISR to be invoked again. So, return HANDLED even when
+        * num_evts is zero.
+        */
+       be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
+       napi_schedule(&eqo->napi);
+       return IRQ_HANDLED;
 }
 
 static irqreturn_t be_msix(int irq, void *dev)
@@ -2342,10 +2332,10 @@ static int be_irq_register(struct be_adapter *adapter)
                        return status;
        }
 
-       /* INTx */
+       /* INTx: only the first EQ is used */
        netdev->irq = adapter->pdev->irq;
        status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
-                       adapter);
+                            &adapter->eq_obj[0]);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "INTx request IRQ failed - err %d\n", status);
@@ -2367,7 +2357,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
 
        /* INTx */
        if (!msix_enabled(adapter)) {
-               free_irq(netdev->irq, adapter);
+               free_irq(netdev->irq, &adapter->eq_obj[0]);
                goto done;
        }
 
@@ -3023,8 +3013,10 @@ static void be_netpoll(struct net_device *netdev)
        struct be_eq_obj *eqo;
        int i;
 
-       for_all_evt_queues(adapter, eqo, i)
-               event_handle(eqo);
+       for_all_evt_queues(adapter, eqo, i) {
+               be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+               napi_schedule(&eqo->napi);
+       }
 
        return;
 }
index 0ce145e93545c5e102a95931ac5bdf2005ff23f9..b85b15a889811e00c4073a9130744f823ff36f9e 100644 (file)
@@ -1589,8 +1589,7 @@ void igb_reset(struct igb_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_mac_info *mac = &hw->mac;
        struct e1000_fc_info *fc = &hw->fc;
-       u32 pba = 0, tx_space, min_tx_space, min_rx_space;
-       u16 hwm;
+       u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
 
        /* Repartition Pba for greater than 9k mtu
         * To take effect CTRL.RST is required.
@@ -1665,7 +1664,7 @@ void igb_reset(struct igb_adapter *adapter)
        hwm = min(((pba << 10) * 9 / 10),
                        ((pba << 10) - 2 * adapter->max_frame_size));
 
-       fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
+       fc->high_water = hwm & 0xFFFFFFF0;      /* 16-byte granularity */
        fc->low_water = fc->high_water - 16;
        fc->pause_time = 0xFFFF;
        fc->send_xon = 1;
index 3e18045d8f89f69d5e374d246f49a91320399a0c..d9fa999b16856e41f79a750c92d94223dfaa6d98 100644 (file)
@@ -46,6 +46,7 @@
 #define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
 
+#define E1000_RXDEXT_STATERR_LB    0x00040000
 #define E1000_RXDEXT_STATERR_CE    0x01000000
 #define E1000_RXDEXT_STATERR_SE    0x02000000
 #define E1000_RXDEXT_STATERR_SEQ   0x04000000
index a895e2f7b34d99b4afa870cfaab801b1061ab9db..fdca7b6727764fb574ffc88a85f47ee81fdc5f23 100644 (file)
@@ -295,7 +295,7 @@ struct igbvf_info {
 
 /* hardware capability, feature, and workaround flags */
 #define IGBVF_FLAG_RX_CSUM_DISABLED             (1 << 0)
-
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP            (1 << 1)
 #define IGBVF_RX_DESC_ADV(R, i)     \
        (&((((R).desc))[i].rx_desc))
 #define IGBVF_TX_DESC_ADV(R, i)     \
index 4051ec404613c4788485f58125f9d76888a1dc69..3d92ad8cdca144f28880ace6ff7cd6a9aa43dd51 100644 (file)
@@ -47,7 +47,7 @@
 
 #include "igbvf.h"
 
-#define DRV_VERSION "2.0.1-k"
+#define DRV_VERSION "2.0.2-k"
 char igbvf_driver_name[] = "igbvf";
 const char igbvf_driver_version[] = DRV_VERSION;
 static const char igbvf_driver_string[] =
@@ -107,12 +107,19 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
                               struct sk_buff *skb,
                               u32 status, u16 vlan)
 {
+       u16 vid;
+
        if (status & E1000_RXD_STAT_VP) {
-               u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+               if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
+                   (status & E1000_RXDEXT_STATERR_LB))
+                       vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+               else
+                       vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
                if (test_bit(vid, adapter->active_vlans))
                        __vlan_hwaccel_put_tag(skb, vid);
        }
-       netif_receive_skb(skb);
+
+       napi_gro_receive(&adapter->rx_ring->napi, skb);
 }
 
 static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
@@ -2767,6 +2774,10 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
        /* reset the hardware with the new settings */
        igbvf_reset(adapter);
 
+       /* set hardware-specific flags */
+       if (adapter->hw.mac.type == e1000_vfadapt_i350)
+               adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;
+
        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
index 7ff4c4fdcb0d68aa80a91238741f47dbceba6094..8e786764c60ea186084a7c5b18e9641abeef2a0e 100644 (file)
@@ -483,6 +483,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP         (u32)(1 << 9)
 #define IXGBE_FLAG2_PTP_ENABLED                        (u32)(1 << 10)
 #define IXGBE_FLAG2_PTP_PPS_ENABLED            (u32)(1 << 11)
+#define IXGBE_FLAG2_BRIDGE_MODE_VEB            (u32)(1 << 12)
 
        /* Tx fast path data */
        int num_tx_queues;
index 80e3cb7c39e82c13e5a465361e2d20ba238e90b5..484bbedffe2abd2876a27fe53a2ae7d4e13ff313 100644 (file)
@@ -63,11 +63,7 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define MAJ 3
-#define MIN 9
-#define BUILD 15
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-       __stringify(BUILD) "-k"
+#define DRV_VERSION "3.11.33-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2012 Intel Corporation.";
@@ -703,6 +699,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_hw_stats *hwstats = &adapter->stats;
        u32 xoff[8] = {0};
+       u8 tc;
        int i;
        bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
 
@@ -716,21 +713,26 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
 
        /* update stats for each tc, only valid with PFC enabled */
        for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+               u32 pxoffrxc;
+
                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
-                       xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+                       pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
                        break;
                default:
-                       xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+                       pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
                }
-               hwstats->pxoffrxc[i] += xoff[i];
+               hwstats->pxoffrxc[i] += pxoffrxc;
+               /* Get the TC for given UP */
+               tc = netdev_get_prio_tc_map(adapter->netdev, i);
+               xoff[tc] += pxoffrxc;
        }
 
        /* disarm tx queues that have received xoff frames */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
-               u8 tc = tx_ring->dcb_tc;
 
+               tc = tx_ring->dcb_tc;
                if (xoff[tc])
                        clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
        }
@@ -3170,14 +3172,6 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        ixgbe_configure_srrctl(adapter, ring);
        ixgbe_configure_rscctl(adapter, ring);
 
-       /* If operating in IOV mode set RLPML for X540 */
-       if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
-           hw->mac.type == ixgbe_mac_X540) {
-               rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
-               rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
-                           ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
-       }
-
        if (hw->mac.type == ixgbe_mac_82598EB) {
                /*
                 * enable cache line friendly hardware writes:
@@ -3249,6 +3243,8 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
+       if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
+               IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 
        /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
        hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
@@ -7041,11 +7037,13 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
                        continue;
 
                mode = nla_get_u16(attr);
-               if (mode == BRIDGE_MODE_VEPA)
+               if (mode == BRIDGE_MODE_VEPA) {
                        reg = 0;
-               else if (mode == BRIDGE_MODE_VEB)
+                       adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB;
+               } else if (mode == BRIDGE_MODE_VEB) {
                        reg = IXGBE_PFDTXGSWC_VT_LBEN;
-               else
+                       adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
+               } else
                        return -EINVAL;
 
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
@@ -7066,7 +7064,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return 0;
 
-       if (IXGBE_READ_REG(&adapter->hw, IXGBE_PFDTXGSWC) & 1)
+       if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
                mode = BRIDGE_MODE_VEB;
        else
                mode = BRIDGE_MODE_VEPA;
index 4993642d1ce13ba25c3c0351074fd9a69e99fd42..85cddac673ef41716d9a34f027b1bcbb0cfb903d 100644 (file)
@@ -119,6 +119,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
 
        /* Initialize default switching mode VEB */
        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+       adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
 
        /* If call to enable VFs succeeded then allocate memory
         * for per VF control structures.
index ddba83ef3f4468ddc5471b3b04ddeb314c0d4c7f..c4b8ced8382989ac539ea9458f74082f0e735866 100644 (file)
@@ -5,4 +5,5 @@
 obj-$(CONFIG_QLCNIC) := qlcnic.o
 
 qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
-       qlcnic_ethtool.o qlcnic_ctx.o
+       qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
+       qlcnic_sysfs.o qlcnic_minidump.o
index ec29f7988d4224e91a0cce2f191293410a84e021..082eecbf414832739d577fe9173626d15c1df206 100644 (file)
 #define QLCNIC_CT_DEFAULT_RX_BUF_LEN   2048
 #define QLCNIC_LRO_BUFFER_EXTRA                2048
 
-/* Opcodes to be used with the commands */
-#define TX_ETHER_PKT   0x01
-#define TX_TCP_PKT     0x02
-#define TX_UDP_PKT     0x03
-#define TX_IP_PKT      0x04
-#define TX_TCP_LSO     0x05
-#define TX_TCP_LSO6    0x06
-#define TX_TCPV6_PKT   0x0b
-#define TX_UDPV6_PKT   0x0c
-
 /* Tx defines */
 #define QLCNIC_MAX_FRAGS_PER_TX        14
 #define MAX_TSO_HEADER_DESC    2
  * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when
  * we are doing LSO (above the 1500 size packet) only.
  */
-
-#define FLAGS_VLAN_TAGGED      0x10
-#define FLAGS_VLAN_OOB         0x40
-
-#define qlcnic_set_tx_vlan_tci(cmd_desc, v)    \
-       (cmd_desc)->vlan_TCI = cpu_to_le16(v);
-#define qlcnic_set_cmd_desc_port(cmd_desc, var)        \
-       ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
-#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)       \
-       ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
-
-#define qlcnic_set_tx_port(_desc, _port) \
-       ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
-
-#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
-       ((_desc)->flags_opcode |= \
-       cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
-
-#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
-       ((_desc)->nfrags__length = \
-       cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
-
 struct cmd_desc_type0 {
        u8 tcp_hdr_offset;      /* For LSO only */
        u8 ip_hdr_offset;       /* For LSO only */
@@ -203,65 +171,6 @@ struct rcv_desc {
        __le64 addr_buffer;
 } __packed;
 
-/* opcode field in status_desc */
-#define QLCNIC_SYN_OFFLOAD     0x03
-#define QLCNIC_RXPKT_DESC      0x04
-#define QLCNIC_OLD_RXPKT_DESC  0x3f
-#define QLCNIC_RESPONSE_DESC   0x05
-#define QLCNIC_LRO_DESC        0x12
-
-/* for status field in status_desc */
-#define STATUS_CKSUM_LOOP      0
-#define STATUS_CKSUM_OK                2
-
-/* owner bits of status_desc */
-#define STATUS_OWNER_HOST      (0x1ULL << 56)
-#define STATUS_OWNER_PHANTOM   (0x2ULL << 56)
-
-/* Status descriptor:
-   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
-   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
-   53-55 desc_cnt, 56-57 owner, 58-63 opcode
- */
-#define qlcnic_get_sts_port(sts_data)  \
-       ((sts_data) & 0x0F)
-#define qlcnic_get_sts_status(sts_data)        \
-       (((sts_data) >> 4) & 0x0F)
-#define qlcnic_get_sts_type(sts_data)  \
-       (((sts_data) >> 8) & 0x0F)
-#define qlcnic_get_sts_totallength(sts_data)   \
-       (((sts_data) >> 12) & 0xFFFF)
-#define qlcnic_get_sts_refhandle(sts_data)     \
-       (((sts_data) >> 28) & 0xFFFF)
-#define qlcnic_get_sts_prot(sts_data)  \
-       (((sts_data) >> 44) & 0x0F)
-#define qlcnic_get_sts_pkt_offset(sts_data)    \
-       (((sts_data) >> 48) & 0x1F)
-#define qlcnic_get_sts_desc_cnt(sts_data)      \
-       (((sts_data) >> 53) & 0x7)
-#define qlcnic_get_sts_opcode(sts_data)        \
-       (((sts_data) >> 58) & 0x03F)
-
-#define qlcnic_get_lro_sts_refhandle(sts_data)         \
-       ((sts_data) & 0x0FFFF)
-#define qlcnic_get_lro_sts_length(sts_data)    \
-       (((sts_data) >> 16) & 0x0FFFF)
-#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)     \
-       (((sts_data) >> 32) & 0x0FF)
-#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)     \
-       (((sts_data) >> 40) & 0x0FF)
-#define qlcnic_get_lro_sts_timestamp(sts_data) \
-       (((sts_data) >> 48) & 0x1)
-#define qlcnic_get_lro_sts_type(sts_data)      \
-       (((sts_data) >> 49) & 0x7)
-#define qlcnic_get_lro_sts_push_flag(sts_data)         \
-       (((sts_data) >> 52) & 0x1)
-#define qlcnic_get_lro_sts_seq_number(sts_data)                \
-       ((sts_data) & 0x0FFFFFFFF)
-#define qlcnic_get_lro_sts_mss(sts_data1)              \
-       ((sts_data1 >> 32) & 0x0FFFF)
-
-
 struct status_desc {
        __le64 status_desc_data[2];
 } __attribute__ ((aligned(16)));
@@ -1346,142 +1255,7 @@ struct qlcnic_esw_statistics {
        struct __qlcnic_esw_statistics tx;
 };
 
-struct qlcnic_common_entry_hdr {
-       u32     type;
-       u32     offset;
-       u32     cap_size;
-       u8      mask;
-       u8      rsvd[2];
-       u8      flags;
-} __packed;
-
-struct __crb {
-       u32     addr;
-       u8      stride;
-       u8      rsvd1[3];
-       u32     data_size;
-       u32     no_ops;
-       u32     rsvd2[4];
-} __packed;
-
-struct __ctrl {
-       u32     addr;
-       u8      stride;
-       u8      index_a;
-       u16     timeout;
-       u32     data_size;
-       u32     no_ops;
-       u8      opcode;
-       u8      index_v;
-       u8      shl_val;
-       u8      shr_val;
-       u32     val1;
-       u32     val2;
-       u32     val3;
-} __packed;
-
-struct __cache {
-       u32     addr;
-       u16     stride;
-       u16     init_tag_val;
-       u32     size;
-       u32     no_ops;
-       u32     ctrl_addr;
-       u32     ctrl_val;
-       u32     read_addr;
-       u8      read_addr_stride;
-       u8      read_addr_num;
-       u8      rsvd1[2];
-} __packed;
-
-struct __ocm {
-       u8      rsvd[8];
-       u32     size;
-       u32     no_ops;
-       u8      rsvd1[8];
-       u32     read_addr;
-       u32     read_addr_stride;
-} __packed;
-
-struct __mem {
-       u8      rsvd[24];
-       u32     addr;
-       u32     size;
-} __packed;
-
-struct __mux {
-       u32     addr;
-       u8      rsvd[4];
-       u32     size;
-       u32     no_ops;
-       u32     val;
-       u32     val_stride;
-       u32     read_addr;
-       u8      rsvd2[4];
-} __packed;
-
-struct __queue {
-       u32     sel_addr;
-       u16     stride;
-       u8      rsvd[2];
-       u32     size;
-       u32     no_ops;
-       u8      rsvd2[8];
-       u32     read_addr;
-       u8      read_addr_stride;
-       u8      read_addr_cnt;
-       u8      rsvd3[2];
-} __packed;
-
-struct qlcnic_dump_entry {
-       struct qlcnic_common_entry_hdr hdr;
-       union {
-               struct __crb    crb;
-               struct __cache  cache;
-               struct __ocm    ocm;
-               struct __mem    mem;
-               struct __mux    mux;
-               struct __queue  que;
-               struct __ctrl   ctrl;
-       } region;
-} __packed;
-
-enum op_codes {
-       QLCNIC_DUMP_NOP         = 0,
-       QLCNIC_DUMP_READ_CRB    = 1,
-       QLCNIC_DUMP_READ_MUX    = 2,
-       QLCNIC_DUMP_QUEUE       = 3,
-       QLCNIC_DUMP_BRD_CONFIG  = 4,
-       QLCNIC_DUMP_READ_OCM    = 6,
-       QLCNIC_DUMP_PEG_REG     = 7,
-       QLCNIC_DUMP_L1_DTAG     = 8,
-       QLCNIC_DUMP_L1_ITAG     = 9,
-       QLCNIC_DUMP_L1_DATA     = 11,
-       QLCNIC_DUMP_L1_INST     = 12,
-       QLCNIC_DUMP_L2_DTAG     = 21,
-       QLCNIC_DUMP_L2_ITAG     = 22,
-       QLCNIC_DUMP_L2_DATA     = 23,
-       QLCNIC_DUMP_L2_INST     = 24,
-       QLCNIC_DUMP_READ_ROM    = 71,
-       QLCNIC_DUMP_READ_MEM    = 72,
-       QLCNIC_DUMP_READ_CTRL   = 98,
-       QLCNIC_DUMP_TLHDR       = 99,
-       QLCNIC_DUMP_RDEND       = 255
-};
-
-#define QLCNIC_DUMP_WCRB       BIT_0
-#define QLCNIC_DUMP_RWCRB      BIT_1
-#define QLCNIC_DUMP_ANDCRB     BIT_2
-#define QLCNIC_DUMP_ORCRB      BIT_3
-#define QLCNIC_DUMP_POLLCRB    BIT_4
-#define QLCNIC_DUMP_RD_SAVE    BIT_5
-#define QLCNIC_DUMP_WRT_SAVED  BIT_6
-#define QLCNIC_DUMP_MOD_SAVE_ST        BIT_7
-#define QLCNIC_DUMP_SKIP       BIT_7
-
-#define QLCNIC_DUMP_MASK_MIN           3
 #define QLCNIC_DUMP_MASK_DEF           0x1f
-#define QLCNIC_DUMP_MASK_MAX           0xff
 #define QLCNIC_FORCE_FW_DUMP_KEY       0xdeadfeed
 #define QLCNIC_ENABLE_FW_DUMP          0xaddfeed
 #define QLCNIC_DISABLE_FW_DUMP         0xbadfeed
@@ -1489,12 +1263,6 @@ enum op_codes {
 #define QLCNIC_SET_QUIESCENT           0xadd00010
 #define QLCNIC_RESET_QUIESCENT         0xadd00020
 
-struct qlcnic_dump_operations {
-       enum op_codes opcode;
-       u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
-                      __le32 *);
-};
-
 struct _cdrp_cmd {
        u32 cmd;
        u32 arg1;
@@ -1552,6 +1320,8 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
 #define __QLCNIC_MAX_LED_RATE  0xf
 #define __QLCNIC_MAX_LED_STATE 0x2
 
+#define MAX_CTL_CHECK 1000
+
 int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
 int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
 int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
@@ -1648,6 +1418,26 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
 int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
 extern int qlcnic_config_tso;
 
+int qlcnic_napi_add(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_napi_del(struct qlcnic_adapter *adapter);
+void qlcnic_napi_enable(struct qlcnic_adapter *adapter);
+void qlcnic_napi_disable(struct qlcnic_adapter *adapter);
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
+void qlcnic_free_tx_rings(struct qlcnic_adapter *);
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
+void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
+int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_set_vlan_config(struct qlcnic_adapter *,
+                           struct qlcnic_esw_func_cfg *);
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
+                                     struct qlcnic_esw_func_cfg *);
+
 /*
  * QLOGIC Board information
  */
@@ -1694,6 +1484,21 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
                                tx_ring->producer;
 }
 
+static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+       writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+       writel(0x1, sds_ring->crb_intr_mask);
+
+       if (!QLCNIC_IS_MSI_FAMILY(adapter))
+               writel(0xfbff, adapter->tgt_mask_reg);
+}
+
 extern const struct ethtool_ops qlcnic_ethtool_ops;
 extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
 
index aeacf1deea47ef5853d910ff9de1e194534b80fc..ff879cd2925b76545f0e9090a8cbf01044719482 100644 (file)
@@ -266,33 +266,6 @@ static const unsigned crb_hub_agt[64] = {
        0,
 };
 
-static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
-{
-       u32 dest;
-       void __iomem *window_reg;
-
-       dest = addr & 0xFFFF0000;
-       window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
-       writel(dest, window_reg);
-       readl(window_reg);
-       window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
-       *data = readl(window_reg);
-}
-
-static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
-{
-       u32 dest;
-       void __iomem *window_reg;
-
-       dest = addr & 0xFFFF0000;
-       window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
-       writel(dest, window_reg);
-       readl(window_reg);
-       window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
-       writel(data, window_reg);
-       readl(window_reg);
-}
-
 /*  PCI Windowing for DDR regions.  */
 
 #define QLCNIC_PCIE_SEM_TIMEOUT        10000
@@ -1088,8 +1061,6 @@ qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
        mutex_unlock(&adapter->ahw->mem_lock);
 }
 
-#define MAX_CTL_CHECK   1000
-
 int
 qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
                u64 off, u64 data)
@@ -1347,460 +1318,3 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
 
        return rv;
 }
-
-/* FW dump related functions */
-static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
-                          struct qlcnic_dump_entry *entry, __le32 *buffer)
-{
-       int i;
-       u32 addr, data;
-       struct __crb *crb = &entry->region.crb;
-       void __iomem *base = adapter->ahw->pci_base0;
-
-       addr = crb->addr;
-
-       for (i = 0; i < crb->no_ops; i++) {
-               qlcnic_read_dump_reg(addr, base, &data);
-               *buffer++ = cpu_to_le32(addr);
-               *buffer++ = cpu_to_le32(data);
-               addr += crb->stride;
-       }
-       return crb->no_ops * 2 * sizeof(u32);
-}
-
-static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
-                           struct qlcnic_dump_entry *entry, __le32 *buffer)
-{
-       int i, k, timeout = 0;
-       void __iomem *base = adapter->ahw->pci_base0;
-       u32 addr, data;
-       u8 opcode, no_ops;
-       struct __ctrl *ctr = &entry->region.ctrl;
-       struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
-
-       addr = ctr->addr;
-       no_ops = ctr->no_ops;
-
-       for (i = 0; i < no_ops; i++) {
-               k = 0;
-               opcode = 0;
-               for (k = 0; k < 8; k++) {
-                       if (!(ctr->opcode & (1 << k)))
-                               continue;
-                       switch (1 << k) {
-                       case QLCNIC_DUMP_WCRB:
-                               qlcnic_write_dump_reg(addr, base, ctr->val1);
-                               break;
-                       case QLCNIC_DUMP_RWCRB:
-                               qlcnic_read_dump_reg(addr, base, &data);
-                               qlcnic_write_dump_reg(addr, base, data);
-                               break;
-                       case QLCNIC_DUMP_ANDCRB:
-                               qlcnic_read_dump_reg(addr, base, &data);
-                               qlcnic_write_dump_reg(addr, base,
-                                                     data & ctr->val2);
-                               break;
-                       case QLCNIC_DUMP_ORCRB:
-                               qlcnic_read_dump_reg(addr, base, &data);
-                               qlcnic_write_dump_reg(addr, base,
-                                                     data | ctr->val3);
-                               break;
-                       case QLCNIC_DUMP_POLLCRB:
-                               while (timeout <= ctr->timeout) {
-                                       qlcnic_read_dump_reg(addr, base, &data);
-                                       if ((data & ctr->val2) == ctr->val1)
-                                               break;
-                                       msleep(1);
-                                       timeout++;
-                               }
-                               if (timeout > ctr->timeout) {
-                                       dev_info(&adapter->pdev->dev,
-                                       "Timed out, aborting poll CRB\n");
-                                       return -EINVAL;
-                               }
-                               break;
-                       case QLCNIC_DUMP_RD_SAVE:
-                               if (ctr->index_a)
-                                       addr = t_hdr->saved_state[ctr->index_a];
-                               qlcnic_read_dump_reg(addr, base, &data);
-                               t_hdr->saved_state[ctr->index_v] = data;
-                               break;
-                       case QLCNIC_DUMP_WRT_SAVED:
-                               if (ctr->index_v)
-                                       data = t_hdr->saved_state[ctr->index_v];
-                               else
-                                       data = ctr->val1;
-                               if (ctr->index_a)
-                                       addr = t_hdr->saved_state[ctr->index_a];
-                               qlcnic_write_dump_reg(addr, base, data);
-                               break;
-                       case QLCNIC_DUMP_MOD_SAVE_ST:
-                               data = t_hdr->saved_state[ctr->index_v];
-                               data <<= ctr->shl_val;
-                               data >>= ctr->shr_val;
-                               if (ctr->val2)
-                                       data &= ctr->val2;
-                               data |= ctr->val3;
-                               data += ctr->val1;
-                               t_hdr->saved_state[ctr->index_v] = data;
-                               break;
-                       default:
-                               dev_info(&adapter->pdev->dev,
-                                       "Unknown opcode\n");
-                               break;
-                       }
-               }
-               addr += ctr->stride;
-       }
-       return 0;
-}
-
-static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
-                          struct qlcnic_dump_entry *entry, __le32 *buffer)
-{
-       int loop;
-       u32 val, data = 0;
-       struct __mux *mux = &entry->region.mux;
-       void __iomem *base = adapter->ahw->pci_base0;
-
-       val = mux->val;
-       for (loop = 0; loop < mux->no_ops; loop++) {
-               qlcnic_write_dump_reg(mux->addr, base, val);
-               qlcnic_read_dump_reg(mux->read_addr, base, &data);
-               *buffer++ = cpu_to_le32(val);
-               *buffer++ = cpu_to_le32(data);
-               val += mux->val_stride;
-       }
-       return 2 * mux->no_ops * sizeof(u32);
-}
-
-static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
-                          struct qlcnic_dump_entry *entry, __le32 *buffer)
-{
-       int i, loop;
-       u32 cnt, addr, data, que_id = 0;
-       void __iomem *base = adapter->ahw->pci_base0;
-       struct __queue *que = &entry->region.que;
-
-       addr = que->read_addr;
-       cnt = que->read_addr_cnt;
-
-       for (loop = 0; loop < que->no_ops; loop++) {
-               qlcnic_write_dump_reg(que->sel_addr, base, que_id);
-               addr = que->read_addr;
-               for (i = 0; i < cnt; i++) {
-                       qlcnic_read_dump_reg(addr, base, &data);
-                       *buffer++ = cpu_to_le32(data);
-                       addr += que->read_addr_stride;
-               }
-               que_id += que->stride;
-       }
-       return que->no_ops * cnt * sizeof(u32);
-}
-
-static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
-                          struct qlcnic_dump_entry *entry, __le32 *buffer)
-{
-       int i;
-       u32 data;
-       void __iomem *addr;
-       struct __ocm *ocm = &entry->region.ocm;
-
-       addr = adapter->ahw->pci_base0 + ocm->read_addr;
-       for (i = 0; i < ocm->no_ops; i++) {
-               data = readl(addr);
-               *buffer++ = cpu_to_le32(data);
-               addr += ocm->read_addr_stride;
-       }
-       return ocm->no_ops * sizeof(u32);
-}
-
-static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
-                          struct qlcnic_dump_entry *entry, __le32 *buffer)
-{
-       int i, count = 0;
-       u32 fl_addr, size, val, lck_val, addr;
-       struct __mem *rom = &entry->region.mem;
-       void __iomem *base = adapter->ahw->pci_base0;
-
-       fl_addr = rom->addr;
-       size = rom->size/4;
-lock_try:
-       lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
-       if (!lck_val && count < MAX_CTL_CHECK) {
-               msleep(10);
-               count++;
-               goto lock_try;
-       }
-       writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
-       for (i = 0; i < size; i++) {
-               addr = fl_addr & 0xFFFF0000;
-               qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
-               addr = LSW(fl_addr) + FLASH_ROM_DATA;
-               qlcnic_read_dump_reg(addr, base, &val);
-               fl_addr += 4;
-               *buffer++ = cpu_to_le32(val);
-       }
-       readl(base + QLCNIC_FLASH_SEM2_ULK);
-       return rom->size;
-}
-
-static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
-                               struct qlcnic_dump_entry *entry, __le32 *buffer)
-{
-       int i;
-       u32 cnt, val, data, addr;
-       void __iomem *base = adapter->ahw->pci_base0;
-       struct __cache *l1 = &entry->region.cache;
-
-       val = l1->init_tag_val;
-
-       for (i = 0; i < l1->no_ops; i++) {
-               qlcnic_write_dump_reg(l1->addr, base, val);
-               qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
-               addr = l1->read_addr;
-               cnt = l1->read_addr_num;
-               while (cnt) {
-                       qlcnic_read_dump_reg(addr, base, &data);
-                       *buffer++ = cpu_to_le32(data);
-                       addr += l1->read_addr_stride;
-                       cnt--;
-               }
-               val += l1->stride;
-       }
-       return l1->no_ops * l1->read_addr_num * sizeof(u32);
-}
-
-static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
-                               struct qlcnic_dump_entry *entry, __le32 *buffer)
-{
-       int i;
-       u32 cnt, val, data, addr;
-       u8 poll_mask, poll_to, time_out = 0;
-       void __iomem *base = adapter->ahw->pci_base0;
-       struct __cache *l2 = &entry->region.cache;
-
-       val = l2->init_tag_val;
-       poll_mask = LSB(MSW(l2->ctrl_val));
-       poll_to = MSB(MSW(l2->ctrl_val));
-
-       for (i = 0; i < l2->no_ops; i++) {
-               qlcnic_write_dump_reg(l2->addr, base, val);
-               if (LSW(l2->ctrl_val))
-                       qlcnic_write_dump_reg(l2->ctrl_addr, base,
-                                             LSW(l2->ctrl_val));
-               if (!poll_mask)
-                       goto skip_poll;
-               do {
-                       qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
-                       if (!(data & poll_mask))
-                               break;
-                       msleep(1);
-                       time_out++;
-               } while (time_out <= poll_to);
-
-               if (time_out > poll_to) {
-                       dev_err(&adapter->pdev->dev,
-                               "Timeout exceeded in %s, aborting dump\n",
-                               __func__);
-                       return -EINVAL;
-               }
-skip_poll:
-               addr = l2->read_addr;
-               cnt = l2->read_addr_num;
-               while (cnt) {
-                       qlcnic_read_dump_reg(addr, base, &data);
-                       *buffer++ = cpu_to_le32(data);
-                       addr += l2->read_addr_stride;
-                       cnt--;
-               }
-               val += l2->stride;
-       }
-       return l2->no_ops * l2->read_addr_num * sizeof(u32);
-}
-
-static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
-                             struct qlcnic_dump_entry *entry, __le32 *buffer)
-{
-       u32 addr, data, test, ret = 0;
-       int i, reg_read;
-       struct __mem *mem = &entry->region.mem;
-       void __iomem *base = adapter->ahw->pci_base0;
-
-       reg_read = mem->size;
-       addr = mem->addr;
-       /* check for data size of multiple of 16 and 16 byte alignment */
-       if ((addr & 0xf) || (reg_read%16)) {
-               dev_info(&adapter->pdev->dev,
-                       "Unaligned memory addr:0x%x size:0x%x\n",
-                       addr, reg_read);
-               return -EINVAL;
-       }
-
-       mutex_lock(&adapter->ahw->mem_lock);
-
-       while (reg_read != 0) {
-               qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
-               qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
-               qlcnic_write_dump_reg(MIU_TEST_CTR, base,
-                                     TA_CTL_ENABLE | TA_CTL_START);
-
-               for (i = 0; i < MAX_CTL_CHECK; i++) {
-                       qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
-                       if (!(test & TA_CTL_BUSY))
-                               break;
-               }
-               if (i == MAX_CTL_CHECK) {
-                       if (printk_ratelimit()) {
-                               dev_err(&adapter->pdev->dev,
-                                       "failed to read through agent\n");
-                               ret = -EINVAL;
-                               goto out;
-                       }
-               }
-               for (i = 0; i < 4; i++) {
-                       qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
-                                            &data);
-                       *buffer++ = cpu_to_le32(data);
-               }
-               addr += 16;
-               reg_read -= 16;
-               ret += 16;
-       }
-out:
-       mutex_unlock(&adapter->ahw->mem_lock);
-       return mem->size;
-}
-
-static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
-                          struct qlcnic_dump_entry *entry, __le32 *buffer)
-{
-       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
-       return 0;
-}
-
-static const struct qlcnic_dump_operations fw_dump_ops[] = {
-       { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
-       { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
-       { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
-       { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
-       { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
-       { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
-       { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
-       { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
-       { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
-       { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
-       { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
-       { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
-       { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
-       { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
-       { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
-       { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
-       { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
-       { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
-       { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
-       { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
-};
-
-/* Walk the template and collect dump for each entry in the dump template */
-static int
-qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
-       u32 size)
-{
-       int ret = 1;
-       if (size != entry->hdr.cap_size) {
-               dev_info(dev,
-               "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
-               entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
-               dev_info(dev, "Aborting further dump capture\n");
-               ret = 0;
-       }
-       return ret;
-}
-
-int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
-{
-       __le32 *buffer;
-       char mesg[64];
-       char *msg[] = {mesg, NULL};
-       int i, k, ops_cnt, ops_index, dump_size = 0;
-       u32 entry_offset, dump, no_entries, buf_offset = 0;
-       struct qlcnic_dump_entry *entry;
-       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
-       struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
-
-       if (fw_dump->clr) {
-               dev_info(&adapter->pdev->dev,
-                       "Previous dump not cleared, not capturing dump\n");
-               return -EIO;
-       }
-       /* Calculate the size for dump data area only */
-       for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
-               if (i & tmpl_hdr->drv_cap_mask)
-                       dump_size += tmpl_hdr->cap_sizes[k];
-       if (!dump_size)
-               return -EIO;
-
-       fw_dump->data = vzalloc(dump_size);
-       if (!fw_dump->data) {
-               dev_info(&adapter->pdev->dev,
-                       "Unable to allocate (%d KB) for fw dump\n",
-                       dump_size/1024);
-               return -ENOMEM;
-       }
-       buffer = fw_dump->data;
-       fw_dump->size = dump_size;
-       no_entries = tmpl_hdr->num_entries;
-       ops_cnt = ARRAY_SIZE(fw_dump_ops);
-       entry_offset = tmpl_hdr->offset;
-       tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
-       tmpl_hdr->sys_info[1] = adapter->fw_version;
-
-       for (i = 0; i < no_entries; i++) {
-               entry = (void *)tmpl_hdr + entry_offset;
-               if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
-                       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
-                       entry_offset += entry->hdr.offset;
-                       continue;
-               }
-               /* Find the handler for this entry */
-               ops_index = 0;
-               while (ops_index < ops_cnt) {
-                       if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
-                               break;
-                       ops_index++;
-               }
-               if (ops_index == ops_cnt) {
-                       dev_info(&adapter->pdev->dev,
-                               "Invalid entry type %d, exiting dump\n",
-                               entry->hdr.type);
-                       goto error;
-               }
-               /* Collect dump for this entry */
-               dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
-               if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
-                       dump))
-                       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
-               buf_offset += entry->hdr.cap_size;
-               entry_offset += entry->hdr.offset;
-               buffer = fw_dump->data + buf_offset;
-       }
-       if (dump_size != buf_offset) {
-               dev_info(&adapter->pdev->dev,
-                       "Captured(%d) and expected size(%d) do not match\n",
-                       buf_offset, dump_size);
-               goto error;
-       } else {
-               fw_dump->clr = 1;
-               snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
-                       adapter->netdev->name);
-               dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
-                       fw_dump->size);
-               /* Send a udev event to notify availability of FW dump */
-               kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
-               return 0;
-       }
-error:
-       vfree(fw_dump->data);
-       return -EINVAL;
-}
index ccbef8491d2cb623bc0b412f0c75cc3db11c4a5b..d8610ea56a48f5edefe27f986c916221d3d00c6e 100644 (file)
@@ -25,10 +25,6 @@ static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
 
 #define QLCNIC_ADDR_ERROR (0xffffffff)
 
-static void
-qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
-               struct qlcnic_host_rds_ring *rds_ring);
-
 static int
 qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
 
@@ -1332,629 +1328,3 @@ qlcnic_release_firmware(struct qlcnic_adapter *adapter)
        release_firmware(adapter->fw);
        adapter->fw = NULL;
 }
-
-static void
-qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
-                               struct qlcnic_fw_msg *msg)
-{
-       u32 cable_OUI;
-       u16 cable_len;
-       u16 link_speed;
-       u8  link_status, module, duplex, autoneg;
-       u8 lb_status = 0;
-       struct net_device *netdev = adapter->netdev;
-
-       adapter->has_link_events = 1;
-
-       cable_OUI = msg->body[1] & 0xffffffff;
-       cable_len = (msg->body[1] >> 32) & 0xffff;
-       link_speed = (msg->body[1] >> 48) & 0xffff;
-
-       link_status = msg->body[2] & 0xff;
-       duplex = (msg->body[2] >> 16) & 0xff;
-       autoneg = (msg->body[2] >> 24) & 0xff;
-       lb_status = (msg->body[2] >> 32) & 0x3;
-
-       module = (msg->body[2] >> 8) & 0xff;
-       if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
-               dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
-                               "length %d\n", cable_OUI, cable_len);
-       else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
-               dev_info(&netdev->dev, "unsupported cable length %d\n",
-                               cable_len);
-
-       if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
-           lb_status == QLCNIC_ELB_MODE))
-               adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
-
-       qlcnic_advert_link_change(adapter, link_status);
-
-       if (duplex == LINKEVENT_FULL_DUPLEX)
-               adapter->link_duplex = DUPLEX_FULL;
-       else
-               adapter->link_duplex = DUPLEX_HALF;
-
-       adapter->module_type = module;
-       adapter->link_autoneg = autoneg;
-
-       if (link_status) {
-               adapter->link_speed = link_speed;
-       } else {
-               adapter->link_speed = SPEED_UNKNOWN;
-               adapter->link_duplex = DUPLEX_UNKNOWN;
-       }
-}
-
-static void
-qlcnic_handle_fw_message(int desc_cnt, int index,
-               struct qlcnic_host_sds_ring *sds_ring)
-{
-       struct qlcnic_fw_msg msg;
-       struct status_desc *desc;
-       struct qlcnic_adapter *adapter;
-       struct device *dev;
-       int i = 0, opcode, ret;
-
-       while (desc_cnt > 0 && i < 8) {
-               desc = &sds_ring->desc_head[index];
-               msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
-               msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
-
-               index = get_next_index(index, sds_ring->num_desc);
-               desc_cnt--;
-       }
-
-       adapter = sds_ring->adapter;
-       dev = &adapter->pdev->dev;
-       opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
-
-       switch (opcode) {
-       case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
-               qlcnic_handle_linkevent(adapter, &msg);
-               break;
-       case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
-               ret = (u32)(msg.body[1]);
-               switch (ret) {
-               case 0:
-                       adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
-                       break;
-               case 1:
-                       dev_info(dev, "loopback already in progress\n");
-                       adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
-                       break;
-               case 2:
-                       dev_info(dev, "loopback cable is not connected\n");
-                       adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
-                       break;
-               default:
-                       dev_info(dev, "loopback configure request failed,"
-                                       " ret %x\n", ret);
-                       adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
-                       break;
-               }
-               break;
-       default:
-               break;
-       }
-}
-
-static int
-qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
-               struct qlcnic_host_rds_ring *rds_ring,
-               struct qlcnic_rx_buffer *buffer)
-{
-       struct sk_buff *skb;
-       dma_addr_t dma;
-       struct pci_dev *pdev = adapter->pdev;
-
-       skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
-       if (!skb) {
-               adapter->stats.skb_alloc_failure++;
-               return -ENOMEM;
-       }
-
-       skb_reserve(skb, NET_IP_ALIGN);
-
-       dma = pci_map_single(pdev, skb->data,
-                       rds_ring->dma_size, PCI_DMA_FROMDEVICE);
-
-       if (pci_dma_mapping_error(pdev, dma)) {
-               adapter->stats.rx_dma_map_error++;
-               dev_kfree_skb_any(skb);
-               return -ENOMEM;
-       }
-
-       buffer->skb = skb;
-       buffer->dma = dma;
-
-       return 0;
-}
-
-static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
-               struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
-{
-       struct qlcnic_rx_buffer *buffer;
-       struct sk_buff *skb;
-
-       buffer = &rds_ring->rx_buf_arr[index];
-
-       if (unlikely(buffer->skb == NULL)) {
-               WARN_ON(1);
-               return NULL;
-       }
-
-       pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
-                       PCI_DMA_FROMDEVICE);
-
-       skb = buffer->skb;
-
-       if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
-           (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
-               adapter->stats.csummed++;
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else {
-               skb_checksum_none_assert(skb);
-       }
-
-       buffer->skb = NULL;
-
-       return skb;
-}
-
-static inline int
-qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
-                       u16 *vlan_tag)
-{
-       struct ethhdr *eth_hdr;
-
-       if (!__vlan_get_tag(skb, vlan_tag)) {
-               eth_hdr = (struct ethhdr *) skb->data;
-               memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
-               skb_pull(skb, VLAN_HLEN);
-       }
-       if (!adapter->pvid)
-               return 0;
-
-       if (*vlan_tag == adapter->pvid) {
-               /* Outer vlan tag. Packet should follow non-vlan path */
-               *vlan_tag = 0xffff;
-               return 0;
-       }
-       if (adapter->flags & QLCNIC_TAGGING_ENABLED)
-               return 0;
-
-       return -EINVAL;
-}
-
-static struct qlcnic_rx_buffer *
-qlcnic_process_rcv(struct qlcnic_adapter *adapter,
-               struct qlcnic_host_sds_ring *sds_ring,
-               int ring, u64 sts_data0)
-{
-       struct net_device *netdev = adapter->netdev;
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-       struct qlcnic_rx_buffer *buffer;
-       struct sk_buff *skb;
-       struct qlcnic_host_rds_ring *rds_ring;
-       int index, length, cksum, pkt_offset;
-       u16 vid = 0xffff;
-
-       if (unlikely(ring >= adapter->max_rds_rings))
-               return NULL;
-
-       rds_ring = &recv_ctx->rds_rings[ring];
-
-       index = qlcnic_get_sts_refhandle(sts_data0);
-       if (unlikely(index >= rds_ring->num_desc))
-               return NULL;
-
-       buffer = &rds_ring->rx_buf_arr[index];
-
-       length = qlcnic_get_sts_totallength(sts_data0);
-       cksum  = qlcnic_get_sts_status(sts_data0);
-       pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
-
-       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
-       if (!skb)
-               return buffer;
-
-       if (length > rds_ring->skb_size)
-               skb_put(skb, rds_ring->skb_size);
-       else
-               skb_put(skb, length);
-
-       if (pkt_offset)
-               skb_pull(skb, pkt_offset);
-
-       if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
-               adapter->stats.rxdropped++;
-               dev_kfree_skb(skb);
-               return buffer;
-       }
-
-       skb->protocol = eth_type_trans(skb, netdev);
-
-       if (vid != 0xffff)
-               __vlan_hwaccel_put_tag(skb, vid);
-
-       napi_gro_receive(&sds_ring->napi, skb);
-
-       adapter->stats.rx_pkts++;
-       adapter->stats.rxbytes += length;
-
-       return buffer;
-}
-
-#define QLC_TCP_HDR_SIZE            20
-#define QLC_TCP_TS_OPTION_SIZE      12
-#define QLC_TCP_TS_HDR_SIZE         (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
-
-/* Rebuild one firmware-aggregated (LRO) TCP flow from an RDS ring buffer:
- * patch the IP total length/checksum and the TCP psh/seq fields so the
- * stack sees a single coherent super-frame, then hand it up with
- * netif_receive_skb().  Returns the rx_buffer so the caller can recycle
- * it, or NULL when the status descriptor references a bad ring/index.
- */
-static struct qlcnic_rx_buffer *
-qlcnic_process_lro(struct qlcnic_adapter *adapter,
-               int ring, u64 sts_data0, u64 sts_data1)
-{
-       struct net_device *netdev = adapter->netdev;
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-       struct qlcnic_rx_buffer *buffer;
-       struct sk_buff *skb;
-       struct qlcnic_host_rds_ring *rds_ring;
-       struct iphdr *iph;
-       struct tcphdr *th;
-       bool push, timestamp;
-       int l2_hdr_offset, l4_hdr_offset;
-       int index;
-       u16 lro_length, length, data_offset;
-       u32 seq_number;
-       u16 vid = 0xffff;       /* 0xffff == no VLAN tag */
-
-       /* NOTE(review): ">" lets ring == max_rds_rings through;
-        * qlcnic_process_rcv_diag uses ">=" for the same check --
-        * confirm the intended bound (looks off-by-one). */
-       if (unlikely(ring > adapter->max_rds_rings))
-               return NULL;
-
-       rds_ring = &recv_ctx->rds_rings[ring];
-
-       index = qlcnic_get_lro_sts_refhandle(sts_data0);
-       /* NOTE(review): same off-by-one concern -- ">=" elsewhere. */
-       if (unlikely(index > rds_ring->num_desc))
-               return NULL;
-
-       buffer = &rds_ring->rx_buf_arr[index];
-
-       timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
-       lro_length = qlcnic_get_lro_sts_length(sts_data0);
-       l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
-       l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
-       push = qlcnic_get_lro_sts_push_flag(sts_data0);
-       seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
-
-       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
-       if (!skb)
-               return buffer;
-
-       /* Payload starts after the TCP header: 20B, or 32B with the
-        * timestamp option present. */
-       if (timestamp)
-               data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
-       else
-               data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
-
-       skb_put(skb, lro_length + data_offset);
-
-       skb_pull(skb, l2_hdr_offset);
-
-       if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
-               adapter->stats.rxdropped++;
-               dev_kfree_skb(skb);
-               return buffer;
-       }
-
-       skb->protocol = eth_type_trans(skb, netdev);
-
-       iph = (struct iphdr *)skb->data;
-       th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
-
-       /* Rewrite the IP header for the aggregated length and recompute
-        * its checksum; restore the original TCP push flag and sequence. */
-       length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
-       iph->tot_len = htons(length);
-       iph->check = 0;
-       iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-       th->psh = push;
-       th->seq = htonl(seq_number);
-
-       length = skb->len;
-
-       /* Propagate the firmware-reported MSS so GRO/GSO can resegment. */
-       if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
-               skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
-
-       if (vid != 0xffff)
-               __vlan_hwaccel_put_tag(skb, vid);
-       netif_receive_skb(skb);
-
-       adapter->stats.lro_pkts++;
-       adapter->stats.lrobytes += length;
-
-       return buffer;
-}
-
-/* NAPI poll worker for one status (SDS) ring: consume up to @max status
- * descriptors, dispatch each to the rx/LRO handler by opcode, return the
- * consumed buffers to their RDS free lists, replenish the rings, and
- * finally advance the consumer index in hardware.  Returns the number of
- * descriptors processed (used by the caller for NAPI budget accounting).
- */
-int
-qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
-{
-       struct qlcnic_adapter *adapter = sds_ring->adapter;
-       struct list_head *cur;
-       struct status_desc *desc;
-       struct qlcnic_rx_buffer *rxbuf;
-       u64 sts_data0, sts_data1;
-
-       int count = 0;
-       int opcode, ring, desc_cnt;
-       u32 consumer = sds_ring->consumer;
-
-       while (count < max) {
-               desc = &sds_ring->desc_head[consumer];
-               sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
-
-               /* Stop once we hit a descriptor the firmware still owns. */
-               if (!(sts_data0 & STATUS_OWNER_HOST))
-                       break;
-
-               desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
-               opcode = qlcnic_get_sts_opcode(sts_data0);
-
-               switch (opcode) {
-               case QLCNIC_RXPKT_DESC:
-               case QLCNIC_OLD_RXPKT_DESC:
-               case QLCNIC_SYN_OFFLOAD:
-                       ring = qlcnic_get_sts_type(sts_data0);
-                       rxbuf = qlcnic_process_rcv(adapter, sds_ring,
-                                       ring, sts_data0);
-                       break;
-               case QLCNIC_LRO_DESC:
-                       ring = qlcnic_get_lro_sts_type(sts_data0);
-                       sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
-                       rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
-                                                  sts_data1);
-                       break;
-               case QLCNIC_RESPONSE_DESC:
-                       qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
-                       /* fallthrough -- fw messages have no rx buffer */
-               default:
-                       goto skip;
-               }
-
-               WARN_ON(desc_cnt > 1);
-
-               if (likely(rxbuf))
-                       list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
-               else
-                       adapter->stats.null_rxbuf++;
-
-skip:
-               /* Give every descriptor of this entry back to the firmware. */
-               for (; desc_cnt > 0; desc_cnt--) {
-                       desc = &sds_ring->desc_head[consumer];
-                       desc->status_desc_data[0] =
-                               cpu_to_le64(STATUS_OWNER_PHANTOM);
-                       consumer = get_next_index(consumer, sds_ring->num_desc);
-               }
-               count++;
-       }
-
-       /* Re-arm consumed buffers with fresh skbs and post them back. */
-       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
-               struct qlcnic_host_rds_ring *rds_ring =
-                       &adapter->recv_ctx->rds_rings[ring];
-
-               if (!list_empty(&sds_ring->free_list[ring])) {
-                       list_for_each(cur, &sds_ring->free_list[ring]) {
-                               rxbuf = list_entry(cur,
-                                               struct qlcnic_rx_buffer, list);
-                               qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
-                       }
-                       spin_lock(&rds_ring->lock);
-                       list_splice_tail_init(&sds_ring->free_list[ring],
-                                               &rds_ring->free_list);
-                       spin_unlock(&rds_ring->lock);
-               }
-
-               qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
-       }
-
-       if (count) {
-               sds_ring->consumer = consumer;
-               writel(consumer, sds_ring->crb_sts_consumer);
-       }
-
-       return count;
-}
-
-/* Drain the RDS free list into receive descriptors: allocate an skb for
- * any buffer that lacks one, fill in a rcv descriptor per buffer, and
- * ring the producer doorbell once at the end.  Caller owns rds_ring
- * exclusively (no locking here, unlike the _nodb variant). */
-void
-qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
-       struct qlcnic_host_rds_ring *rds_ring)
-{
-       struct rcv_desc *pdesc;
-       struct qlcnic_rx_buffer *buffer;
-       int count = 0;
-       u32 producer;
-       struct list_head *head;
-
-       producer = rds_ring->producer;
-
-       head = &rds_ring->free_list;
-       while (!list_empty(head)) {
-
-               buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
-
-               if (!buffer->skb) {
-                       /* Allocation failure: stop posting, retry later. */
-                       if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
-                               break;
-               }
-
-               count++;
-               list_del(&buffer->list);
-
-               /* make a rcv descriptor  */
-               pdesc = &rds_ring->desc_head[producer];
-               pdesc->addr_buffer = cpu_to_le64(buffer->dma);
-               pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
-               pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
-
-               producer = get_next_index(producer, rds_ring->num_desc);
-       }
-
-       if (count) {
-               rds_ring->producer = producer;
-               /* Doorbell takes the index of the last posted descriptor. */
-               writel((producer-1) & (rds_ring->num_desc-1),
-                               rds_ring->crb_rcv_producer);
-       }
-}
-
-/* Same replenish logic as qlcnic_post_rx_buffers(), but callable from the
- * NAPI path: takes rds_ring->lock via trylock and simply backs off if the
- * ring is busy (a later poll will replenish). */
-static void
-qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
-               struct qlcnic_host_rds_ring *rds_ring)
-{
-       struct rcv_desc *pdesc;
-       struct qlcnic_rx_buffer *buffer;
-       int  count = 0;
-       uint32_t producer;
-       struct list_head *head;
-
-       /* Contended: skip this round rather than spin in softirq. */
-       if (!spin_trylock(&rds_ring->lock))
-               return;
-
-       producer = rds_ring->producer;
-
-       head = &rds_ring->free_list;
-       while (!list_empty(head)) {
-
-               buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
-
-               if (!buffer->skb) {
-                       if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
-                               break;
-               }
-
-               count++;
-               list_del(&buffer->list);
-
-               /* make a rcv descriptor  */
-               pdesc = &rds_ring->desc_head[producer];
-               pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
-               pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
-               pdesc->addr_buffer = cpu_to_le64(buffer->dma);
-
-               producer = get_next_index(producer, rds_ring->num_desc);
-       }
-
-       if (count) {
-               rds_ring->producer = producer;
-               writel((producer - 1) & (rds_ring->num_desc - 1),
-                               rds_ring->crb_rcv_producer);
-       }
-       spin_unlock(&rds_ring->lock);
-}
-
-/* Debug helper: hex-dump an skb's payload through the QLCDB debug macro.
- * NOTE(review): the newline is emitted when (i & 0x0f) == 8, i.e. mid-way
- * through each 16-byte group rather than at its end -- confirm intended. */
-static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
-{
-       int i;
-       unsigned char *data = skb->data;
-
-       printk(KERN_INFO "\n");
-       for (i = 0; i < skb->len; i++) {
-               QLCDB(adapter, DRV, "%02x ", data[i]);
-               if ((i & 0x0f) == 8)
-                       printk(KERN_INFO "\n");
-       }
-}
-
-/* Diagnostic-mode rx handler (loopback self-test): pull one packet off the
- * RDS ring, count it as a passed loopback frame if its payload matches the
- * adapter MAC pattern, otherwise hex-dump it; the skb is always freed --
- * nothing is passed to the network stack. */
-static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
-                                   u64 sts_data0)
-{
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-       struct sk_buff *skb;
-       struct qlcnic_host_rds_ring *rds_ring;
-       int index, length, cksum, pkt_offset;
-
-       if (unlikely(ring >= adapter->max_rds_rings))
-               return;
-
-       rds_ring = &recv_ctx->rds_rings[ring];
-
-       index = qlcnic_get_sts_refhandle(sts_data0);
-       length = qlcnic_get_sts_totallength(sts_data0);
-       if (unlikely(index >= rds_ring->num_desc))
-               return;
-
-       cksum  = qlcnic_get_sts_status(sts_data0);
-       pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
-
-       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
-       if (!skb)
-               return;
-
-       /* Clamp to the buffer size in case firmware reports a longer frame. */
-       if (length > rds_ring->skb_size)
-               skb_put(skb, rds_ring->skb_size);
-       else
-               skb_put(skb, length);
-
-       if (pkt_offset)
-               skb_pull(skb, pkt_offset);
-
-       /* Zero return == expected loopback pattern found. */
-       if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
-               adapter->diag_cnt++;
-       else
-               dump_skb(skb, adapter);
-
-       dev_kfree_skb_any(skb);
-       adapter->stats.rx_pkts++;
-       adapter->stats.rxbytes += length;
-
-       return;
-}
-
-/* Diagnostic-mode SDS poll: process at most ONE status entry per call
- * (no budget loop, unlike qlcnic_process_rcv_ring), dispatching either to
- * the fw-message handler or the diag rx handler, then return the
- * descriptor(s) to firmware and advance the consumer index. */
-void
-qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
-{
-       struct qlcnic_adapter *adapter = sds_ring->adapter;
-       struct status_desc *desc;
-       u64 sts_data0;
-       int ring, opcode, desc_cnt;
-
-       u32 consumer = sds_ring->consumer;
-
-       desc = &sds_ring->desc_head[consumer];
-       sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
-
-       /* Nothing posted by firmware yet. */
-       if (!(sts_data0 & STATUS_OWNER_HOST))
-               return;
-
-       desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
-       opcode = qlcnic_get_sts_opcode(sts_data0);
-       switch (opcode) {
-       case QLCNIC_RESPONSE_DESC:
-               qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
-               break;
-       default:
-               ring = qlcnic_get_sts_type(sts_data0);
-               qlcnic_process_rcv_diag(adapter, ring, sts_data0);
-               break;
-       }
-
-       /* Hand ownership of all consumed descriptors back to firmware. */
-       for (; desc_cnt > 0; desc_cnt--) {
-               desc = &sds_ring->desc_head[consumer];
-               desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
-               consumer = get_next_index(consumer, sds_ring->num_desc);
-       }
-
-       sds_ring->consumer = consumer;
-       writel(consumer, sds_ring->crb_sts_consumer);
-}
-
-/* Unpack a 6-byte MAC address from two 32-bit register words into @mac,
- * most-significant byte first: mac[0..1] come from the high word,
- * mac[2..5] from the low word.  When @alt_mac is set the 48-bit value is
- * first shifted right by 16 bits across the two words to select the
- * alternate address. */
-void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
-{
-       u32 mac_low, mac_high;
-       int i;
-
-       mac_low = off1;
-       mac_high = off2;
-
-       if (alt_mac) {
-               mac_low |= (mac_low >> 16) | (mac_high << 16);
-               mac_high >>= 16;
-       }
-
-       for (i = 0; i < 2; i++)
-               mac[i] = (u8)(mac_high >> ((1 - i) * 8));
-       for (i = 2; i < 6; i++)
-               mac[i] = (u8)(mac_low >> ((5 - i) * 8));
-}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
new file mode 100644 (file)
index 0000000..ba352c1
--- /dev/null
@@ -0,0 +1,1309 @@
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+
+#include "qlcnic.h"
+
+/* Fold a MAC address (stored in a u64) into a small hash-bucket index for
+ * the source-MAC learning table (fhash): mixes bits 16-18 and 40-42. */
+#define QLCNIC_MAC_HASH(MAC)\
+       ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
+
+#define TX_ETHER_PKT   0x01
+#define TX_TCP_PKT     0x02
+#define TX_UDP_PKT     0x03
+#define TX_IP_PKT      0x04
+#define TX_TCP_LSO     0x05
+#define TX_TCP_LSO6    0x06
+#define TX_TCPV6_PKT   0x0b
+#define TX_UDPV6_PKT   0x0c
+#define FLAGS_VLAN_TAGGED      0x10
+#define FLAGS_VLAN_OOB         0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v)    \
+       (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+#define qlcnic_set_cmd_desc_port(cmd_desc, var)        \
+       ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)       \
+       ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+       ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+       ((_desc)->flags_opcode |= \
+       cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+       ((_desc)->nfrags__length = \
+       cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST      (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM   (0x2ULL << 56)
+
+/* Status descriptor:
+   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+   53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data)  \
+       ((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data)        \
+       (((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data)  \
+       (((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data)   \
+       (((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data)     \
+       (((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data)  \
+       (((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data)    \
+       (((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data)      \
+       (((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data)        \
+       (((sts_data) >> 58) & 0x03F)
+
+#define qlcnic_get_lro_sts_refhandle(sts_data)         \
+       ((sts_data) & 0x0FFFF)
+#define qlcnic_get_lro_sts_length(sts_data)    \
+       (((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)     \
+       (((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)     \
+       (((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data) \
+       (((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data)      \
+       (((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data)         \
+       (((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data)                \
+       ((sts_data) & 0x0FFFFFFFF)
+#define qlcnic_get_lro_sts_mss(sts_data1)              \
+       ((sts_data1 >> 32) & 0x0FFFF)
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD     0x03
+#define QLCNIC_RXPKT_DESC      0x04
+#define QLCNIC_OLD_RXPKT_DESC  0x3f
+#define QLCNIC_RESPONSE_DESC   0x05
+#define QLCNIC_LRO_DESC        0x12
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_LOOP      0
+#define STATUS_CKSUM_OK                2
+
+/* Build an in-band MAC-learn request (QLCNIC_MAC_EVENT) in the TX ring so
+ * firmware (re)programs a source-MAC/VLAN filter.  Consumes one command
+ * descriptor and advances the producer; the doorbell is rung later by the
+ * transmit path that triggered the learn. */
+static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+                                u64 uaddr, __le16 vlan_id,
+                                struct qlcnic_host_tx_ring *tx_ring)
+{
+       struct cmd_desc_type0 *hwdesc;
+       struct qlcnic_nic_req *req;
+       struct qlcnic_mac_req *mac_req;
+       struct qlcnic_vlan_req *vlan_req;
+       u32 producer;
+       u64 word;
+
+       producer = tx_ring->producer;
+       hwdesc = &tx_ring->desc_head[tx_ring->producer];
+
+       /* Reinterpret the TX descriptor slot as a firmware request. */
+       req = (struct qlcnic_nic_req *)hwdesc;
+       memset(req, 0, sizeof(struct qlcnic_nic_req));
+       req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+       word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
+       req->req_hdr = cpu_to_le64(word);
+
+       mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+       mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+       memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+
+       vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
+       vlan_req->vlan_id = vlan_id;
+
+       tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
+       /* Make the descriptor visible before the caller's doorbell write. */
+       smp_mb();
+}
+
+/* Source-MAC learning on transmit: look the frame's source address (plus
+ * VLAN on NPAR/eswitch devices) up in the fhash table; refresh an aging
+ * entry in place, or allocate a new filter node and push a MAC-learn
+ * request to firmware.  Silently does nothing when the table is full or
+ * the source is the adapter's own MAC. */
+static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+                              struct qlcnic_host_tx_ring *tx_ring,
+                              struct cmd_desc_type0 *first_desc,
+                              struct sk_buff *skb)
+{
+       struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+       struct qlcnic_filter *fil, *tmp_fil;
+       struct hlist_node *tmp_hnode, *n;
+       struct hlist_head *head;
+       u64 src_addr = 0;
+       __le16 vlan_id = 0;
+       u8 hindex;
+
+       if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+               return;
+
+       if (adapter->fhash.fnum >= adapter->fhash.fmax)
+               return;
+
+       /* Only NPAR capable devices support vlan based learning*/
+       if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+               vlan_id = first_desc->vlan_TCI;
+       memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+       hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
+       head = &(adapter->fhash.fhead[hindex]);
+
+       /* NOTE(review): this walk runs without mac_learn_lock while the
+        * insert below takes it -- confirm the caller serializes lookups
+        * against concurrent insert/removal. */
+       hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+               if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+                           tmp_fil->vlan_id == vlan_id) {
+
+                       /* Re-announce entries older than the re-add age. */
+                       if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+                               qlcnic_change_filter(adapter, src_addr, vlan_id,
+                                                    tx_ring);
+                       tmp_fil->ftime = jiffies;
+                       return;
+               }
+       }
+
+       /* GFP_ATOMIC: we are on the transmit (softirq) path. */
+       fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+       if (!fil)
+               return;
+
+       qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
+
+       fil->ftime = jiffies;
+       fil->vlan_id = vlan_id;
+       memcpy(fil->faddr, &src_addr, ETH_ALEN);
+
+       spin_lock(&adapter->mac_learn_lock);
+
+       hlist_add_head(&(fil->fnode), head);
+       adapter->fhash.fnum++;
+
+       spin_unlock(&adapter->mac_learn_lock);
+}
+
+/* Finish the first TX descriptor for @skb: resolve VLAN tagging (inline
+ * tag, out-of-band tag, or forced pvid), pick the opcode from the L3/L4
+ * protocol and offload type, and for TSO copy the MAC/IP/TCP header
+ * template into extra ring descriptors for the firmware.  Returns 0, or
+ * -EIO when a tagged frame is rejected under pvid without tagging enabled.
+ */
+static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
+                        struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
+{
+       u8 l4proto, opcode = 0, hdr_len = 0;
+       u16 flags = 0, vlan_tci = 0;
+       int copied, offset, copy_len, size;
+       struct cmd_desc_type0 *hwdesc;
+       struct vlan_ethhdr *vh;
+       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+       u16 protocol = ntohs(skb->protocol);
+       u32 producer = tx_ring->producer;
+
+       if (protocol == ETH_P_8021Q) {
+               /* Tag is inline in the frame; use the encapsulated proto. */
+               vh = (struct vlan_ethhdr *)skb->data;
+               flags = FLAGS_VLAN_TAGGED;
+               vlan_tci = ntohs(vh->h_vlan_TCI);
+               protocol = ntohs(vh->h_vlan_encapsulated_proto);
+       } else if (vlan_tx_tag_present(skb)) {
+               /* Tag supplied out-of-band by the stack. */
+               flags = FLAGS_VLAN_OOB;
+               vlan_tci = vlan_tx_tag_get(skb);
+       }
+       if (unlikely(adapter->pvid)) {
+               if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+                       return -EIO;
+               if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+                       goto set_flags;
+
+               /* Untagged frame on a pvid port: force the port VLAN. */
+               flags = FLAGS_VLAN_OOB;
+               vlan_tci = adapter->pvid;
+       }
+set_flags:
+       /* Opcode is still 0 here; the macro ORs, so the real opcode is
+        * merged in by the second call at the end of this function. */
+       qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
+       qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+       /* Group-address bit of the destination MAC: multicast/broadcast. */
+       if (*(skb->data) & BIT_0) {
+               flags |= BIT_0;
+               memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
+       }
+       opcode = TX_ETHER_PKT;
+       if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+           skb_shinfo(skb)->gso_size > 0) {
+               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+               first_desc->total_hdr_length = hdr_len;
+               opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+
+               /* For LSO, we need to copy the MAC/IP/TCP headers into
+               * the descriptor ring */
+               copied = 0;
+               offset = 2;
+
+               if (flags & FLAGS_VLAN_OOB) {
+                       first_desc->total_hdr_length += VLAN_HLEN;
+                       first_desc->tcp_hdr_offset = VLAN_HLEN;
+                       first_desc->ip_hdr_offset = VLAN_HLEN;
+
+                       /* Only in case of TSO on vlan device */
+                       flags |= FLAGS_VLAN_TAGGED;
+
+                       /* Create a TSO vlan header template for firmware */
+                       hwdesc = &tx_ring->desc_head[producer];
+                       tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+                       copy_len = min((int)sizeof(struct cmd_desc_type0) -
+                                      offset, hdr_len + VLAN_HLEN);
+
+                       /* Splice the VLAN tag between the MAC addresses
+                        * (first 12 bytes) and the rest of the header. */
+                       vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
+                       skb_copy_from_linear_data(skb, vh, 12);
+                       vh->h_vlan_proto = htons(ETH_P_8021Q);
+                       vh->h_vlan_TCI = htons(vlan_tci);
+
+                       skb_copy_from_linear_data_offset(skb, 12,
+                                                        (char *)vh + 16,
+                                                        copy_len - 16);
+                       copied = copy_len - VLAN_HLEN;
+                       offset = 0;
+                       producer = get_next_index(producer, tx_ring->num_desc);
+               }
+
+               /* Copy any remaining header bytes into further descriptors. */
+               while (copied < hdr_len) {
+                       size = (int)sizeof(struct cmd_desc_type0) - offset;
+                       copy_len = min(size, (hdr_len - copied));
+                       hwdesc = &tx_ring->desc_head[producer];
+                       tx_ring->cmd_buf_arr[producer].skb = NULL;
+                       skb_copy_from_linear_data_offset(skb, copied,
+                                                        (char *)hwdesc +
+                                                        offset, copy_len);
+                       copied += copy_len;
+                       offset = 0;
+                       producer = get_next_index(producer, tx_ring->num_desc);
+               }
+
+               tx_ring->producer = producer;
+               smp_mb();
+               adapter->stats.lso_frames++;
+
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               /* Checksum offload: pick the opcode by L3/L4 protocol. */
+               if (protocol == ETH_P_IP) {
+                       l4proto = ip_hdr(skb)->protocol;
+
+                       if (l4proto == IPPROTO_TCP)
+                               opcode = TX_TCP_PKT;
+                       else if (l4proto == IPPROTO_UDP)
+                               opcode = TX_UDP_PKT;
+               } else if (protocol == ETH_P_IPV6) {
+                       l4proto = ipv6_hdr(skb)->nexthdr;
+
+                       if (l4proto == IPPROTO_TCP)
+                               opcode = TX_TCPV6_PKT;
+                       else if (l4proto == IPPROTO_UDP)
+                               opcode = TX_UDPV6_PKT;
+               }
+       }
+       first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+       first_desc->ip_hdr_offset += skb_network_offset(skb);
+       qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+       return 0;
+}
+
+/* DMA-map an skb for transmit: head into frag_array[0], page frags into
+ * frag_array[1..].  On any mapping failure every mapping made so far is
+ * unwound and -ENOMEM is returned; on success pbuf owns the mappings. */
+static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
+                            struct qlcnic_cmd_buffer *pbuf)
+{
+       struct qlcnic_skb_frag *nf;
+       struct skb_frag_struct *frag;
+       int i, nr_frags;
+       dma_addr_t map;
+
+       nr_frags = skb_shinfo(skb)->nr_frags;
+       nf = &pbuf->frag_array[0];
+
+       map = pci_map_single(pdev, skb->data, skb_headlen(skb),
+                            PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(pdev, map))
+               goto out_err;
+
+       nf->dma = map;
+       nf->length = skb_headlen(skb);
+
+       for (i = 0; i < nr_frags; i++) {
+               frag = &skb_shinfo(skb)->frags[i];
+               nf = &pbuf->frag_array[i+1];
+               map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+                                      DMA_TO_DEVICE);
+               if (dma_mapping_error(&pdev->dev, map))
+                       goto unwind;
+
+               nf->dma = map;
+               nf->length = skb_frag_size(frag);
+       }
+
+       return 0;
+
+unwind:
+       /* Undo the frag mappings completed before the failure, then the head. */
+       while (--i >= 0) {
+               nf = &pbuf->frag_array[i+1];
+               pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+       }
+
+       nf = &pbuf->frag_array[0];
+       pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+       return -ENOMEM;
+}
+
+/* Release every DMA mapping created by qlcnic_map_tx_skb() (frags first,
+ * then the head) and drop pbuf's skb reference.  Does not free the skb. */
+static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
+                                struct qlcnic_cmd_buffer *pbuf)
+{
+       struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
+       int i, nr_frags = skb_shinfo(skb)->nr_frags;
+
+       for (i = 0; i < nr_frags; i++) {
+               nf = &pbuf->frag_array[i+1];
+               pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+       }
+
+       nf = &pbuf->frag_array[0];
+       pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+       pbuf->skb = NULL;
+}
+
+/* Zero only the 64-bit words of a command descriptor that the TX path
+ * does not unconditionally overwrite (words 0, 2 and 7); the rest are
+ * filled in by the caller. */
+static inline void qlcnic_clear_cmddesc(u64 *desc)
+{
+       desc[0] = 0ULL;
+       desc[2] = 0ULL;
+       desc[7] = 0ULL;
+}
+
+/* ndo_start_xmit handler: validate device state and (optionally) the
+ * source MAC, linearize excess fragments for non-TSO frames, check ring
+ * space, DMA-map the skb, scatter its fragments across command
+ * descriptors (4 buffers per descriptor), finish the first descriptor via
+ * qlcnic_tx_pkt(), and ring the producer doorbell.  Returns NETDEV_TX_OK
+ * (dropping the skb on internal errors) or NETDEV_TX_BUSY when the
+ * device/ring cannot accept the frame. */
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+       struct qlcnic_cmd_buffer *pbuf;
+       struct qlcnic_skb_frag *buffrag;
+       struct cmd_desc_type0 *hwdesc, *first_desc;
+       struct pci_dev *pdev;
+       struct ethhdr *phdr;
+       int i, k, frag_count, delta = 0;
+       u32 producer, num_txd;
+
+       num_txd = tx_ring->num_desc;
+
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+               netif_stop_queue(netdev);
+               return NETDEV_TX_BUSY;
+       }
+
+       /* Anti-spoof mode: only frames sourced from our own MAC go out. */
+       if (adapter->flags & QLCNIC_MACSPOOF) {
+               phdr = (struct ethhdr *)skb->data;
+               if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
+                       goto drop_packet;
+       }
+
+       frag_count = skb_shinfo(skb)->nr_frags + 1;
+       /* 14 frags supported for normal packet and
+        * 32 frags supported for TSO packet
+        */
+       if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+               /* Pull the excess frags into the linear area. */
+               for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+                       delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+               if (!__pskb_pull_tail(skb, delta))
+                       goto drop_packet;
+
+               frag_count = 1 + skb_shinfo(skb)->nr_frags;
+       }
+
+       /* Stop the queue near exhaustion; re-open if space freed up
+        * between the check and the stop (avoids a lost wakeup). */
+       if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+               netif_stop_queue(netdev);
+               if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+                       netif_start_queue(netdev);
+               } else {
+                       adapter->stats.xmit_off++;
+                       return NETDEV_TX_BUSY;
+               }
+       }
+
+       producer = tx_ring->producer;
+       pbuf = &tx_ring->cmd_buf_arr[producer];
+       pdev = adapter->pdev;
+       first_desc = &tx_ring->desc_head[producer];
+       hwdesc = &tx_ring->desc_head[producer];
+       qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+       if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
+               adapter->stats.tx_dma_map_error++;
+               goto drop_packet;
+       }
+
+       pbuf->skb = skb;
+       pbuf->frag_count = frag_count;
+
+       qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+       qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+       /* Each descriptor carries up to 4 buffer address/length pairs. */
+       for (i = 0; i < frag_count; i++) {
+               k = i % 4;
+
+               if ((k == 0) && (i > 0)) {
+                       /* move to next desc.*/
+                       producer = get_next_index(producer, num_txd);
+                       hwdesc = &tx_ring->desc_head[producer];
+                       qlcnic_clear_cmddesc((u64 *)hwdesc);
+                       tx_ring->cmd_buf_arr[producer].skb = NULL;
+               }
+
+               buffrag = &pbuf->frag_array[i];
+               hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+               switch (k) {
+               case 0:
+                       hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+                       break;
+               case 1:
+                       hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+                       break;
+               case 2:
+                       hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+                       break;
+               case 3:
+                       hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+                       break;
+               }
+       }
+
+       tx_ring->producer = get_next_index(producer, num_txd);
+       /* Publish descriptors before finishing the first one / doorbell. */
+       smp_mb();
+
+       if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+               goto unwind_buff;
+
+       if (adapter->mac_learn)
+               qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
+
+       adapter->stats.txbytes += skb->len;
+       adapter->stats.xmitcalled++;
+
+       qlcnic_update_cmd_producer(tx_ring);
+
+       return NETDEV_TX_OK;
+
+unwind_buff:
+       qlcnic_unmap_buffers(pdev, skb, pbuf);
+drop_packet:
+       adapter->stats.txdropped++;
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+/* Propagate a firmware-reported link transition to the net stack: update
+ * the cached link state, log it, and toggle carrier/queue accordingly
+ * (only when the interface is running).  No-op if the state is unchanged. */
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       if (adapter->ahw->linkup && !linkup) {
+               netdev_info(netdev, "NIC Link is down\n");
+               adapter->ahw->linkup = 0;
+               if (netif_running(netdev)) {
+                       netif_carrier_off(netdev);
+                       netif_stop_queue(netdev);
+               }
+       } else if (!adapter->ahw->linkup && linkup) {
+               netdev_info(netdev, "NIC Link is up\n");
+               adapter->ahw->linkup = 1;
+               if (netif_running(netdev)) {
+                       netif_carrier_on(netdev);
+                       netif_wake_queue(netdev);
+               }
+       }
+}
+
+/* Allocate and DMA-map a receive skb for one rx_buffer slot.  On success
+ * the buffer owns the skb and its mapping; on failure stats are bumped
+ * and -ENOMEM is returned with the buffer left untouched. */
+static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+                              struct qlcnic_host_rds_ring *rds_ring,
+                              struct qlcnic_rx_buffer *buffer)
+{
+       struct sk_buff *skb;
+       dma_addr_t dma;
+       struct pci_dev *pdev = adapter->pdev;
+
+       skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
+       if (!skb) {
+               adapter->stats.skb_alloc_failure++;
+               return -ENOMEM;
+       }
+
+       /* Align the IP header after the 14-byte ethernet header. */
+       skb_reserve(skb, NET_IP_ALIGN);
+       dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
+                            PCI_DMA_FROMDEVICE);
+
+       if (pci_dma_mapping_error(pdev, dma)) {
+               adapter->stats.rx_dma_map_error++;
+               dev_kfree_skb_any(skb);
+               return -ENOMEM;
+       }
+
+       buffer->skb = skb;
+       buffer->dma = dma;
+
+       return 0;
+}
+
+/* Replenish an RDS ring from its free list on the NAPI path: allocate
+ * skbs where needed, write one rcv descriptor per buffer, and ring the
+ * producer doorbell once.  Uses trylock on rds_ring->lock and backs off
+ * if the ring is busy -- a later poll will replenish instead. */
+static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+                                        struct qlcnic_host_rds_ring *rds_ring)
+{
+       struct rcv_desc *pdesc;
+       struct qlcnic_rx_buffer *buffer;
+       int  count = 0;
+       uint32_t producer;
+       struct list_head *head;
+
+       /* Contended: skip this round rather than spin in softirq. */
+       if (!spin_trylock(&rds_ring->lock))
+               return;
+
+       producer = rds_ring->producer;
+       head = &rds_ring->free_list;
+
+       while (!list_empty(head)) {
+               buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+               if (!buffer->skb) {
+                       /* Allocation failure: stop posting, retry later. */
+                       if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+                               break;
+               }
+
+               count++;
+               list_del(&buffer->list);
+
+               /* make a rcv descriptor  */
+               pdesc = &rds_ring->desc_head[producer];
+               pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+               pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+               pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+               producer = get_next_index(producer, rds_ring->num_desc);
+       }
+
+       if (count) {
+               rds_ring->producer = producer;
+               /* Doorbell takes the index of the last posted descriptor. */
+               writel((producer - 1) & (rds_ring->num_desc - 1),
+                      rds_ring->crb_rcv_producer);
+       }
+
+       spin_unlock(&rds_ring->lock);
+}
+
+/* Reclaim tx (cmd) ring buffers that the firmware has consumed.
+ *
+ * Walks from the software consumer index up to the hardware consumer
+ * index, unmapping fragments and freeing completed skbs (bounded by
+ * MAX_STATUS_HANDLE per invocation).  Returns non-zero when the ring
+ * is fully drained (sw == hw), which lets the NAPI poller complete.
+ */
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+{
+       u32 sw_consumer, hw_consumer;
+       int i, done, count = 0;
+       struct qlcnic_cmd_buffer *buffer;
+       struct pci_dev *pdev = adapter->pdev;
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_skb_frag *frag;
+       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+       /* another context is already cleaning; report "done" to caller */
+       if (!spin_trylock(&adapter->tx_clean_lock))
+               return 1;
+
+       sw_consumer = tx_ring->sw_consumer;
+       hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+       while (sw_consumer != hw_consumer) {
+               buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+               if (buffer->skb) {
+                       /* first fragment was mapped with pci_map_single */
+                       frag = &buffer->frag_array[0];
+                       pci_unmap_single(pdev, frag->dma, frag->length,
+                                        PCI_DMA_TODEVICE);
+                       frag->dma = 0ULL;
+                       for (i = 1; i < buffer->frag_count; i++) {
+                               frag++;
+                               pci_unmap_page(pdev, frag->dma, frag->length,
+                                              PCI_DMA_TODEVICE);
+                               frag->dma = 0ULL;
+                       }
+
+                       adapter->stats.xmitfinished++;
+                       dev_kfree_skb_any(buffer->skb);
+                       buffer->skb = NULL;
+               }
+
+               sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+               if (++count >= MAX_STATUS_HANDLE)
+                       break;
+       }
+
+       if (count && netif_running(netdev)) {
+               tx_ring->sw_consumer = sw_consumer;
+
+               /* order sw_consumer update vs. queue-stopped test below */
+               smp_mb();
+
+               if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+                       if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+                               netif_wake_queue(netdev);
+                               adapter->stats.xmit_on++;
+                       }
+               }
+               adapter->tx_timeo_cnt = 0;
+       }
+       /*
+        * If everything is freed up to consumer then check if the ring is full
+        * If the ring is full then check if more needs to be freed and
+        * schedule the call back again.
+        *
+        * This happens when there are 2 CPUs. One could be freeing and the
+        * other filling it. If the ring is full when we get out of here and
+        * the card has already interrupted the host then the host can miss the
+        * interrupt.
+        *
+        * There is still a possible race condition and the host could miss an
+        * interrupt. The card has to take care of this.
+        */
+       hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+       done = (sw_consumer == hw_consumer);
+
+       spin_unlock(&adapter->tx_clean_lock);
+
+       return done;
+}
+
+/* NAPI poll handler for the combined tx/rx status ring: reclaim tx
+ * completions, process rx work up to @budget, and re-enable the
+ * interrupt only once both directions are drained.
+ */
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+       struct qlcnic_host_sds_ring *sds_ring =
+               container_of(napi, struct qlcnic_host_sds_ring, napi);
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+       int tx_done, work;
+
+       tx_done = qlcnic_process_cmd_ring(adapter);
+       work = qlcnic_process_rcv_ring(sds_ring, budget);
+
+       if (work < budget && tx_done) {
+               napi_complete(&sds_ring->napi);
+               if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+                       qlcnic_enable_int(sds_ring);
+       }
+
+       return work;
+}
+
+/* NAPI poll handler for rx-only status rings: process up to @budget
+ * packets; when the budget is not exhausted, complete NAPI and
+ * re-enable the ring interrupt (if the device is still up).
+ */
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct qlcnic_host_sds_ring *sds_ring =
+               container_of(napi, struct qlcnic_host_sds_ring, napi);
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+       int work;
+
+       work = qlcnic_process_rcv_ring(sds_ring, budget);
+       if (work >= budget)
+               return work;
+
+       napi_complete(&sds_ring->napi);
+       if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               qlcnic_enable_int(sds_ring);
+
+       return work;
+}
+
+/* Decode a firmware link-event message and update the adapter's link
+ * state (speed, duplex, autoneg, module type), warn about unsupported
+ * twinax cables, and propagate the carrier change to the netdev.
+ */
+static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+                                   struct qlcnic_fw_msg *msg)
+{
+       u32 cable_OUI;
+       u16 cable_len, link_speed;
+       u8  link_status, module, duplex, autoneg, lb_status = 0;
+       struct net_device *netdev = adapter->netdev;
+
+       adapter->has_link_events = 1;
+
+       /* word 1: cable OUI / length and link speed, packed in 64 bits */
+       cable_OUI = msg->body[1] & 0xffffffff;
+       cable_len = (msg->body[1] >> 32) & 0xffff;
+       link_speed = (msg->body[1] >> 48) & 0xffff;
+
+       /* word 2: status, duplex, autoneg and loopback indication */
+       link_status = msg->body[2] & 0xff;
+       duplex = (msg->body[2] >> 16) & 0xff;
+       autoneg = (msg->body[2] >> 24) & 0xff;
+       lb_status = (msg->body[2] >> 32) & 0x3;
+
+       module = (msg->body[2] >> 8) & 0xff;
+       if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
+               dev_info(&netdev->dev,
+                        "unsupported cable: OUI 0x%x, length %d\n",
+                        cable_OUI, cable_len);
+       else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
+               dev_info(&netdev->dev, "unsupported cable length %d\n",
+                        cable_len);
+
+       /* link-down during internal/external loopback is part of the test */
+       if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
+           lb_status == QLCNIC_ELB_MODE))
+               adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
+
+       qlcnic_advert_link_change(adapter, link_status);
+
+       if (duplex == LINKEVENT_FULL_DUPLEX)
+               adapter->link_duplex = DUPLEX_FULL;
+       else
+               adapter->link_duplex = DUPLEX_HALF;
+
+       adapter->module_type = module;
+       adapter->link_autoneg = autoneg;
+
+       if (link_status) {
+               adapter->link_speed = link_speed;
+       } else {
+               adapter->link_speed = SPEED_UNKNOWN;
+               adapter->link_duplex = DUPLEX_UNKNOWN;
+       }
+}
+
+/* Copy a firmware message out of the status ring (up to 4 descriptors,
+ * two 64-bit words each; i < 8 bounds the msg.words array) and dispatch
+ * it by opcode: link events and loopback-configuration responses.
+ */
+static void qlcnic_handle_fw_message(int desc_cnt, int index,
+                                    struct qlcnic_host_sds_ring *sds_ring)
+{
+       struct qlcnic_fw_msg msg;
+       struct status_desc *desc;
+       struct qlcnic_adapter *adapter;
+       struct device *dev;
+       int i = 0, opcode, ret;
+
+       while (desc_cnt > 0 && i < 8) {
+               desc = &sds_ring->desc_head[index];
+               msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+               msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+               index = get_next_index(index, sds_ring->num_desc);
+               desc_cnt--;
+       }
+
+       adapter = sds_ring->adapter;
+       dev = &adapter->pdev->dev;
+       opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+
+       switch (opcode) {
+       case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+               qlcnic_handle_linkevent(adapter, &msg);
+               break;
+       case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
+               /* body[1] carries the fw completion code for the request */
+               ret = (u32)(msg.body[1]);
+               switch (ret) {
+               case 0:
+                       adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
+                       break;
+               case 1:
+                       dev_info(dev, "loopback already in progress\n");
+                       adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
+                       break;
+               case 2:
+                       dev_info(dev, "loopback cable is not connected\n");
+                       adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
+                       break;
+               default:
+                       dev_info(dev,
+                                "loopback configure request failed, err %x\n",
+                                ret);
+                       adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+}
+
+/* Detach the skb from rx buffer @index: unmap it from the device, set
+ * its checksum state from the hardware @cksum result (honoring the
+ * netdev RXCSUM feature flag), and clear buffer->skb so the slot can
+ * be refilled.  Returns the skb, or NULL if the slot was empty.
+ */
+static struct sk_buff *
+qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+                    struct qlcnic_host_rds_ring *rds_ring, u16 index,
+                    u16 cksum)
+{
+       struct qlcnic_rx_buffer *buffer;
+       struct sk_buff *skb;
+
+       buffer = &rds_ring->rx_buf_arr[index];
+
+       /* fw handed us a descriptor for a buffer we never posted */
+       if (unlikely(buffer->skb == NULL)) {
+               WARN_ON(1);
+               return NULL;
+       }
+
+       pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+                        PCI_DMA_FROMDEVICE);
+
+       skb = buffer->skb;
+
+       if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
+                  (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
+               adapter->stats.csummed++;
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       } else {
+               skb_checksum_none_assert(skb);
+       }
+
+       buffer->skb = NULL;
+
+       return skb;
+}
+
+/* Extract and strip the VLAN tag (if any) from a received frame and
+ * apply port-VLAN (pvid) policy.
+ *
+ * Returns 0 to accept the frame (*vlan_tag holds the tag, or 0xffff
+ * for the untagged path) and -EINVAL to drop it when tagging is not
+ * enabled for this function.
+ */
+static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
+                                         struct sk_buff *skb, u16 *vlan_tag)
+{
+       struct ethhdr *eth_hdr;
+
+       /* in-band tag: strip the 802.1Q header from the packet data */
+       if (!__vlan_get_tag(skb, vlan_tag)) {
+               eth_hdr = (struct ethhdr *)skb->data;
+               memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+               skb_pull(skb, VLAN_HLEN);
+       }
+       if (!adapter->pvid)
+               return 0;
+
+       if (*vlan_tag == adapter->pvid) {
+               /* Outer vlan tag. Packet should follow non-vlan path */
+               *vlan_tag = 0xffff;
+               return 0;
+       }
+       if (adapter->flags & QLCNIC_TAGGING_ENABLED)
+               return 0;
+
+       return -EINVAL;
+}
+
+/* Process one ordinary rx status descriptor: validate ring and buffer
+ * indices, detach the skb, trim/strip per the descriptor fields, apply
+ * VLAN policy, and feed the frame to GRO.
+ *
+ * Returns the rx buffer so the caller can recycle it, or NULL when the
+ * descriptor references an invalid ring/index.
+ */
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+                  struct qlcnic_host_sds_ring *sds_ring, int ring,
+                  u64 sts_data0)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_rx_buffer *buffer;
+       struct sk_buff *skb;
+       struct qlcnic_host_rds_ring *rds_ring;
+       int index, length, cksum, pkt_offset;
+       u16 vid = 0xffff;
+
+       if (unlikely(ring >= adapter->max_rds_rings))
+               return NULL;
+
+       rds_ring = &recv_ctx->rds_rings[ring];
+
+       index = qlcnic_get_sts_refhandle(sts_data0);
+       if (unlikely(index >= rds_ring->num_desc))
+               return NULL;
+
+       buffer = &rds_ring->rx_buf_arr[index];
+       length = qlcnic_get_sts_totallength(sts_data0);
+       cksum  = qlcnic_get_sts_status(sts_data0);
+       pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+       if (!skb)
+               return buffer;
+
+       /* clamp to the skb's allocated size if fw reports a longer frame */
+       if (length > rds_ring->skb_size)
+               skb_put(skb, rds_ring->skb_size);
+       else
+               skb_put(skb, length);
+
+       if (pkt_offset)
+               skb_pull(skb, pkt_offset);
+
+       if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+               adapter->stats.rxdropped++;
+               dev_kfree_skb(skb);
+               return buffer;
+       }
+
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       if (vid != 0xffff)
+               __vlan_hwaccel_put_tag(skb, vid);
+
+       napi_gro_receive(&sds_ring->napi, skb);
+
+       adapter->stats.rx_pkts++;
+       adapter->stats.rxbytes += length;
+
+       return buffer;
+}
+
+#define QLC_TCP_HDR_SIZE            20
+#define QLC_TCP_TS_OPTION_SIZE      12
+#define QLC_TCP_TS_HDR_SIZE         (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
+
+/* Process a firmware-aggregated (LRO) receive: rebuild the IP/TCP
+ * headers of the aggregated frame (total length, IP checksum, PSH flag
+ * and sequence number) and hand it to the stack.
+ *
+ * Returns the rx buffer so the caller can recycle it, or NULL when the
+ * descriptor references an invalid ring/index.
+ *
+ * Note: the ring/index validation uses ">=" — "ring == max_rds_rings"
+ * and "index == num_desc" are one past the end of their arrays; the
+ * original ">" checks allowed an out-of-bounds access (the non-LRO
+ * path, qlcnic_process_rcv, already checks with ">=").
+ */
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+                  int ring, u64 sts_data0, u64 sts_data1)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_rx_buffer *buffer;
+       struct sk_buff *skb;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct iphdr *iph;
+       struct tcphdr *th;
+       bool push, timestamp;
+       int index, l2_hdr_offset, l4_hdr_offset;
+       u16 lro_length, length, data_offset, vid = 0xffff;
+       u32 seq_number;
+
+       if (unlikely(ring >= adapter->max_rds_rings))
+               return NULL;
+
+       rds_ring = &recv_ctx->rds_rings[ring];
+
+       index = qlcnic_get_lro_sts_refhandle(sts_data0);
+       if (unlikely(index >= rds_ring->num_desc))
+               return NULL;
+
+       buffer = &rds_ring->rx_buf_arr[index];
+
+       timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+       lro_length = qlcnic_get_lro_sts_length(sts_data0);
+       l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+       l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+       push = qlcnic_get_lro_sts_push_flag(sts_data0);
+       seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+       if (!skb)
+               return buffer;
+
+       /* TCP header is longer when the timestamp option is present */
+       if (timestamp)
+               data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
+       else
+               data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
+
+       skb_put(skb, lro_length + data_offset);
+       skb_pull(skb, l2_hdr_offset);
+
+       if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+               adapter->stats.rxdropped++;
+               dev_kfree_skb(skb);
+               return buffer;
+       }
+
+       skb->protocol = eth_type_trans(skb, netdev);
+       iph = (struct iphdr *)skb->data;
+       th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+       /* fix up the headers to describe the aggregated super-frame */
+       length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+       iph->tot_len = htons(length);
+       iph->check = 0;
+       iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+       th->psh = push;
+       th->seq = htonl(seq_number);
+       length = skb->len;
+
+       if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+               skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+
+       if (vid != 0xffff)
+               __vlan_hwaccel_put_tag(skb, vid);
+       netif_receive_skb(skb);
+
+       adapter->stats.lro_pkts++;
+       adapter->stats.lrobytes += length;
+
+       return buffer;
+}
+
+/* Main rx status-ring processing loop, called from NAPI poll.
+ *
+ * Consumes up to @max status descriptors, dispatching each by opcode
+ * (normal rx, LRO, or firmware message), returns processed buffers to
+ * the per-sds free lists, re-arms the rds rings, and finally advances
+ * the hardware consumer index.  Returns the number of descriptors
+ * handled so the poller can compare against its budget.
+ */
+int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+       struct list_head *cur;
+       struct status_desc *desc;
+       struct qlcnic_rx_buffer *rxbuf;
+       u64 sts_data0, sts_data1;
+       __le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM);
+       int opcode, ring, desc_cnt, count = 0;
+       u32 consumer = sds_ring->consumer;
+
+       while (count < max) {
+               desc = &sds_ring->desc_head[consumer];
+               sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+               /* descriptor still owned by the device — nothing more to do */
+               if (!(sts_data0 & STATUS_OWNER_HOST))
+                       break;
+
+               desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+               opcode = qlcnic_get_sts_opcode(sts_data0);
+
+               switch (opcode) {
+               case QLCNIC_RXPKT_DESC:
+               case QLCNIC_OLD_RXPKT_DESC:
+               case QLCNIC_SYN_OFFLOAD:
+                       ring = qlcnic_get_sts_type(sts_data0);
+                       rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
+                                                  sts_data0);
+                       break;
+               case QLCNIC_LRO_DESC:
+                       ring = qlcnic_get_lro_sts_type(sts_data0);
+                       sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+                       rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
+                                                  sts_data1);
+                       break;
+               case QLCNIC_RESPONSE_DESC:
+                       qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+                       /* fall through - fw messages yield no rx buffer */
+               default:
+                       goto skip;
+               }
+
+               WARN_ON(desc_cnt > 1);
+
+               if (likely(rxbuf))
+                       list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+               else
+                       adapter->stats.null_rxbuf++;
+
+skip:
+               /* hand the consumed descriptor(s) back to the device */
+               for (; desc_cnt > 0; desc_cnt--) {
+                       desc = &sds_ring->desc_head[consumer];
+                       desc->status_desc_data[0] = owner_phantom;
+                       consumer = get_next_index(consumer, sds_ring->num_desc);
+               }
+               count++;
+       }
+
+       /* re-arm skbs on recycled buffers and repost them to the rds rings */
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &adapter->recv_ctx->rds_rings[ring];
+
+               if (!list_empty(&sds_ring->free_list[ring])) {
+                       list_for_each(cur, &sds_ring->free_list[ring]) {
+                               rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+                                                  list);
+                               qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+                       }
+                       spin_lock(&rds_ring->lock);
+                       list_splice_tail_init(&sds_ring->free_list[ring],
+                                             &rds_ring->free_list);
+                       spin_unlock(&rds_ring->lock);
+               }
+
+               qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+       }
+
+       if (count) {
+               sds_ring->consumer = consumer;
+               writel(consumer, sds_ring->crb_sts_consumer);
+       }
+
+       return count;
+}
+
+/* Refill the rx descriptor ring from its free list.
+ *
+ * Unlike qlcnic_post_rx_buffers_nodb() this takes no lock — it is
+ * presumably only called from paths that own the ring exclusively
+ * (e.g. ring setup); NOTE(review): confirm callers before reuse.
+ */
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
+                           struct qlcnic_host_rds_ring *rds_ring)
+{
+       struct rcv_desc *pdesc;
+       struct qlcnic_rx_buffer *buffer;
+       int count = 0;
+       u32 producer;
+       struct list_head *head;
+
+       producer = rds_ring->producer;
+       head = &rds_ring->free_list;
+
+       while (!list_empty(head)) {
+
+               buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+               if (!buffer->skb) {
+                       if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+                               break;
+               }
+
+               count++;
+               list_del(&buffer->list);
+
+               /* make a rcv descriptor  */
+               pdesc = &rds_ring->desc_head[producer];
+               pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+               pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+               pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+               producer = get_next_index(producer, rds_ring->num_desc);
+       }
+
+       if (count) {
+               rds_ring->producer = producer;
+               /* doorbell: last valid descriptor index, ring-size masked */
+               writel((producer-1) & (rds_ring->num_desc-1),
+                      rds_ring->crb_rcv_producer);
+       }
+}
+
+/* Hex-dump an skb's payload via the driver debug macro; used when a
+ * loopback test frame fails validation.
+ *
+ * Fix: pr_info() already prepends KERN_INFO — passing KERN_INFO as
+ * part of the format string embedded a stray level prefix in the
+ * output.
+ */
+static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
+{
+       int i;
+       unsigned char *data = skb->data;
+
+       pr_info("\n");
+       for (i = 0; i < skb->len; i++) {
+               QLCDB(adapter, DRV, "%02x ", data[i]);
+               if ((i & 0x0f) == 8)
+                       pr_info("\n");
+       }
+}
+
+/* Diagnostic-mode rx handler (loopback test): pull the frame off the
+ * ring, verify it against the adapter MAC via
+ * qlcnic_check_loopback_buff(), count a success in diag_cnt or dump
+ * the bad frame, then free the skb (frames never reach the stack).
+ *
+ * Fix: dropped the redundant trailing "return;" in a void function
+ * (flagged by checkpatch).
+ */
+static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
+                                   u64 sts_data0)
+{
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct sk_buff *skb;
+       struct qlcnic_host_rds_ring *rds_ring;
+       int index, length, cksum, pkt_offset;
+
+       if (unlikely(ring >= adapter->max_rds_rings))
+               return;
+
+       rds_ring = &recv_ctx->rds_rings[ring];
+
+       index = qlcnic_get_sts_refhandle(sts_data0);
+       length = qlcnic_get_sts_totallength(sts_data0);
+       if (unlikely(index >= rds_ring->num_desc))
+               return;
+
+       cksum  = qlcnic_get_sts_status(sts_data0);
+       pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+       skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+       if (!skb)
+               return;
+
+       /* clamp to the skb's allocated size if fw reports a longer frame */
+       if (length > rds_ring->skb_size)
+               skb_put(skb, rds_ring->skb_size);
+       else
+               skb_put(skb, length);
+
+       if (pkt_offset)
+               skb_pull(skb, pkt_offset);
+
+       if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+               adapter->diag_cnt++;
+       else
+               dump_skb(skb, adapter);
+
+       dev_kfree_skb_any(skb);
+       adapter->stats.rx_pkts++;
+       adapter->stats.rxbytes += length;
+}
+
+/* Diagnostic-mode status-ring service routine: handle at most one
+ * status entry per call (a fw message or a loopback test frame),
+ * return the descriptor(s) to the device, and advance the consumer.
+ */
+void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+       struct status_desc *desc;
+       u64 sts_data0;
+       int ring, opcode, desc_cnt;
+
+       u32 consumer = sds_ring->consumer;
+
+       desc = &sds_ring->desc_head[consumer];
+       sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+       /* descriptor still owned by the device — nothing to process */
+       if (!(sts_data0 & STATUS_OWNER_HOST))
+               return;
+
+       desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+       opcode = qlcnic_get_sts_opcode(sts_data0);
+       switch (opcode) {
+       case QLCNIC_RESPONSE_DESC:
+               qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+               break;
+       default:
+               ring = qlcnic_get_sts_type(sts_data0);
+               qlcnic_process_rcv_diag(adapter, ring, sts_data0);
+               break;
+       }
+
+       /* hand the consumed descriptor(s) back to the device */
+       for (; desc_cnt > 0; desc_cnt--) {
+               desc = &sds_ring->desc_head[consumer];
+               desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+               consumer = get_next_index(consumer, sds_ring->num_desc);
+       }
+
+       sds_ring->consumer = consumer;
+       writel(consumer, sds_ring->crb_sts_consumer);
+}
+
+/* Decode a MAC address from two 32-bit register words: @off1 supplies
+ * the low four bytes, @off2 the high two.  When @alt_mac is set the
+ * words are shifted first to recover the alternate MAC layout.
+ */
+void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
+{
+       u32 lo = off1;
+       u32 hi = off2;
+
+       if (alt_mac) {
+               lo |= (lo >> 16) | (hi << 16);
+               hi >>= 16;
+       }
+
+       /* bytes are stored most-significant first */
+       mac[0] = (u8)(hi >> 8);
+       mac[1] = (u8)hi;
+       mac[2] = (u8)(lo >> 24);
+       mac[3] = (u8)(lo >> 16);
+       mac[4] = (u8)(lo >> 8);
+       mac[5] = (u8)lo;
+}
+
+/* Allocate the sds rings and register a NAPI handler per ring: the
+ * last ring uses the combined tx/rx poller (qlcnic_poll) with a share
+ * of the total weight, all others the rx-only poller.
+ * Returns 0 on success or -ENOMEM if ring allocation fails.
+ */
+int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_host_sds_ring *sds_ring;
+       int ring, max_sds_rings;
+
+       if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+               return -ENOMEM;
+
+       max_sds_rings = adapter->max_sds_rings;
+
+       for (ring = 0; ring < max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               if (ring != max_sds_rings - 1)
+                       netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
+                                      QLCNIC_NETDEV_WEIGHT * 2);
+               else
+                       netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
+                                      QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+       }
+
+       return 0;
+}
+
+/* Unregister every ring's NAPI handler and free the sds ring array. */
+void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               netif_napi_del(&sds_ring->napi);
+       }
+
+       qlcnic_free_sds_rings(adapter->recv_ctx);
+}
+
+/* Enable NAPI polling and unmask the interrupt on every sds ring;
+ * a no-op unless the adapter has been fully brought up.
+ */
+void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+               return;
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               napi_enable(&sds_ring->napi);
+               qlcnic_enable_int(sds_ring);
+       }
+}
+
+/* Quiesce rx processing: mask each ring's interrupt, wait for any
+ * in-flight poll to finish, then disable NAPI.  A no-op unless the
+ * adapter has been fully brought up.
+ */
+void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+               return;
+
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               qlcnic_disable_int(sds_ring);
+               napi_synchronize(&sds_ring->napi);
+               napi_disable(&sds_ring->napi);
+       }
+}
index 67159d6e9a3562a324407097f16191ae6abcac41..1eef0bf36aa6a077f6db22e211ab3ba2a600f268 100644 (file)
@@ -66,17 +66,10 @@ static void qlcnic_fw_poll_work(struct work_struct *work);
 static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
                work_func_t func, int delay);
 static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
-static int qlcnic_poll(struct napi_struct *napi, int budget);
-static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev);
 #endif
 
-static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
-static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
-static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
-static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
-
 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
 static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -92,8 +85,6 @@ static int qlcnic_start_firmware(struct qlcnic_adapter *);
 
 static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
 static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
-static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
-static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
 static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
                                struct qlcnic_esw_func_cfg *);
@@ -130,23 +121,7 @@ static const u32 msi_tgt_status[8] = {
 static const
 struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
 
-static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
-{
-       writel(0, sds_ring->crb_intr_mask);
-}
-
-static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
-{
-       struct qlcnic_adapter *adapter = sds_ring->adapter;
-
-       writel(0x1, sds_ring->crb_intr_mask);
-
-       if (!QLCNIC_IS_MSI_FAMILY(adapter))
-               writel(0xfbff, adapter->tgt_mask_reg);
-}
-
-static int
-qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
 {
        int size = sizeof(struct qlcnic_host_sds_ring) * count;
 
@@ -155,8 +130,7 @@ qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
        return recv_ctx->sds_rings == NULL;
 }
 
-static void
-qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
 {
        if (recv_ctx->sds_rings != NULL)
                kfree(recv_ctx->sds_rings);
@@ -164,80 +138,6 @@ qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
        recv_ctx->sds_rings = NULL;
 }
 
-static int
-qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
-{
-       int ring;
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
-       if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
-               return -ENOMEM;
-
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-
-               if (ring == adapter->max_sds_rings - 1)
-                       netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
-                               QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
-               else
-                       netif_napi_add(netdev, &sds_ring->napi,
-                               qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
-       }
-
-       return 0;
-}
-
-static void
-qlcnic_napi_del(struct qlcnic_adapter *adapter)
-{
-       int ring;
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               netif_napi_del(&sds_ring->napi);
-       }
-
-       qlcnic_free_sds_rings(adapter->recv_ctx);
-}
-
-static void
-qlcnic_napi_enable(struct qlcnic_adapter *adapter)
-{
-       int ring;
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
-       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
-               return;
-
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               napi_enable(&sds_ring->napi);
-               qlcnic_enable_int(sds_ring);
-       }
-}
-
-static void
-qlcnic_napi_disable(struct qlcnic_adapter *adapter)
-{
-       int ring;
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
-       if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
-               return;
-
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               qlcnic_disable_int(sds_ring);
-               napi_synchronize(&sds_ring->napi);
-               napi_disable(&sds_ring->napi);
-       }
-}
-
 static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
 {
        memset(&adapter->stats, 0, sizeof(adapter->stats));
@@ -722,9 +622,8 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
        return err;
 }
 
-static void
-qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
-               struct qlcnic_esw_func_cfg *esw_cfg)
+void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
+                           struct qlcnic_esw_func_cfg *esw_cfg)
 {
        if (esw_cfg->discard_tagged)
                adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
@@ -755,9 +654,8 @@ qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
        return 0;
 }
 
-static void
-qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
-               struct qlcnic_esw_func_cfg *esw_cfg)
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
+                                     struct qlcnic_esw_func_cfg *esw_cfg)
 {
        adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
                                QLCNIC_PROMISC_DISABLED);
@@ -774,8 +672,7 @@ qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
        qlcnic_set_netdev_features(adapter, esw_cfg);
 }
 
-static int
-qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_esw_func_cfg esw_cfg;
 
@@ -1924,428 +1821,6 @@ static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
        adapter->fhash.fmax = 0;
 }
 
-static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
-               u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
-{
-       struct cmd_desc_type0 *hwdesc;
-       struct qlcnic_nic_req *req;
-       struct qlcnic_mac_req *mac_req;
-       struct qlcnic_vlan_req *vlan_req;
-       u32 producer;
-       u64 word;
-
-       producer = tx_ring->producer;
-       hwdesc = &tx_ring->desc_head[tx_ring->producer];
-
-       req = (struct qlcnic_nic_req *)hwdesc;
-       memset(req, 0, sizeof(struct qlcnic_nic_req));
-       req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
-
-       word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
-       req->req_hdr = cpu_to_le64(word);
-
-       mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
-       mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
-       memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
-
-       vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
-       vlan_req->vlan_id = vlan_id;
-
-       tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
-       smp_mb();
-}
-
-#define QLCNIC_MAC_HASH(MAC)\
-       ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
-
-static void
-qlcnic_send_filter(struct qlcnic_adapter *adapter,
-               struct qlcnic_host_tx_ring *tx_ring,
-               struct cmd_desc_type0 *first_desc,
-               struct sk_buff *skb)
-{
-       struct ethhdr *phdr = (struct ethhdr *)(skb->data);
-       struct qlcnic_filter *fil, *tmp_fil;
-       struct hlist_node *tmp_hnode, *n;
-       struct hlist_head *head;
-       u64 src_addr = 0;
-       __le16 vlan_id = 0;
-       u8 hindex;
-
-       if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
-               return;
-
-       if (adapter->fhash.fnum >= adapter->fhash.fmax)
-               return;
-
-       /* Only NPAR capable devices support vlan based learning*/
-       if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
-               vlan_id = first_desc->vlan_TCI;
-       memcpy(&src_addr, phdr->h_source, ETH_ALEN);
-       hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
-       head = &(adapter->fhash.fhead[hindex]);
-
-       hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
-               if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
-                           tmp_fil->vlan_id == vlan_id) {
-
-                       if (jiffies >
-                           (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
-                               qlcnic_change_filter(adapter, src_addr, vlan_id,
-                                                               tx_ring);
-                       tmp_fil->ftime = jiffies;
-                       return;
-               }
-       }
-
-       fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
-       if (!fil)
-               return;
-
-       qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
-
-       fil->ftime = jiffies;
-       fil->vlan_id = vlan_id;
-       memcpy(fil->faddr, &src_addr, ETH_ALEN);
-       spin_lock(&adapter->mac_learn_lock);
-       hlist_add_head(&(fil->fnode), head);
-       adapter->fhash.fnum++;
-       spin_unlock(&adapter->mac_learn_lock);
-}
-
-static int
-qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
-               struct cmd_desc_type0 *first_desc,
-               struct sk_buff *skb)
-{
-       u8 opcode = 0, hdr_len = 0;
-       u16 flags = 0, vlan_tci = 0;
-       int copied, offset, copy_len;
-       struct cmd_desc_type0 *hwdesc;
-       struct vlan_ethhdr *vh;
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
-       u16 protocol = ntohs(skb->protocol);
-       u32 producer = tx_ring->producer;
-
-       if (protocol == ETH_P_8021Q) {
-               vh = (struct vlan_ethhdr *)skb->data;
-               flags = FLAGS_VLAN_TAGGED;
-               vlan_tci = ntohs(vh->h_vlan_TCI);
-               protocol = ntohs(vh->h_vlan_encapsulated_proto);
-       } else if (vlan_tx_tag_present(skb)) {
-               flags = FLAGS_VLAN_OOB;
-               vlan_tci = vlan_tx_tag_get(skb);
-       }
-       if (unlikely(adapter->pvid)) {
-               if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
-                       return -EIO;
-               if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
-                       goto set_flags;
-
-               flags = FLAGS_VLAN_OOB;
-               vlan_tci = adapter->pvid;
-       }
-set_flags:
-       qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
-       qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
-
-       if (*(skb->data) & BIT_0) {
-               flags |= BIT_0;
-               memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
-       }
-       opcode = TX_ETHER_PKT;
-       if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
-                       skb_shinfo(skb)->gso_size > 0) {
-
-               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-
-               first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
-               first_desc->total_hdr_length = hdr_len;
-
-               opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
-
-               /* For LSO, we need to copy the MAC/IP/TCP headers into
-               * the descriptor ring */
-               copied = 0;
-               offset = 2;
-
-               if (flags & FLAGS_VLAN_OOB) {
-                       first_desc->total_hdr_length += VLAN_HLEN;
-                       first_desc->tcp_hdr_offset = VLAN_HLEN;
-                       first_desc->ip_hdr_offset = VLAN_HLEN;
-                       /* Only in case of TSO on vlan device */
-                       flags |= FLAGS_VLAN_TAGGED;
-
-                       /* Create a TSO vlan header template for firmware */
-
-                       hwdesc = &tx_ring->desc_head[producer];
-                       tx_ring->cmd_buf_arr[producer].skb = NULL;
-
-                       copy_len = min((int)sizeof(struct cmd_desc_type0) -
-                               offset, hdr_len + VLAN_HLEN);
-
-                       vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
-                       skb_copy_from_linear_data(skb, vh, 12);
-                       vh->h_vlan_proto = htons(ETH_P_8021Q);
-                       vh->h_vlan_TCI = htons(vlan_tci);
-
-                       skb_copy_from_linear_data_offset(skb, 12,
-                               (char *)vh + 16, copy_len - 16);
-
-                       copied = copy_len - VLAN_HLEN;
-                       offset = 0;
-
-                       producer = get_next_index(producer, tx_ring->num_desc);
-               }
-
-               while (copied < hdr_len) {
-
-                       copy_len = min((int)sizeof(struct cmd_desc_type0) -
-                               offset, (hdr_len - copied));
-
-                       hwdesc = &tx_ring->desc_head[producer];
-                       tx_ring->cmd_buf_arr[producer].skb = NULL;
-
-                       skb_copy_from_linear_data_offset(skb, copied,
-                                (char *) hwdesc + offset, copy_len);
-
-                       copied += copy_len;
-                       offset = 0;
-
-                       producer = get_next_index(producer, tx_ring->num_desc);
-               }
-
-               tx_ring->producer = producer;
-               smp_mb();
-               adapter->stats.lso_frames++;
-
-       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               u8 l4proto;
-
-               if (protocol == ETH_P_IP) {
-                       l4proto = ip_hdr(skb)->protocol;
-
-                       if (l4proto == IPPROTO_TCP)
-                               opcode = TX_TCP_PKT;
-                       else if (l4proto == IPPROTO_UDP)
-                               opcode = TX_UDP_PKT;
-               } else if (protocol == ETH_P_IPV6) {
-                       l4proto = ipv6_hdr(skb)->nexthdr;
-
-                       if (l4proto == IPPROTO_TCP)
-                               opcode = TX_TCPV6_PKT;
-                       else if (l4proto == IPPROTO_UDP)
-                               opcode = TX_UDPV6_PKT;
-               }
-       }
-       first_desc->tcp_hdr_offset += skb_transport_offset(skb);
-       first_desc->ip_hdr_offset += skb_network_offset(skb);
-       qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
-
-       return 0;
-}
-
-static int
-qlcnic_map_tx_skb(struct pci_dev *pdev,
-               struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
-{
-       struct qlcnic_skb_frag *nf;
-       struct skb_frag_struct *frag;
-       int i, nr_frags;
-       dma_addr_t map;
-
-       nr_frags = skb_shinfo(skb)->nr_frags;
-       nf = &pbuf->frag_array[0];
-
-       map = pci_map_single(pdev, skb->data,
-                       skb_headlen(skb), PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(pdev, map))
-               goto out_err;
-
-       nf->dma = map;
-       nf->length = skb_headlen(skb);
-
-       for (i = 0; i < nr_frags; i++) {
-               frag = &skb_shinfo(skb)->frags[i];
-               nf = &pbuf->frag_array[i+1];
-
-               map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
-                                      DMA_TO_DEVICE);
-               if (dma_mapping_error(&pdev->dev, map))
-                       goto unwind;
-
-               nf->dma = map;
-               nf->length = skb_frag_size(frag);
-       }
-
-       return 0;
-
-unwind:
-       while (--i >= 0) {
-               nf = &pbuf->frag_array[i+1];
-               pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
-       }
-
-       nf = &pbuf->frag_array[0];
-       pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
-
-out_err:
-       return -ENOMEM;
-}
-
-static void
-qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
-                       struct qlcnic_cmd_buffer *pbuf)
-{
-       struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
-       int nr_frags = skb_shinfo(skb)->nr_frags;
-       int i;
-
-       for (i = 0; i < nr_frags; i++) {
-               nf = &pbuf->frag_array[i+1];
-               pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
-       }
-
-       nf = &pbuf->frag_array[0];
-       pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
-       pbuf->skb = NULL;
-}
-
-static inline void
-qlcnic_clear_cmddesc(u64 *desc)
-{
-       desc[0] = 0ULL;
-       desc[2] = 0ULL;
-       desc[7] = 0ULL;
-}
-
-netdev_tx_t
-qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
-       struct qlcnic_cmd_buffer *pbuf;
-       struct qlcnic_skb_frag *buffrag;
-       struct cmd_desc_type0 *hwdesc, *first_desc;
-       struct pci_dev *pdev;
-       struct ethhdr *phdr;
-       int delta = 0;
-       int i, k;
-
-       u32 producer;
-       int frag_count;
-       u32 num_txd = tx_ring->num_desc;
-
-       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
-               netif_stop_queue(netdev);
-               return NETDEV_TX_BUSY;
-       }
-
-       if (adapter->flags & QLCNIC_MACSPOOF) {
-               phdr = (struct ethhdr *)skb->data;
-               if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
-                       goto drop_packet;
-       }
-
-       frag_count = skb_shinfo(skb)->nr_frags + 1;
-       /* 14 frags supported for normal packet and
-        * 32 frags supported for TSO packet
-        */
-       if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
-
-               for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
-                       delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
-
-               if (!__pskb_pull_tail(skb, delta))
-                       goto drop_packet;
-
-               frag_count = 1 + skb_shinfo(skb)->nr_frags;
-       }
-
-       if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
-               netif_stop_queue(netdev);
-               if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
-                       netif_start_queue(netdev);
-               else {
-                       adapter->stats.xmit_off++;
-                       return NETDEV_TX_BUSY;
-               }
-       }
-
-       producer = tx_ring->producer;
-       pbuf = &tx_ring->cmd_buf_arr[producer];
-
-       pdev = adapter->pdev;
-
-       first_desc = hwdesc = &tx_ring->desc_head[producer];
-       qlcnic_clear_cmddesc((u64 *)hwdesc);
-
-       if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
-               adapter->stats.tx_dma_map_error++;
-               goto drop_packet;
-       }
-
-       pbuf->skb = skb;
-       pbuf->frag_count = frag_count;
-
-       qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
-       qlcnic_set_tx_port(first_desc, adapter->portnum);
-
-       for (i = 0; i < frag_count; i++) {
-
-               k = i % 4;
-
-               if ((k == 0) && (i > 0)) {
-                       /* move to next desc.*/
-                       producer = get_next_index(producer, num_txd);
-                       hwdesc = &tx_ring->desc_head[producer];
-                       qlcnic_clear_cmddesc((u64 *)hwdesc);
-                       tx_ring->cmd_buf_arr[producer].skb = NULL;
-               }
-
-               buffrag = &pbuf->frag_array[i];
-
-               hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
-               switch (k) {
-               case 0:
-                       hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
-                       break;
-               case 1:
-                       hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
-                       break;
-               case 2:
-                       hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
-                       break;
-               case 3:
-                       hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
-                       break;
-               }
-       }
-
-       tx_ring->producer = get_next_index(producer, num_txd);
-       smp_mb();
-
-       if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
-               goto unwind_buff;
-
-       if (adapter->mac_learn)
-               qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
-
-       adapter->stats.txbytes += skb->len;
-       adapter->stats.xmitcalled++;
-
-       qlcnic_update_cmd_producer(tx_ring);
-
-       return NETDEV_TX_OK;
-
-unwind_buff:
-       qlcnic_unmap_buffers(pdev, skb, pbuf);
-drop_packet:
-       adapter->stats.txdropped++;
-       dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
-}
-
 static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
@@ -2382,27 +1857,6 @@ static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
        return rv;
 }
 
-void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
-{
-       struct net_device *netdev = adapter->netdev;
-
-       if (adapter->ahw->linkup && !linkup) {
-               netdev_info(netdev, "NIC Link is down\n");
-               adapter->ahw->linkup = 0;
-               if (netif_running(netdev)) {
-                       netif_carrier_off(netdev);
-                       netif_stop_queue(netdev);
-               }
-       } else if (!adapter->ahw->linkup && linkup) {
-               netdev_info(netdev, "NIC Link is up\n");
-               adapter->ahw->linkup = 1;
-               if (netif_running(netdev)) {
-                       netif_carrier_on(netdev);
-                       netif_wake_queue(netdev);
-               }
-       }
-}
-
 static void qlcnic_tx_timeout(struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -2509,143 +1963,27 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev)
 {
-       u32 sw_consumer, hw_consumer;
-       int count = 0, i;
-       struct qlcnic_cmd_buffer *buffer;
-       struct pci_dev *pdev = adapter->pdev;
-       struct net_device *netdev = adapter->netdev;
-       struct qlcnic_skb_frag *frag;
-       int done;
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
-
-       if (!spin_trylock(&adapter->tx_clean_lock))
-               return 1;
-
-       sw_consumer = tx_ring->sw_consumer;
-       hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
-
-       while (sw_consumer != hw_consumer) {
-               buffer = &tx_ring->cmd_buf_arr[sw_consumer];
-               if (buffer->skb) {
-                       frag = &buffer->frag_array[0];
-                       pci_unmap_single(pdev, frag->dma, frag->length,
-                                        PCI_DMA_TODEVICE);
-                       frag->dma = 0ULL;
-                       for (i = 1; i < buffer->frag_count; i++) {
-                               frag++;
-                               pci_unmap_page(pdev, frag->dma, frag->length,
-                                              PCI_DMA_TODEVICE);
-                               frag->dma = 0ULL;
-                       }
-
-                       adapter->stats.xmitfinished++;
-                       dev_kfree_skb_any(buffer->skb);
-                       buffer->skb = NULL;
-               }
-
-               sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
-               if (++count >= MAX_STATUS_HANDLE)
-                       break;
-       }
-
-       if (count && netif_running(netdev)) {
-               tx_ring->sw_consumer = sw_consumer;
-
-               smp_mb();
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 
-               if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
-                       if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
-                               netif_wake_queue(netdev);
-                               adapter->stats.xmit_on++;
-                       }
-               }
-               adapter->tx_timeo_cnt = 0;
+       disable_irq(adapter->irq);
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               qlcnic_intr(adapter->irq, sds_ring);
        }
-       /*
-        * If everything is freed up to consumer then check if the ring is full
-        * If the ring is full then check if more needs to be freed and
-        * schedule the call back again.
-        *
-        * This happens when there are 2 CPUs. One could be freeing and the
-        * other filling it. If the ring is full when we get out of here and
-        * the card has already interrupted the host then the host can miss the
-        * interrupt.
-        *
-        * There is still a possible race condition and the host could miss an
-        * interrupt. The card has to take care of this.
-        */
-       hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
-       done = (sw_consumer == hw_consumer);
-       spin_unlock(&adapter->tx_clean_lock);
-
-       return done;
+       enable_irq(adapter->irq);
 }
+#endif
 
-static int qlcnic_poll(struct napi_struct *napi, int budget)
+static void
+qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
 {
-       struct qlcnic_host_sds_ring *sds_ring =
-               container_of(napi, struct qlcnic_host_sds_ring, napi);
-
-       struct qlcnic_adapter *adapter = sds_ring->adapter;
-
-       int tx_complete;
-       int work_done;
-
-       tx_complete = qlcnic_process_cmd_ring(adapter);
-
-       work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
-       if ((work_done < budget) && tx_complete) {
-               napi_complete(&sds_ring->napi);
-               if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
-                       qlcnic_enable_int(sds_ring);
-       }
-
-       return work_done;
-}
-
-static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
-{
-       struct qlcnic_host_sds_ring *sds_ring =
-               container_of(napi, struct qlcnic_host_sds_ring, napi);
-
-       struct qlcnic_adapter *adapter = sds_ring->adapter;
-       int work_done;
-
-       work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
-       if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
-               if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
-                       qlcnic_enable_int(sds_ring);
-       }
-
-       return work_done;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
-       int ring;
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
-       disable_irq(adapter->irq);
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               qlcnic_intr(adapter->irq, sds_ring);
-       }
-       enable_irq(adapter->irq);
-}
-#endif
-
-static void
-qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
-{
-       u32 val;
+       u32 val;
 
        val = adapter->portnum & 0xf;
        val |= encoding << 7;
@@ -3392,93 +2730,6 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
        return err;
 }
 
-static int
-qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
-{
-       return -EOPNOTSUPP;
-}
-
-static int
-qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
-{
-       return -EOPNOTSUPP;
-}
-
-static ssize_t
-qlcnic_store_bridged_mode(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t len)
-{
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       unsigned long new;
-       int ret = -EINVAL;
-
-       if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
-               goto err_out;
-
-       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
-               goto err_out;
-
-       if (strict_strtoul(buf, 2, &new))
-               goto err_out;
-
-       if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
-               ret = len;
-
-err_out:
-       return ret;
-}
-
-static ssize_t
-qlcnic_show_bridged_mode(struct device *dev,
-               struct device_attribute *attr, char *buf)
-{
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       int bridged_mode = 0;
-
-       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
-               bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
-
-       return sprintf(buf, "%d\n", bridged_mode);
-}
-
-static struct device_attribute dev_attr_bridged_mode = {
-       .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
-       .show = qlcnic_show_bridged_mode,
-       .store = qlcnic_store_bridged_mode,
-};
-
-static ssize_t
-qlcnic_store_diag_mode(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t len)
-{
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       unsigned long new;
-
-       if (strict_strtoul(buf, 2, &new))
-               return -EINVAL;
-
-       if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
-               adapter->flags ^= QLCNIC_DIAG_ENABLED;
-
-       return len;
-}
-
-static ssize_t
-qlcnic_show_diag_mode(struct device *dev,
-               struct device_attribute *attr, char *buf)
-{
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-
-       return sprintf(buf, "%d\n",
-                       !!(adapter->flags & QLCNIC_DIAG_ENABLED));
-}
-
-static struct device_attribute dev_attr_diag_mode = {
-       .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
-       .show = qlcnic_show_diag_mode,
-       .store = qlcnic_store_diag_mode,
-};
-
 int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
 {
        if (!use_msi_x && !use_msi) {
@@ -3529,859 +2780,6 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
        return err;
 }
 
-static int
-qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, u8 *state,
-                       u8 *rate)
-{
-       *rate = LSB(beacon);
-       *state = MSB(beacon);
-
-       QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
-
-       if (!*state) {
-               *rate = __QLCNIC_MAX_LED_RATE;
-               return 0;
-       } else if (*state > __QLCNIC_MAX_LED_STATE)
-               return -EINVAL;
-
-       if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
-               return -EINVAL;
-
-       return 0;
-}
-
-static ssize_t
-qlcnic_store_beacon(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t len)
-{
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       int max_sds_rings = adapter->max_sds_rings;
-       u16 beacon;
-       u8 b_state, b_rate;
-       int err;
-
-       if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
-               dev_warn(dev, "LED test not supported for non "
-                               "privilege function\n");
-               return -EOPNOTSUPP;
-       }
-
-       if (len != sizeof(u16))
-               return QL_STATUS_INVALID_PARAM;
-
-       memcpy(&beacon, buf, sizeof(u16));
-       err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
-       if (err)
-               return err;
-
-       if (adapter->ahw->beacon_state == b_state)
-               return len;
-
-       rtnl_lock();
-
-       if (!adapter->ahw->beacon_state)
-               if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
-                       rtnl_unlock();
-                       return -EBUSY;
-               }
-
-       if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
-               err = -EIO;
-               goto out;
-       }
-
-       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
-               err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
-               if (err)
-                       goto out;
-               set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
-       }
-
-       err = qlcnic_config_led(adapter, b_state, b_rate);
-
-       if (!err) {
-               err = len;
-               adapter->ahw->beacon_state = b_state;
-       }
-
-       if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
-               qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
-
- out:
-       if (!adapter->ahw->beacon_state)
-               clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
-       rtnl_unlock();
-
-       return err;
-}
-
-static ssize_t
-qlcnic_show_beacon(struct device *dev,
-               struct device_attribute *attr, char *buf)
-{
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-
-       return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
-}
-
-static struct device_attribute dev_attr_beacon = {
-       .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
-       .show = qlcnic_show_beacon,
-       .store = qlcnic_store_beacon,
-};
-
-static int
-qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
-               loff_t offset, size_t size)
-{
-       size_t crb_size = 4;
-
-       if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
-               return -EIO;
-
-       if (offset < QLCNIC_PCI_CRBSPACE) {
-               if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
-                                       QLCNIC_PCI_CAMQM_END))
-                       crb_size = 8;
-               else
-                       return -EINVAL;
-       }
-
-       if ((size != crb_size) || (offset & (crb_size-1)))
-               return  -EINVAL;
-
-       return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u32 data;
-       u64 qmdata;
-       int ret;
-
-       ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
-       if (ret != 0)
-               return ret;
-
-       if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
-               qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
-               memcpy(buf, &qmdata, size);
-       } else {
-               data = QLCRD32(adapter, offset);
-               memcpy(buf, &data, size);
-       }
-       return size;
-}
-
-static ssize_t
-qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u32 data;
-       u64 qmdata;
-       int ret;
-
-       ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
-       if (ret != 0)
-               return ret;
-
-       if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
-               memcpy(&qmdata, buf, size);
-               qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
-       } else {
-               memcpy(&data, buf, size);
-               QLCWR32(adapter, offset, data);
-       }
-       return size;
-}
-
-static int
-qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
-               loff_t offset, size_t size)
-{
-       if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
-               return -EIO;
-
-       if ((size != 8) || (offset & 0x7))
-               return  -EIO;
-
-       return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u64 data;
-       int ret;
-
-       ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
-       if (ret != 0)
-               return ret;
-
-       if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
-               return -EIO;
-
-       memcpy(buf, &data, size);
-
-       return size;
-}
-
-static ssize_t
-qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u64 data;
-       int ret;
-
-       ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
-       if (ret != 0)
-               return ret;
-
-       memcpy(&data, buf, size);
-
-       if (qlcnic_pci_mem_write_2M(adapter, offset, data))
-               return -EIO;
-
-       return size;
-}
-
-static struct bin_attribute bin_attr_crb = {
-       .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_crb,
-       .write = qlcnic_sysfs_write_crb,
-};
-
-static struct bin_attribute bin_attr_mem = {
-       .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_mem,
-       .write = qlcnic_sysfs_write_mem,
-};
-
-static int
-validate_pm_config(struct qlcnic_adapter *adapter,
-                       struct qlcnic_pm_func_cfg *pm_cfg, int count)
-{
-
-       u8 src_pci_func, s_esw_id, d_esw_id;
-       u8 dest_pci_func;
-       int i;
-
-       for (i = 0; i < count; i++) {
-               src_pci_func = pm_cfg[i].pci_func;
-               dest_pci_func = pm_cfg[i].dest_npar;
-               if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
-                               || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
-                       return QL_STATUS_INVALID_PARAM;
-
-               if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
-                       return QL_STATUS_INVALID_PARAM;
-
-               if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
-                       return QL_STATUS_INVALID_PARAM;
-
-               s_esw_id = adapter->npars[src_pci_func].phy_port;
-               d_esw_id = adapter->npars[dest_pci_func].phy_port;
-
-               if (s_esw_id != d_esw_id)
-                       return QL_STATUS_INVALID_PARAM;
-
-       }
-       return 0;
-
-}
-
-static ssize_t
-qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_pm_func_cfg *pm_cfg;
-       u32 id, action, pci_func;
-       int count, rem, i, ret;
-
-       count   = size / sizeof(struct qlcnic_pm_func_cfg);
-       rem     = size % sizeof(struct qlcnic_pm_func_cfg);
-       if (rem)
-               return QL_STATUS_INVALID_PARAM;
-
-       pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
-
-       ret = validate_pm_config(adapter, pm_cfg, count);
-       if (ret)
-               return ret;
-       for (i = 0; i < count; i++) {
-               pci_func = pm_cfg[i].pci_func;
-               action = !!pm_cfg[i].action;
-               id = adapter->npars[pci_func].phy_port;
-               ret = qlcnic_config_port_mirroring(adapter, id,
-                                               action, pci_func);
-               if (ret)
-                       return ret;
-       }
-
-       for (i = 0; i < count; i++) {
-               pci_func = pm_cfg[i].pci_func;
-               id = adapter->npars[pci_func].phy_port;
-               adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
-               adapter->npars[pci_func].dest_npar = id;
-       }
-       return size;
-}
-
-static ssize_t
-qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
-       int i;
-
-       if (size != sizeof(pm_cfg))
-               return QL_STATUS_INVALID_PARAM;
-
-       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-               if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
-                       continue;
-               pm_cfg[i].action = adapter->npars[i].enable_pm;
-               pm_cfg[i].dest_npar = 0;
-               pm_cfg[i].pci_func = i;
-       }
-       memcpy(buf, &pm_cfg, size);
-
-       return size;
-}
-
-static int
-validate_esw_config(struct qlcnic_adapter *adapter,
-       struct qlcnic_esw_func_cfg *esw_cfg, int count)
-{
-       u32 op_mode;
-       u8 pci_func;
-       int i;
-
-       op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
-
-       for (i = 0; i < count; i++) {
-               pci_func = esw_cfg[i].pci_func;
-               if (pci_func >= QLCNIC_MAX_PCI_FUNC)
-                       return QL_STATUS_INVALID_PARAM;
-
-               if (adapter->op_mode == QLCNIC_MGMT_FUNC)
-                       if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
-                               return QL_STATUS_INVALID_PARAM;
-
-               switch (esw_cfg[i].op_mode) {
-               case QLCNIC_PORT_DEFAULTS:
-                       if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
-                                               QLCNIC_NON_PRIV_FUNC) {
-                               if (esw_cfg[i].mac_anti_spoof != 0)
-                                       return QL_STATUS_INVALID_PARAM;
-                               if (esw_cfg[i].mac_override != 1)
-                                       return QL_STATUS_INVALID_PARAM;
-                               if (esw_cfg[i].promisc_mode != 1)
-                                       return QL_STATUS_INVALID_PARAM;
-                       }
-                       break;
-               case QLCNIC_ADD_VLAN:
-                       if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
-                               return QL_STATUS_INVALID_PARAM;
-                       if (!esw_cfg[i].op_type)
-                               return QL_STATUS_INVALID_PARAM;
-                       break;
-               case QLCNIC_DEL_VLAN:
-                       if (!esw_cfg[i].op_type)
-                               return QL_STATUS_INVALID_PARAM;
-                       break;
-               default:
-                       return QL_STATUS_INVALID_PARAM;
-               }
-       }
-       return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_esw_func_cfg *esw_cfg;
-       struct qlcnic_npar_info *npar;
-       int count, rem, i, ret;
-       u8 pci_func, op_mode = 0;
-
-       count   = size / sizeof(struct qlcnic_esw_func_cfg);
-       rem     = size % sizeof(struct qlcnic_esw_func_cfg);
-       if (rem)
-               return QL_STATUS_INVALID_PARAM;
-
-       esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
-       ret = validate_esw_config(adapter, esw_cfg, count);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < count; i++) {
-               if (adapter->op_mode == QLCNIC_MGMT_FUNC)
-                       if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
-                               return QL_STATUS_INVALID_PARAM;
-
-               if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
-                       continue;
-
-               op_mode = esw_cfg[i].op_mode;
-               qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
-               esw_cfg[i].op_mode = op_mode;
-               esw_cfg[i].pci_func = adapter->ahw->pci_func;
-
-               switch (esw_cfg[i].op_mode) {
-               case QLCNIC_PORT_DEFAULTS:
-                       qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
-                       break;
-               case QLCNIC_ADD_VLAN:
-                       qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
-                       break;
-               case QLCNIC_DEL_VLAN:
-                       esw_cfg[i].vlan_id = 0;
-                       qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
-                       break;
-               }
-       }
-
-       if (adapter->op_mode != QLCNIC_MGMT_FUNC)
-               goto out;
-
-       for (i = 0; i < count; i++) {
-               pci_func = esw_cfg[i].pci_func;
-               npar = &adapter->npars[pci_func];
-               switch (esw_cfg[i].op_mode) {
-               case QLCNIC_PORT_DEFAULTS:
-                       npar->promisc_mode = esw_cfg[i].promisc_mode;
-                       npar->mac_override = esw_cfg[i].mac_override;
-                       npar->offload_flags = esw_cfg[i].offload_flags;
-                       npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
-                       npar->discard_tagged = esw_cfg[i].discard_tagged;
-                       break;
-               case QLCNIC_ADD_VLAN:
-                       npar->pvid = esw_cfg[i].vlan_id;
-                       break;
-               case QLCNIC_DEL_VLAN:
-                       npar->pvid = 0;
-                       break;
-               }
-       }
-out:
-       return size;
-}
-
-static ssize_t
-qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
-       u8 i;
-
-       if (size != sizeof(esw_cfg))
-               return QL_STATUS_INVALID_PARAM;
-
-       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-               if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
-                       continue;
-               esw_cfg[i].pci_func = i;
-               if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
-                       return QL_STATUS_INVALID_PARAM;
-       }
-       memcpy(buf, &esw_cfg, size);
-
-       return size;
-}
-
-static int
-validate_npar_config(struct qlcnic_adapter *adapter,
-                               struct qlcnic_npar_func_cfg *np_cfg, int count)
-{
-       u8 pci_func, i;
-
-       for (i = 0; i < count; i++) {
-               pci_func = np_cfg[i].pci_func;
-               if (pci_func >= QLCNIC_MAX_PCI_FUNC)
-                       return QL_STATUS_INVALID_PARAM;
-
-               if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
-                       return QL_STATUS_INVALID_PARAM;
-
-               if (!IS_VALID_BW(np_cfg[i].min_bw) ||
-                   !IS_VALID_BW(np_cfg[i].max_bw))
-                       return QL_STATUS_INVALID_PARAM;
-       }
-       return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_info nic_info;
-       struct qlcnic_npar_func_cfg *np_cfg;
-       int i, count, rem, ret;
-       u8 pci_func;
-
-       count   = size / sizeof(struct qlcnic_npar_func_cfg);
-       rem     = size % sizeof(struct qlcnic_npar_func_cfg);
-       if (rem)
-               return QL_STATUS_INVALID_PARAM;
-
-       np_cfg = (struct qlcnic_npar_func_cfg *) buf;
-       ret = validate_npar_config(adapter, np_cfg, count);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < count ; i++) {
-               pci_func = np_cfg[i].pci_func;
-               ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
-               if (ret)
-                       return ret;
-               nic_info.pci_func = pci_func;
-               nic_info.min_tx_bw = np_cfg[i].min_bw;
-               nic_info.max_tx_bw = np_cfg[i].max_bw;
-               ret = qlcnic_set_nic_info(adapter, &nic_info);
-               if (ret)
-                       return ret;
-               adapter->npars[i].min_bw = nic_info.min_tx_bw;
-               adapter->npars[i].max_bw = nic_info.max_tx_bw;
-       }
-
-       return size;
-
-}
-static ssize_t
-qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_info nic_info;
-       struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
-       int i, ret;
-
-       if (size != sizeof(np_cfg))
-               return QL_STATUS_INVALID_PARAM;
-
-       for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
-               if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
-                       continue;
-               ret = qlcnic_get_nic_info(adapter, &nic_info, i);
-               if (ret)
-                       return ret;
-
-               np_cfg[i].pci_func = i;
-               np_cfg[i].op_mode = (u8)nic_info.op_mode;
-               np_cfg[i].port_num = nic_info.phys_port;
-               np_cfg[i].fw_capab = nic_info.capabilities;
-               np_cfg[i].min_bw = nic_info.min_tx_bw ;
-               np_cfg[i].max_bw = nic_info.max_tx_bw;
-               np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
-               np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
-       }
-       memcpy(buf, &np_cfg, size);
-       return size;
-}
-
-static ssize_t
-qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_esw_statistics port_stats;
-       int ret;
-
-       if (size != sizeof(struct qlcnic_esw_statistics))
-               return QL_STATUS_INVALID_PARAM;
-
-       if (offset >= QLCNIC_MAX_PCI_FUNC)
-               return QL_STATUS_INVALID_PARAM;
-
-       memset(&port_stats, 0, size);
-       ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
-                                                               &port_stats.rx);
-       if (ret)
-               return ret;
-
-       ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
-                                                               &port_stats.tx);
-       if (ret)
-               return ret;
-
-       memcpy(buf, &port_stats, size);
-       return size;
-}
-
-static ssize_t
-qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_esw_statistics esw_stats;
-       int ret;
-
-       if (size != sizeof(struct qlcnic_esw_statistics))
-               return QL_STATUS_INVALID_PARAM;
-
-       if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
-               return QL_STATUS_INVALID_PARAM;
-
-       memset(&esw_stats, 0, size);
-       ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
-                                                               &esw_stats.rx);
-       if (ret)
-               return ret;
-
-       ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
-                                                               &esw_stats.tx);
-       if (ret)
-               return ret;
-
-       memcpy(buf, &esw_stats, size);
-       return size;
-}
-
-static ssize_t
-qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       int ret;
-
-       if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
-               return QL_STATUS_INVALID_PARAM;
-
-       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
-                                               QLCNIC_QUERY_RX_COUNTER);
-       if (ret)
-               return ret;
-
-       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
-                                               QLCNIC_QUERY_TX_COUNTER);
-       if (ret)
-               return ret;
-
-       return size;
-}
-
-static ssize_t
-qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       int ret;
-
-       if (offset >= QLCNIC_MAX_PCI_FUNC)
-               return QL_STATUS_INVALID_PARAM;
-
-       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
-                                               QLCNIC_QUERY_RX_COUNTER);
-       if (ret)
-               return ret;
-
-       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
-                                               QLCNIC_QUERY_TX_COUNTER);
-       if (ret)
-               return ret;
-
-       return size;
-}
-
-static ssize_t
-qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
-       struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
-       struct qlcnic_pci_info *pci_info;
-       int i, ret;
-
-       if (size != sizeof(pci_cfg))
-               return QL_STATUS_INVALID_PARAM;
-
-       pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
-       if (!pci_info)
-               return -ENOMEM;
-
-       ret = qlcnic_get_pci_info(adapter, pci_info);
-       if (ret) {
-               kfree(pci_info);
-               return ret;
-       }
-
-       for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
-               pci_cfg[i].pci_func = pci_info[i].id;
-               pci_cfg[i].func_type = pci_info[i].type;
-               pci_cfg[i].port_num = pci_info[i].default_port;
-               pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
-               pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
-               memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
-       }
-       memcpy(buf, &pci_cfg, size);
-       kfree(pci_info);
-       return size;
-}
-static struct bin_attribute bin_attr_npar_config = {
-       .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_npar_config,
-       .write = qlcnic_sysfs_write_npar_config,
-};
-
-static struct bin_attribute bin_attr_pci_config = {
-       .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_pci_config,
-       .write = NULL,
-};
-
-static struct bin_attribute bin_attr_port_stats = {
-       .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_get_port_stats,
-       .write = qlcnic_sysfs_clear_port_stats,
-};
-
-static struct bin_attribute bin_attr_esw_stats = {
-       .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_get_esw_stats,
-       .write = qlcnic_sysfs_clear_esw_stats,
-};
-
-static struct bin_attribute bin_attr_esw_config = {
-       .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_esw_config,
-       .write = qlcnic_sysfs_write_esw_config,
-};
-
-static struct bin_attribute bin_attr_pm_config = {
-       .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
-       .size = 0,
-       .read = qlcnic_sysfs_read_pm_config,
-       .write = qlcnic_sysfs_write_pm_config,
-};
-
-static void
-qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
-{
-       struct device *dev = &adapter->pdev->dev;
-
-       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
-               if (device_create_file(dev, &dev_attr_bridged_mode))
-                       dev_warn(dev,
-                               "failed to create bridged_mode sysfs entry\n");
-}
-
-static void
-qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
-{
-       struct device *dev = &adapter->pdev->dev;
-
-       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
-               device_remove_file(dev, &dev_attr_bridged_mode);
-}
-
-static void
-qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
-{
-       struct device *dev = &adapter->pdev->dev;
-       u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-
-       if (device_create_bin_file(dev, &bin_attr_port_stats))
-               dev_info(dev, "failed to create port stats sysfs entry");
-
-       if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
-               return;
-       if (device_create_file(dev, &dev_attr_diag_mode))
-               dev_info(dev, "failed to create diag_mode sysfs entry\n");
-       if (device_create_bin_file(dev, &bin_attr_crb))
-               dev_info(dev, "failed to create crb sysfs entry\n");
-       if (device_create_bin_file(dev, &bin_attr_mem))
-               dev_info(dev, "failed to create mem sysfs entry\n");
-
-       if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
-               return;
-
-       if (device_create_bin_file(dev, &bin_attr_pci_config))
-               dev_info(dev, "failed to create pci config sysfs entry");
-       if (device_create_file(dev, &dev_attr_beacon))
-               dev_info(dev, "failed to create beacon sysfs entry");
-
-       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
-               return;
-       if (device_create_bin_file(dev, &bin_attr_esw_config))
-               dev_info(dev, "failed to create esw config sysfs entry");
-       if (adapter->op_mode != QLCNIC_MGMT_FUNC)
-               return;
-       if (device_create_bin_file(dev, &bin_attr_npar_config))
-               dev_info(dev, "failed to create npar config sysfs entry");
-       if (device_create_bin_file(dev, &bin_attr_pm_config))
-               dev_info(dev, "failed to create pm config sysfs entry");
-       if (device_create_bin_file(dev, &bin_attr_esw_stats))
-               dev_info(dev, "failed to create eswitch stats sysfs entry");
-}
-
-static void
-qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
-{
-       struct device *dev = &adapter->pdev->dev;
-       u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-
-       device_remove_bin_file(dev, &bin_attr_port_stats);
-
-       if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
-               return;
-       device_remove_file(dev, &dev_attr_diag_mode);
-       device_remove_bin_file(dev, &bin_attr_crb);
-       device_remove_bin_file(dev, &bin_attr_mem);
-       if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
-               return;
-       device_remove_bin_file(dev, &bin_attr_pci_config);
-       device_remove_file(dev, &dev_attr_beacon);
-       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
-               return;
-       device_remove_bin_file(dev, &bin_attr_esw_config);
-       if (adapter->op_mode != QLCNIC_MGMT_FUNC)
-               return;
-       device_remove_bin_file(dev, &bin_attr_npar_config);
-       device_remove_bin_file(dev, &bin_attr_pm_config);
-       device_remove_bin_file(dev, &bin_attr_esw_stats);
-}
-
 #ifdef CONFIG_INET
 
 #define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
new file mode 100644 (file)
index 0000000..12ff292
--- /dev/null
@@ -0,0 +1,629 @@
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
+#include <net/ip.h>
+
+#define QLCNIC_DUMP_WCRB       BIT_0
+#define QLCNIC_DUMP_RWCRB      BIT_1
+#define QLCNIC_DUMP_ANDCRB     BIT_2
+#define QLCNIC_DUMP_ORCRB      BIT_3
+#define QLCNIC_DUMP_POLLCRB    BIT_4
+#define QLCNIC_DUMP_RD_SAVE    BIT_5
+#define QLCNIC_DUMP_WRT_SAVED  BIT_6
+#define QLCNIC_DUMP_MOD_SAVE_ST        BIT_7
+#define QLCNIC_DUMP_SKIP       BIT_7
+
+#define QLCNIC_DUMP_MASK_MAX   0xff
+
+struct qlcnic_common_entry_hdr {
+       u32     type;
+       u32     offset;
+       u32     cap_size;
+       u8      mask;
+       u8      rsvd[2];
+       u8      flags;
+} __packed;
+
+struct __crb {
+       u32     addr;
+       u8      stride;
+       u8      rsvd1[3];
+       u32     data_size;
+       u32     no_ops;
+       u32     rsvd2[4];
+} __packed;
+
+struct __ctrl {
+       u32     addr;
+       u8      stride;
+       u8      index_a;
+       u16     timeout;
+       u32     data_size;
+       u32     no_ops;
+       u8      opcode;
+       u8      index_v;
+       u8      shl_val;
+       u8      shr_val;
+       u32     val1;
+       u32     val2;
+       u32     val3;
+} __packed;
+
+struct __cache {
+       u32     addr;
+       u16     stride;
+       u16     init_tag_val;
+       u32     size;
+       u32     no_ops;
+       u32     ctrl_addr;
+       u32     ctrl_val;
+       u32     read_addr;
+       u8      read_addr_stride;
+       u8      read_addr_num;
+       u8      rsvd1[2];
+} __packed;
+
+struct __ocm {
+       u8      rsvd[8];
+       u32     size;
+       u32     no_ops;
+       u8      rsvd1[8];
+       u32     read_addr;
+       u32     read_addr_stride;
+} __packed;
+
+struct __mem {
+       u8      rsvd[24];
+       u32     addr;
+       u32     size;
+} __packed;
+
+struct __mux {
+       u32     addr;
+       u8      rsvd[4];
+       u32     size;
+       u32     no_ops;
+       u32     val;
+       u32     val_stride;
+       u32     read_addr;
+       u8      rsvd2[4];
+} __packed;
+
+struct __queue {
+       u32     sel_addr;
+       u16     stride;
+       u8      rsvd[2];
+       u32     size;
+       u32     no_ops;
+       u8      rsvd2[8];
+       u32     read_addr;
+       u8      read_addr_stride;
+       u8      read_addr_cnt;
+       u8      rsvd3[2];
+} __packed;
+
+struct qlcnic_dump_entry {
+       struct qlcnic_common_entry_hdr hdr;
+       union {
+               struct __crb    crb;
+               struct __cache  cache;
+               struct __ocm    ocm;
+               struct __mem    mem;
+               struct __mux    mux;
+               struct __queue  que;
+               struct __ctrl   ctrl;
+       } region;
+} __packed;
+
+enum qlcnic_minidump_opcode {
+       QLCNIC_DUMP_NOP         = 0,
+       QLCNIC_DUMP_READ_CRB    = 1,
+       QLCNIC_DUMP_READ_MUX    = 2,
+       QLCNIC_DUMP_QUEUE       = 3,
+       QLCNIC_DUMP_BRD_CONFIG  = 4,
+       QLCNIC_DUMP_READ_OCM    = 6,
+       QLCNIC_DUMP_PEG_REG     = 7,
+       QLCNIC_DUMP_L1_DTAG     = 8,
+       QLCNIC_DUMP_L1_ITAG     = 9,
+       QLCNIC_DUMP_L1_DATA     = 11,
+       QLCNIC_DUMP_L1_INST     = 12,
+       QLCNIC_DUMP_L2_DTAG     = 21,
+       QLCNIC_DUMP_L2_ITAG     = 22,
+       QLCNIC_DUMP_L2_DATA     = 23,
+       QLCNIC_DUMP_L2_INST     = 24,
+       QLCNIC_DUMP_READ_ROM    = 71,
+       QLCNIC_DUMP_READ_MEM    = 72,
+       QLCNIC_DUMP_READ_CTRL   = 98,
+       QLCNIC_DUMP_TLHDR       = 99,
+       QLCNIC_DUMP_RDEND       = 255
+};
+
+struct qlcnic_dump_operations {
+       enum qlcnic_minidump_opcode opcode;
+       u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
+                      __le32 *);
+};
+
+static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
+{
+       u32 dest;
+       void __iomem *window_reg;
+
+       dest = addr & 0xFFFF0000;
+       window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
+       writel(dest, window_reg);
+       readl(window_reg);
+       window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+       *data = readl(window_reg);
+}
+
+static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
+{
+       u32 dest;
+       void __iomem *window_reg;
+
+       dest = addr & 0xFFFF0000;
+       window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
+       writel(dest, window_reg);
+       readl(window_reg);
+       window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+       writel(data, window_reg);
+       readl(window_reg);
+}
+
+/* FW dump related functions */
+static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i;
+       u32 addr, data;
+       struct __crb *crb = &entry->region.crb;
+       void __iomem *base = adapter->ahw->pci_base0;
+
+       addr = crb->addr;
+
+       for (i = 0; i < crb->no_ops; i++) {
+               qlcnic_read_dump_reg(addr, base, &data);
+               *buffer++ = cpu_to_le32(addr);
+               *buffer++ = cpu_to_le32(data);
+               addr += crb->stride;
+       }
+       return crb->no_ops * 2 * sizeof(u32);
+}
+
+static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
+                           struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i, k, timeout = 0;
+       void __iomem *base = adapter->ahw->pci_base0;
+       u32 addr, data;
+       u8 opcode, no_ops;
+       struct __ctrl *ctr = &entry->region.ctrl;
+       struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
+
+       addr = ctr->addr;
+       no_ops = ctr->no_ops;
+
+       for (i = 0; i < no_ops; i++) {
+               k = 0;
+               opcode = 0;
+               for (k = 0; k < 8; k++) {
+                       if (!(ctr->opcode & (1 << k)))
+                               continue;
+                       switch (1 << k) {
+                       case QLCNIC_DUMP_WCRB:
+                               qlcnic_write_dump_reg(addr, base, ctr->val1);
+                               break;
+                       case QLCNIC_DUMP_RWCRB:
+                               qlcnic_read_dump_reg(addr, base, &data);
+                               qlcnic_write_dump_reg(addr, base, data);
+                               break;
+                       case QLCNIC_DUMP_ANDCRB:
+                               qlcnic_read_dump_reg(addr, base, &data);
+                               qlcnic_write_dump_reg(addr, base,
+                                                     data & ctr->val2);
+                               break;
+                       case QLCNIC_DUMP_ORCRB:
+                               qlcnic_read_dump_reg(addr, base, &data);
+                               qlcnic_write_dump_reg(addr, base,
+                                                     data | ctr->val3);
+                               break;
+                       case QLCNIC_DUMP_POLLCRB:
+                               while (timeout <= ctr->timeout) {
+                                       qlcnic_read_dump_reg(addr, base, &data);
+                                       if ((data & ctr->val2) == ctr->val1)
+                                               break;
+                                       msleep(1);
+                                       timeout++;
+                               }
+                               if (timeout > ctr->timeout) {
+                                       dev_info(&adapter->pdev->dev,
+                                       "Timed out, aborting poll CRB\n");
+                                       return -EINVAL;
+                               }
+                               break;
+                       case QLCNIC_DUMP_RD_SAVE:
+                               if (ctr->index_a)
+                                       addr = t_hdr->saved_state[ctr->index_a];
+                               qlcnic_read_dump_reg(addr, base, &data);
+                               t_hdr->saved_state[ctr->index_v] = data;
+                               break;
+                       case QLCNIC_DUMP_WRT_SAVED:
+                               if (ctr->index_v)
+                                       data = t_hdr->saved_state[ctr->index_v];
+                               else
+                                       data = ctr->val1;
+                               if (ctr->index_a)
+                                       addr = t_hdr->saved_state[ctr->index_a];
+                               qlcnic_write_dump_reg(addr, base, data);
+                               break;
+                       case QLCNIC_DUMP_MOD_SAVE_ST:
+                               data = t_hdr->saved_state[ctr->index_v];
+                               data <<= ctr->shl_val;
+                               data >>= ctr->shr_val;
+                               if (ctr->val2)
+                                       data &= ctr->val2;
+                               data |= ctr->val3;
+                               data += ctr->val1;
+                               t_hdr->saved_state[ctr->index_v] = data;
+                               break;
+                       default:
+                               dev_info(&adapter->pdev->dev,
+                                        "Unknown opcode\n");
+                               break;
+                       }
+               }
+               addr += ctr->stride;
+       }
+       return 0;
+}
+
+/* Capture a MUX region of the firmware dump template.
+ *
+ * For each of mux->no_ops operations: write the current select value to
+ * the mux control register, read the muxed data register back, and store
+ * the (select, data) pair into the dump buffer.
+ * Returns the number of bytes written to @buffer.
+ */
+static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int loop;
+       u32 val, data = 0;
+       struct __mux *mux = &entry->region.mux;
+       void __iomem *base = adapter->ahw->pci_base0;
+
+       val = mux->val;
+       for (loop = 0; loop < mux->no_ops; loop++) {
+               qlcnic_write_dump_reg(mux->addr, base, val);
+               qlcnic_read_dump_reg(mux->read_addr, base, &data);
+               /* Record both the selector and the value it produced */
+               *buffer++ = cpu_to_le32(val);
+               *buffer++ = cpu_to_le32(data);
+               val += mux->val_stride;
+       }
+       /* Two 32-bit words are stored per operation */
+       return 2 * mux->no_ops * sizeof(u32);
+}
+
+/* Capture a hardware queue region: for each queue id, select the queue
+ * through sel_addr and read read_addr_cnt words starting at read_addr.
+ * Returns the number of bytes written to @buffer.
+ */
+static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i, loop;
+       u32 cnt, addr, data, que_id = 0;
+       void __iomem *base = adapter->ahw->pci_base0;
+       struct __queue *que = &entry->region.que;
+
+       /* NOTE(review): this initial addr assignment is redundant - addr
+        * is reassigned at the top of every iteration of the loop below.
+        */
+       addr = que->read_addr;
+       cnt = que->read_addr_cnt;
+
+       for (loop = 0; loop < que->no_ops; loop++) {
+               /* Select which queue instance to read */
+               qlcnic_write_dump_reg(que->sel_addr, base, que_id);
+               addr = que->read_addr;
+               for (i = 0; i < cnt; i++) {
+                       qlcnic_read_dump_reg(addr, base, &data);
+                       *buffer++ = cpu_to_le32(data);
+                       addr += que->read_addr_stride;
+               }
+               que_id += que->stride;
+       }
+       return que->no_ops * cnt * sizeof(u32);
+}
+
+/* Capture an on-chip memory (OCM) region: read no_ops 32-bit words
+ * directly from the mapped BAR starting at read_addr, stepping by
+ * read_addr_stride.  Returns the number of bytes captured.
+ */
+static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i;
+       u32 data;
+       void __iomem *addr;
+       struct __ocm *ocm = &entry->region.ocm;
+
+       addr = adapter->ahw->pci_base0 + ocm->read_addr;
+       for (i = 0; i < ocm->no_ops; i++) {
+               data = readl(addr);
+               *buffer++ = cpu_to_le32(data);
+               addr += ocm->read_addr_stride;
+       }
+       return ocm->no_ops * sizeof(u32);
+}
+
+/* Capture flash/ROM contents for the dump.
+ *
+ * Acquires the flash hardware semaphore (SEM2), reads rom->size bytes a
+ * 32-bit word at a time through the flash ROM window, then releases the
+ * semaphore.  Returns rom->size (bytes captured).
+ *
+ * NOTE(review): if the semaphore read still fails after MAX_CTL_CHECK
+ * retries, the code falls through and performs the read anyway -
+ * confirm this best-effort behavior is intentional.
+ */
+static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i, count = 0;
+       u32 fl_addr, size, val, lck_val, addr;
+       struct __mem *rom = &entry->region.mem;
+       void __iomem *base = adapter->ahw->pci_base0;
+
+       fl_addr = rom->addr;
+       size = rom->size/4;
+lock_try:
+       lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
+       if (!lck_val && count < MAX_CTL_CHECK) {
+               msleep(10);
+               count++;
+               goto lock_try;
+       }
+       writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
+       for (i = 0; i < size; i++) {
+               /* Program the window base, then read via the window offset */
+               addr = fl_addr & 0xFFFF0000;
+               qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
+               addr = LSW(fl_addr) + FLASH_ROM_DATA;
+               qlcnic_read_dump_reg(addr, base, &val);
+               fl_addr += 4;
+               *buffer++ = cpu_to_le32(val);
+       }
+       /* Reading the unlock register releases the semaphore */
+       readl(base + QLCNIC_FLASH_SEM2_ULK);
+       return rom->size;
+}
+
+/* Capture an L1 cache region: for each of no_ops tag values, write the
+ * tag and the control word, then read read_addr_num words from the read
+ * port.  Returns the number of bytes captured.
+ */
+static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
+                               struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i;
+       u32 cnt, val, data, addr;
+       void __iomem *base = adapter->ahw->pci_base0;
+       struct __cache *l1 = &entry->region.cache;
+
+       val = l1->init_tag_val;
+
+       for (i = 0; i < l1->no_ops; i++) {
+               qlcnic_write_dump_reg(l1->addr, base, val);
+               qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
+               addr = l1->read_addr;
+               cnt = l1->read_addr_num;
+               while (cnt) {
+                       qlcnic_read_dump_reg(addr, base, &data);
+                       *buffer++ = cpu_to_le32(data);
+                       addr += l1->read_addr_stride;
+                       cnt--;
+               }
+               val += l1->stride;
+       }
+       return l1->no_ops * l1->read_addr_num * sizeof(u32);
+}
+
+/* Capture an L2 cache region.  Like the L1 variant, but the control
+ * write is optional and completion is polled: the poll mask and poll
+ * timeout come from the upper 16 bits of ctrl_val.  Returns the number
+ * of bytes captured, or -EINVAL (cast to u32) on poll timeout.
+ */
+static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
+                               struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       int i;
+       u32 cnt, val, data, addr;
+       u8 poll_mask, poll_to, time_out = 0;
+       void __iomem *base = adapter->ahw->pci_base0;
+       struct __cache *l2 = &entry->region.cache;
+
+       val = l2->init_tag_val;
+       /* ctrl_val layout: LSW = control word, MSW = (timeout << 8) | mask */
+       poll_mask = LSB(MSW(l2->ctrl_val));
+       poll_to = MSB(MSW(l2->ctrl_val));
+
+       for (i = 0; i < l2->no_ops; i++) {
+               qlcnic_write_dump_reg(l2->addr, base, val);
+               if (LSW(l2->ctrl_val))
+                       qlcnic_write_dump_reg(l2->ctrl_addr, base,
+                                             LSW(l2->ctrl_val));
+               if (!poll_mask)
+                       goto skip_poll;
+               /* Poll until the busy bits in poll_mask clear */
+               do {
+                       qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
+                       if (!(data & poll_mask))
+                               break;
+                       msleep(1);
+                       time_out++;
+               } while (time_out <= poll_to);
+
+               if (time_out > poll_to) {
+                       dev_err(&adapter->pdev->dev,
+                               "Timeout exceeded in %s, aborting dump\n",
+                               __func__);
+                       return -EINVAL;
+               }
+skip_poll:
+               addr = l2->read_addr;
+               cnt = l2->read_addr_num;
+               while (cnt) {
+                       qlcnic_read_dump_reg(addr, base, &data);
+                       *buffer++ = cpu_to_le32(data);
+                       addr += l2->read_addr_stride;
+                       cnt--;
+               }
+               val += l2->stride;
+       }
+       return l2->no_ops * l2->read_addr_num * sizeof(u32);
+}
+
+/* Capture adapter memory through the MIU test agent, 16 bytes per
+ * transaction.  mem->addr must be 16-byte aligned and mem->size a
+ * multiple of 16.  Returns mem->size on success or -EINVAL (cast to
+ * u32, matching the other handlers) on failure.
+ *
+ * Fixes: the original set ret = -EINVAL on agent timeout but then
+ * unconditionally returned mem->size, reporting success; worse, the
+ * error path was nested inside printk_ratelimit(), so a ratelimited
+ * message silently dropped the error and kept reading stale data.
+ */
+static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
+                             struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       u32 addr, data, test;
+       int i, reg_read;
+       struct __mem *mem = &entry->region.mem;
+       void __iomem *base = adapter->ahw->pci_base0;
+
+       reg_read = mem->size;
+       addr = mem->addr;
+       /* check for data size of multiple of 16 and 16 byte alignment */
+       if ((addr & 0xf) || (reg_read % 16)) {
+               dev_info(&adapter->pdev->dev,
+                        "Unaligned memory addr:0x%x size:0x%x\n",
+                        addr, reg_read);
+               return -EINVAL;
+       }
+
+       mutex_lock(&adapter->ahw->mem_lock);
+
+       while (reg_read != 0) {
+               qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
+               qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
+               qlcnic_write_dump_reg(MIU_TEST_CTR, base,
+                                     TA_CTL_ENABLE | TA_CTL_START);
+
+               /* Wait for the test agent to finish the transaction */
+               for (i = 0; i < MAX_CTL_CHECK; i++) {
+                       qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
+                       if (!(test & TA_CTL_BUSY))
+                               break;
+               }
+               if (i == MAX_CTL_CHECK) {
+                       /* Always fail the capture; only the log message
+                        * is subject to ratelimiting.
+                        */
+                       if (printk_ratelimit())
+                               dev_err(&adapter->pdev->dev,
+                                       "failed to read through agent\n");
+                       mutex_unlock(&adapter->ahw->mem_lock);
+                       return -EINVAL;
+               }
+               /* One agent transaction yields four 32-bit data words */
+               for (i = 0; i < 4; i++) {
+                       qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
+                                            &data);
+                       *buffer++ = cpu_to_le32(data);
+               }
+               addr += 16;
+               reg_read -= 16;
+       }
+
+       mutex_unlock(&adapter->ahw->mem_lock);
+       return mem->size;
+}
+
+/* No-op capture handler: mark the template entry as skipped and capture
+ * zero bytes (used for header/end/no-data entries).
+ */
+static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
+                          struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+       return 0;
+}
+
+/* Opcode -> capture handler dispatch table; searched linearly by
+ * qlcnic_dump_fw() for every template entry.
+ */
+static const struct qlcnic_dump_operations fw_dump_ops[] = {
+       { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
+       { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
+       { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
+       { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
+       { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
+       { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
+       { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
+       { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
+       { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
+       { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
+       { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
+       { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
+       { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
+       { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
+       { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
+       { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
+       { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
+       { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
+       { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
+       { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
+};
+
+/* Walk the template and collect dump for each entry in the dump template */
+/* Sanity-check one captured entry: the byte count the handler produced
+ * must equal the capture size recorded in the entry header.  Returns 1
+ * when they agree, otherwise logs the mismatch and returns 0.
+ */
+static int
+qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
+                       u32 size)
+{
+       if (size == entry->hdr.cap_size)
+               return 1;
+
+       dev_info(dev,
+                "Invalid dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
+                entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
+       dev_info(dev, "Aborting further dump capture\n");
+       return 0;
+}
+
+/* Walk the firmware dump template and run the capture handler for every
+ * entry whose capability mask is enabled in drv_cap_mask.  On success
+ * the buffer is retained (fw_dump->clr set) and a udev event announces
+ * it; on failure the buffer is freed and a negative errno is returned.
+ */
+int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
+{
+       __le32 *buffer;
+       char mesg[64];
+       char *msg[] = {mesg, NULL};       /* NULL-terminated uevent env */
+       int i, k, ops_cnt, ops_index, dump_size = 0;
+       u32 entry_offset, dump, no_entries, buf_offset = 0;
+       struct qlcnic_dump_entry *entry;
+       struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+       struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
+
+       /* Refuse to overwrite a dump user space has not collected yet */
+       if (fw_dump->clr) {
+               dev_info(&adapter->pdev->dev,
+                        "Previous dump not cleared, not capturing dump\n");
+               return -EIO;
+       }
+       /* Calculate the size for dump data area only */
+       for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
+               if (i & tmpl_hdr->drv_cap_mask)
+                       dump_size += tmpl_hdr->cap_sizes[k];
+       if (!dump_size)
+               return -EIO;
+
+       fw_dump->data = vzalloc(dump_size);
+       if (!fw_dump->data) {
+               dev_info(&adapter->pdev->dev,
+                        "Unable to allocate (%d KB) for fw dump\n",
+                        dump_size / 1024);
+               return -ENOMEM;
+       }
+       buffer = fw_dump->data;
+       fw_dump->size = dump_size;
+       no_entries = tmpl_hdr->num_entries;
+       ops_cnt = ARRAY_SIZE(fw_dump_ops);
+       entry_offset = tmpl_hdr->offset;
+       /* Stamp the template with driver and firmware versions */
+       tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
+       tmpl_hdr->sys_info[1] = adapter->fw_version;
+
+       for (i = 0; i < no_entries; i++) {
+               entry = (void *)tmpl_hdr + entry_offset;
+               if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+                       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+                       entry_offset += entry->hdr.offset;
+                       continue;
+               }
+               /* Find the handler for this entry */
+               ops_index = 0;
+               while (ops_index < ops_cnt) {
+                       if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
+                               break;
+                       ops_index++;
+               }
+               if (ops_index == ops_cnt) {
+                       dev_info(&adapter->pdev->dev,
+                                "Invalid entry type %d, exiting dump\n",
+                                entry->hdr.type);
+                       goto error;
+               }
+               /* Collect dump for this entry.  Handlers may return
+                * -EINVAL cast to u32; such values fail the size check
+                * below and the entry is marked skipped.
+                */
+               dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
+               if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
+                                                    dump))
+                       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+               /* Advance by the template-declared capture size, not the
+                * handler's return value
+                */
+               buf_offset += entry->hdr.cap_size;
+               entry_offset += entry->hdr.offset;
+               buffer = fw_dump->data + buf_offset;
+       }
+       if (dump_size != buf_offset) {
+               dev_info(&adapter->pdev->dev,
+                        "Captured(%d) and expected size(%d) do not match\n",
+                        buf_offset, dump_size);
+               goto error;
+       } else {
+               fw_dump->clr = 1;
+               snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
+                        adapter->netdev->name);
+               dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
+                        fw_dump->size);
+               /* Send a udev event to notify availability of FW dump */
+               kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
+               return 0;
+       }
+error:
+       vfree(fw_dump->data);
+       return -EINVAL;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
new file mode 100644 (file)
index 0000000..10a702a
--- /dev/null
@@ -0,0 +1,962 @@
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+
+#include "qlcnic.h"
+
+#include <linux/swab.h>
+#include <linux/dma-mapping.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+#include <linux/log2.h>
+
+#include <linux/sysfs.h>
+
+/* Bridged mode cannot be configured from a virtual function. */
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+       return -EOPNOTSUPP;
+}
+
+/* LED control is not available from a virtual function. */
+int qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+       return -EOPNOTSUPP;
+}
+
+/* sysfs store for bridged_mode: accepts "0" or "1".  Requires firmware
+ * bridging capability and an up interface.  Returns @len on success,
+ * -EINVAL on any failure.
+ */
+static ssize_t qlcnic_store_bridged_mode(struct device *dev,
+                                        struct device_attribute *attr,
+                                        const char *buf, size_t len)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       unsigned long new;
+       int ret = -EINVAL;
+
+       if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+               goto err_out;
+
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               goto err_out;
+
+       /* base 2 restricts the input to 0 or 1 */
+       if (strict_strtoul(buf, 2, &new))
+               goto err_out;
+
+       if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
+               ret = len;
+
+err_out:
+       return ret;
+}
+
+/* sysfs show for bridged_mode: reports whether bridging is enabled;
+ * always 0 when the firmware lacks the bridging capability.
+ */
+static ssize_t qlcnic_show_bridged_mode(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int enabled = 0;
+
+       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+               enabled = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+       return sprintf(buf, "%d\n", enabled);
+}
+
+/* sysfs store for diag_mode: accepts "0" or "1" and toggles the
+ * QLCNIC_DIAG_ENABLED flag only when the requested state differs from
+ * the current one.  Returns @len on success.
+ */
+static ssize_t qlcnic_store_diag_mode(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t len)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       unsigned long new;
+
+       if (strict_strtoul(buf, 2, &new))
+               return -EINVAL;
+
+       /* XOR flips the flag only when current and requested states differ */
+       if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+               adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+       return len;
+}
+
+/* sysfs show for diag_mode: 1 when QLCNIC_DIAG_ENABLED is set. */
+static ssize_t qlcnic_show_diag_mode(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int diag_on = (adapter->flags & QLCNIC_DIAG_ENABLED) ? 1 : 0;
+
+       return sprintf(buf, "%d\n", diag_on);
+}
+
+/* Split a 16-bit beacon word into LED state (MSB) and blink rate (LSB)
+ * and range-check both.  State 0 (LED off) forces the default rate.
+ * Returns 0 on success, -EINVAL for an out-of-range state or rate.
+ */
+static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
+                                 u8 *state, u8 *rate)
+{
+       *rate = LSB(beacon);
+       *state = MSB(beacon);
+
+       QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
+
+       if (*state == 0) {
+               /* LED off: the rate is irrelevant, use the default */
+               *rate = __QLCNIC_MAX_LED_RATE;
+               return 0;
+       }
+
+       if (*state > __QLCNIC_MAX_LED_STATE)
+               return -EINVAL;
+
+       if (*rate == 0 || *rate > __QLCNIC_MAX_LED_RATE)
+               return -EINVAL;
+
+       return 0;
+}
+
+/* sysfs store for the "beacon" LED attribute.
+ *
+ * Expects exactly two raw bytes: MSB = LED state, LSB = blink rate.
+ * Brings up diagnostic resources when the interface is down, programs
+ * the LED via qlcnic_config_led(), then tears the diag resources back
+ * down.  Returns @len on success or a negative errno.
+ */
+static ssize_t qlcnic_store_beacon(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t len)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int max_sds_rings = adapter->max_sds_rings;
+       u16 beacon;
+       u8 b_state, b_rate;
+       int err;
+
+       if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+               dev_warn(dev,
+                        "LED test not supported in non privileged mode\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (len != sizeof(u16))
+               return QL_STATUS_INVALID_PARAM;
+
+       memcpy(&beacon, buf, sizeof(u16));
+       err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
+       if (err)
+               return err;
+
+       /* Already in the requested state: nothing to program */
+       if (adapter->ahw->beacon_state == b_state)
+               return len;
+
+       rtnl_lock();
+
+       /* Starting a fresh LED test: claim the LED ownership bit */
+       if (!adapter->ahw->beacon_state)
+               if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+                       rtnl_unlock();
+                       return -EBUSY;
+               }
+
+       if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+               err = -EIO;
+               goto out;
+       }
+
+       /* Interface down: temporarily allocate diag resources for the test */
+       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+               err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
+               if (err)
+                       goto out;
+               set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+       }
+
+       err = qlcnic_config_led(adapter, b_state, b_rate);
+
+       if (!err) {
+               err = len;
+               adapter->ahw->beacon_state = b_state;
+       }
+
+       if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
+               qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
+
+ out:
+       /* Release LED ownership whenever the LED ends up off */
+       if (!adapter->ahw->beacon_state)
+               clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+       rtnl_unlock();
+
+       return err;
+}
+
+/* sysfs show for "beacon": report the last programmed LED state. */
+static ssize_t qlcnic_show_beacon(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
+}
+
+/* Validate a CRB sysfs access: diag mode must be enabled, and the
+ * access must be either a 4-byte CRB register or an 8-byte CAMQM
+ * window access, with matching size and natural alignment.
+ */
+static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+                                    loff_t offset, size_t size)
+{
+       size_t want = 4;
+
+       if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+               return -EIO;
+
+       if (offset < QLCNIC_PCI_CRBSPACE) {
+               /* Below CRB space, only the CAMQM window is accessible */
+               if (!ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
+                                  QLCNIC_PCI_CAMQM_END))
+                       return -EINVAL;
+               want = 8;
+       }
+
+       if (size != want || (offset & (want - 1)))
+               return -EINVAL;
+
+       return 0;
+}
+
+/* sysfs binary read of the CRB register space.  The CAMQM range is read
+ * as one 64-bit value, everything else as a 32-bit register.  Size and
+ * alignment were already checked by qlcnic_sysfs_validate_crb().
+ */
+static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+                                    struct bin_attribute *attr, char *buf,
+                                    loff_t offset, size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       u32 data;
+       u64 qmdata;
+       int ret;
+
+       ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+
+       if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+               qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+               memcpy(buf, &qmdata, size);
+       } else {
+               data = QLCRD32(adapter, offset);
+               memcpy(buf, &data, size);
+       }
+       return size;
+}
+
+/* sysfs binary write of the CRB register space - mirror image of
+ * qlcnic_sysfs_read_crb(): 64-bit CAMQM write or 32-bit CRB write.
+ */
+static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+                                     struct bin_attribute *attr, char *buf,
+                                     loff_t offset, size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       u32 data;
+       u64 qmdata;
+       int ret;
+
+       ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+
+       if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+               memcpy(&qmdata, buf, size);
+               qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+       } else {
+               memcpy(&data, buf, size);
+               QLCWR32(adapter, offset, data);
+       }
+       return size;
+}
+
+/* Memory-window accesses require diag mode and must be exactly 8 bytes,
+ * 8-byte aligned.
+ */
+static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+                                    loff_t offset, size_t size)
+{
+       if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+               return -EIO;
+
+       if (size != 8 || (offset & 0x7))
+               return -EIO;
+
+       return 0;
+}
+
+/* sysfs binary read of one 64-bit word of adapter memory through the
+ * 2M window.  Size/alignment validated by qlcnic_sysfs_validate_mem().
+ */
+static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+                                    struct bin_attribute *attr, char *buf,
+                                    loff_t offset, size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       u64 data;
+       int ret;
+
+       ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+
+       if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+               return -EIO;
+
+       memcpy(buf, &data, size);
+
+       return size;
+}
+
+/* sysfs binary write of one 64-bit word of adapter memory through the
+ * 2M window - mirror image of qlcnic_sysfs_read_mem().
+ */
+static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+                                     struct bin_attribute *attr, char *buf,
+                                     loff_t offset, size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       u64 data;
+       int ret;
+
+       ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+       if (ret != 0)
+               return ret;
+
+       memcpy(&data, buf, size);
+
+       if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+               return -EIO;
+
+       return size;
+}
+
+/* Check that every port-mirroring record names valid NIC-type functions
+ * and that source and destination sit on the same eswitch (phy_port).
+ * Returns 0 if all records are valid, QL_STATUS_INVALID_PARAM otherwise.
+ */
+static int validate_pm_config(struct qlcnic_adapter *adapter,
+                             struct qlcnic_pm_func_cfg *pm_cfg, int count)
+{
+       u8 src, dst;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               src = pm_cfg[i].pci_func;
+               dst = pm_cfg[i].dest_npar;
+
+               if (src >= QLCNIC_MAX_PCI_FUNC || dst >= QLCNIC_MAX_PCI_FUNC)
+                       return QL_STATUS_INVALID_PARAM;
+
+               if (adapter->npars[src].type != QLCNIC_TYPE_NIC ||
+                   adapter->npars[dst].type != QLCNIC_TYPE_NIC)
+                       return QL_STATUS_INVALID_PARAM;
+
+               /* Mirroring is only possible within one eswitch */
+               if (adapter->npars[src].phy_port !=
+                   adapter->npars[dst].phy_port)
+                       return QL_STATUS_INVALID_PARAM;
+       }
+
+       return 0;
+}
+
+/* sysfs binary write of port-mirroring configuration.  The buffer holds
+ * an array of qlcnic_pm_func_cfg records; all are validated up front,
+ * each is applied via qlcnic_config_port_mirroring(), and the driver's
+ * npar state is updated to match.  Returns @size on success.
+ */
+static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
+                                           struct kobject *kobj,
+                                           struct bin_attribute *attr,
+                                           char *buf, loff_t offset,
+                                           size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_pm_func_cfg *pm_cfg;
+       u32 id, action, pci_func;
+       int count, rem, i, ret;
+
+       /* The write must be a whole number of records */
+       count   = size / sizeof(struct qlcnic_pm_func_cfg);
+       rem     = size % sizeof(struct qlcnic_pm_func_cfg);
+       if (rem)
+               return QL_STATUS_INVALID_PARAM;
+
+       pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
+
+       ret = validate_pm_config(adapter, pm_cfg, count);
+       if (ret)
+               return ret;
+       for (i = 0; i < count; i++) {
+               pci_func = pm_cfg[i].pci_func;
+               action = !!pm_cfg[i].action;
+               id = adapter->npars[pci_func].phy_port;
+               ret = qlcnic_config_port_mirroring(adapter, id, action,
+                                                  pci_func);
+               if (ret)
+                       return ret;
+       }
+
+       /* All firmware calls succeeded: record the new state */
+       for (i = 0; i < count; i++) {
+               pci_func = pm_cfg[i].pci_func;
+               id = adapter->npars[pci_func].phy_port;
+               adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
+               adapter->npars[pci_func].dest_npar = id;
+       }
+       return size;
+}
+
+/* sysfs binary read of port-mirroring configuration: one record per PCI
+ * function, populated only for NIC-type functions.
+ *
+ * Fixes a kernel stack information leak: entries for non-NIC functions
+ * were skipped but the whole uninitialized on-stack array was still
+ * copied out to user space.
+ */
+static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
+                                          struct kobject *kobj,
+                                          struct bin_attribute *attr,
+                                          char *buf, loff_t offset,
+                                          size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
+       int i;
+
+       if (size != sizeof(pm_cfg))
+               return QL_STATUS_INVALID_PARAM;
+
+       /* Non-NIC entries are skipped below; zero the array first so no
+        * uninitialized stack memory reaches user space.
+        */
+       memset(&pm_cfg, 0, sizeof(pm_cfg));
+
+       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+               if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+                       continue;
+               pm_cfg[i].action = adapter->npars[i].enable_pm;
+               pm_cfg[i].dest_npar = 0;
+               pm_cfg[i].pci_func = i;
+       }
+       memcpy(buf, &pm_cfg, size);
+
+       return size;
+}
+
+/* Validate an array of eswitch configuration requests from sysfs.
+ *
+ * Checks the function number range, NIC type (management function
+ * only), and the per-opcode constraints: for PORT_DEFAULTS on a
+ * function that is not NON_PRIV, mac_anti_spoof must be 0 and both
+ * mac_override and promisc_mode must be 1; VLAN add/del require a valid
+ * op_type (and a valid vlan_id for add).
+ */
+static int validate_esw_config(struct qlcnic_adapter *adapter,
+                              struct qlcnic_esw_func_cfg *esw_cfg, int count)
+{
+       u32 op_mode;
+       u8 pci_func;
+       int i;
+
+       /* Per-function driver operating modes, read from hardware */
+       op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+
+       for (i = 0; i < count; i++) {
+               pci_func = esw_cfg[i].pci_func;
+               if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+                       return QL_STATUS_INVALID_PARAM;
+
+               if (adapter->op_mode == QLCNIC_MGMT_FUNC) {
+                       if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+                               return QL_STATUS_INVALID_PARAM;
+               }
+
+               switch (esw_cfg[i].op_mode) {
+               case QLCNIC_PORT_DEFAULTS:
+                       if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
+                                           QLCNIC_NON_PRIV_FUNC) {
+                               if (esw_cfg[i].mac_anti_spoof != 0)
+                                       return QL_STATUS_INVALID_PARAM;
+                               if (esw_cfg[i].mac_override != 1)
+                                       return QL_STATUS_INVALID_PARAM;
+                               if (esw_cfg[i].promisc_mode != 1)
+                                       return QL_STATUS_INVALID_PARAM;
+                       }
+                       break;
+               case QLCNIC_ADD_VLAN:
+                       if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
+                               return QL_STATUS_INVALID_PARAM;
+                       if (!esw_cfg[i].op_type)
+                               return QL_STATUS_INVALID_PARAM;
+                       break;
+               case QLCNIC_DEL_VLAN:
+                       if (!esw_cfg[i].op_type)
+                               return QL_STATUS_INVALID_PARAM;
+                       break;
+               default:
+                       return QL_STATUS_INVALID_PARAM;
+               }
+       }
+       return 0;
+}
+
+/* sysfs binary write of eswitch port configuration.
+ *
+ * Validates all records, pushes them to firmware (management function
+ * only), applies records targeting this function's own port locally,
+ * and finally caches the result in the npar table when running as the
+ * management function.  Returns @size on success.
+ */
+static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
+                                            struct kobject *kobj,
+                                            struct bin_attribute *attr,
+                                            char *buf, loff_t offset,
+                                            size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_esw_func_cfg *esw_cfg;
+       struct qlcnic_npar_info *npar;
+       int count, rem, i, ret;
+       u8 pci_func, op_mode = 0;
+
+       /* The write must be a whole number of records */
+       count   = size / sizeof(struct qlcnic_esw_func_cfg);
+       rem     = size % sizeof(struct qlcnic_esw_func_cfg);
+       if (rem)
+               return QL_STATUS_INVALID_PARAM;
+
+       esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
+       ret = validate_esw_config(adapter, esw_cfg, count);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < count; i++) {
+               if (adapter->op_mode == QLCNIC_MGMT_FUNC) {
+                       if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
+                               return QL_STATUS_INVALID_PARAM;
+               }
+
+               /* Local application only for this function's own port */
+               if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
+                       continue;
+
+               /* Re-read the current port config while preserving the
+                * requested opcode and function number
+                */
+               op_mode = esw_cfg[i].op_mode;
+               qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
+               esw_cfg[i].op_mode = op_mode;
+               esw_cfg[i].pci_func = adapter->ahw->pci_func;
+
+               switch (esw_cfg[i].op_mode) {
+               case QLCNIC_PORT_DEFAULTS:
+                       qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+                       break;
+               case QLCNIC_ADD_VLAN:
+                       qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+                       break;
+               case QLCNIC_DEL_VLAN:
+                       esw_cfg[i].vlan_id = 0;
+                       qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+                       break;
+               }
+       }
+
+       if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+               goto out;
+
+       /* Management function: mirror the accepted config into npars[] */
+       for (i = 0; i < count; i++) {
+               pci_func = esw_cfg[i].pci_func;
+               npar = &adapter->npars[pci_func];
+               switch (esw_cfg[i].op_mode) {
+               case QLCNIC_PORT_DEFAULTS:
+                       npar->promisc_mode = esw_cfg[i].promisc_mode;
+                       npar->mac_override = esw_cfg[i].mac_override;
+                       npar->offload_flags = esw_cfg[i].offload_flags;
+                       npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
+                       npar->discard_tagged = esw_cfg[i].discard_tagged;
+                       break;
+               case QLCNIC_ADD_VLAN:
+                       npar->pvid = esw_cfg[i].vlan_id;
+                       break;
+               case QLCNIC_DEL_VLAN:
+                       npar->pvid = 0;
+                       break;
+               }
+       }
+out:
+       return size;
+}
+
+/* sysfs read handler for "esw_config": reports the eswitch port config
+ * of every NIC-type PCI function.  @size must match the whole array.
+ * Returns @size on success, QL_STATUS_INVALID_PARAM otherwise.
+ */
+static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
+                                           struct kobject *kobj,
+                                           struct bin_attribute *attr,
+                                           char *buf, loff_t offset,
+                                           size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
+       u8 i;
+
+       if (size != sizeof(esw_cfg))
+               return QL_STATUS_INVALID_PARAM;
+
+       /* Zero the array up front: non-NIC functions are skipped in the
+        * loop below, and copying uninitialized stack memory (including
+        * struct padding) to user space would leak kernel data.
+        */
+       memset(&esw_cfg, 0, size);
+
+       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+               if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+                       continue;
+               esw_cfg[i].pci_func = i;
+               if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
+                       return QL_STATUS_INVALID_PARAM;
+       }
+       memcpy(buf, &esw_cfg, size);
+
+       return size;
+}
+
+/* Sanity-check a user-supplied array of NPAR function configs before it
+ * is applied: each entry must name an in-range, NIC-type PCI function
+ * and carry bandwidth values accepted by IS_VALID_BW().
+ * Returns 0 if all @count entries are valid, QL_STATUS_INVALID_PARAM
+ * on the first bad entry.
+ */
+static int validate_npar_config(struct qlcnic_adapter *adapter,
+                               struct qlcnic_npar_func_cfg *np_cfg,
+                               int count)
+{
+       u8 pci_func, i;
+
+       for (i = 0; i < count; i++) {
+               pci_func = np_cfg[i].pci_func;
+               if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+                       return QL_STATUS_INVALID_PARAM;
+
+               if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+                       return QL_STATUS_INVALID_PARAM;
+
+               if (!IS_VALID_BW(np_cfg[i].min_bw) ||
+                   !IS_VALID_BW(np_cfg[i].max_bw))
+                       return QL_STATUS_INVALID_PARAM;
+       }
+       return 0;
+}
+
+/* sysfs write handler for "npar_config": applies an array of
+ * struct qlcnic_npar_func_cfg entries from user space.  The array is
+ * validated as a whole, each entry is pushed to firmware via
+ * qlcnic_set_nic_info(), and the accepted bandwidth limits are cached
+ * in adapter->npars[].  Returns @size on success or an error code.
+ */
+static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
+                                             struct kobject *kobj,
+                                             struct bin_attribute *attr,
+                                             char *buf, loff_t offset,
+                                             size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_info nic_info;
+       struct qlcnic_npar_func_cfg *np_cfg;
+       int i, count, rem, ret;
+       u8 pci_func;
+
+       /* reject buffers that are not a whole number of entries */
+       count   = size / sizeof(struct qlcnic_npar_func_cfg);
+       rem     = size % sizeof(struct qlcnic_npar_func_cfg);
+       if (rem)
+               return QL_STATUS_INVALID_PARAM;
+
+       np_cfg = (struct qlcnic_npar_func_cfg *)buf;
+       ret = validate_npar_config(adapter, np_cfg, count);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < count; i++) {
+               pci_func = np_cfg[i].pci_func;
+               ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+               if (ret)
+                       return ret;
+               nic_info.pci_func = pci_func;
+               nic_info.min_tx_bw = np_cfg[i].min_bw;
+               nic_info.max_tx_bw = np_cfg[i].max_bw;
+               ret = qlcnic_set_nic_info(adapter, &nic_info);
+               if (ret)
+                       return ret;
+               /* Cache by PCI function, not by array position: entries
+                * need not arrive in function order, and npars[] is
+                * indexed by pci_func everywhere else (see
+                * validate_npar_config() and the esw_config writer).
+                */
+               adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
+               adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
+       }
+
+       return size;
+}
+
+/* sysfs read handler for "npar_config": reports per-function NIC info
+ * (op mode, port, capabilities, bandwidth, queue counts) for every
+ * NIC-type PCI function.  @size must match the whole array.
+ */
+static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
+                                            struct kobject *kobj,
+                                            struct bin_attribute *attr,
+                                            char *buf, loff_t offset,
+                                            size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_info nic_info;
+       struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+       int i, ret;
+
+       if (size != sizeof(np_cfg))
+               return QL_STATUS_INVALID_PARAM;
+
+       /* Zero the array up front: non-NIC functions are skipped in the
+        * loop below, and copying uninitialized stack memory (including
+        * struct padding) to user space would leak kernel data.
+        */
+       memset(&np_cfg, 0, size);
+
+       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+               if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+                       continue;
+               ret = qlcnic_get_nic_info(adapter, &nic_info, i);
+               if (ret)
+                       return ret;
+
+               np_cfg[i].pci_func = i;
+               np_cfg[i].op_mode = (u8)nic_info.op_mode;
+               np_cfg[i].port_num = nic_info.phys_port;
+               np_cfg[i].fw_capab = nic_info.capabilities;
+               np_cfg[i].min_bw = nic_info.min_tx_bw;
+               np_cfg[i].max_bw = nic_info.max_tx_bw;
+               np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
+               np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
+       }
+       memcpy(buf, &np_cfg, size);
+       return size;
+}
+
+/* sysfs read handler for "port_stats": @offset selects the PCI function
+ * whose rx and tx port counters are fetched from firmware and copied to
+ * user space.  @size must exactly match struct qlcnic_esw_statistics.
+ */
+static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
+                                          struct kobject *kobj,
+                                          struct bin_attribute *attr,
+                                          char *buf, loff_t offset,
+                                          size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_esw_statistics port_stats;
+       int ret;
+
+       if (size != sizeof(struct qlcnic_esw_statistics))
+               return QL_STATUS_INVALID_PARAM;
+
+       if (offset >= QLCNIC_MAX_PCI_FUNC)
+               return QL_STATUS_INVALID_PARAM;
+
+       /* zero first so no stale stack bytes ever reach user space */
+       memset(&port_stats, 0, size);
+       ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+                                   &port_stats.rx);
+       if (ret)
+               return ret;
+
+       ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+                                   &port_stats.tx);
+       if (ret)
+               return ret;
+
+       memcpy(buf, &port_stats, size);
+       return size;
+}
+
+/* sysfs read handler for "esw_stats": like the port_stats reader but
+ * @offset selects an eswitch (bounded by QLCNIC_NIU_MAX_XG_PORTS)
+ * rather than a PCI function.
+ */
+static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
+                                         struct kobject *kobj,
+                                         struct bin_attribute *attr,
+                                         char *buf, loff_t offset,
+                                         size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_esw_statistics esw_stats;
+       int ret;
+
+       if (size != sizeof(struct qlcnic_esw_statistics))
+               return QL_STATUS_INVALID_PARAM;
+
+       if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+               return QL_STATUS_INVALID_PARAM;
+
+       /* zero first so no stale stack bytes ever reach user space */
+       memset(&esw_stats, 0, size);
+       ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+                                      &esw_stats.rx);
+       if (ret)
+               return ret;
+
+       ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+                                      &esw_stats.tx);
+       if (ret)
+               return ret;
+
+       memcpy(buf, &esw_stats, size);
+       return size;
+}
+
+/* sysfs write handler for "esw_stats": clears the rx and tx counters of
+ * eswitch @offset.  The written data itself is ignored — any write
+ * triggers the clear; @size is only echoed back on success.
+ */
+static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
+                                           struct kobject *kobj,
+                                           struct bin_attribute *attr,
+                                           char *buf, loff_t offset,
+                                           size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int ret;
+
+       if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+               return QL_STATUS_INVALID_PARAM;
+
+       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+                                    QLCNIC_QUERY_RX_COUNTER);
+       if (ret)
+               return ret;
+
+       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+                                    QLCNIC_QUERY_TX_COUNTER);
+       if (ret)
+               return ret;
+
+       return size;
+}
+
+/* sysfs write handler for "port_stats": clears the rx and tx counters
+ * of PCI function @offset (QLCNIC_STATS_PORT scope).  Written data is
+ * ignored; mirrors qlcnic_sysfs_clear_esw_stats above.
+ */
+static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
+                                            struct kobject *kobj,
+                                            struct bin_attribute *attr,
+                                            char *buf, loff_t offset,
+                                            size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       int ret;
+
+       if (offset >= QLCNIC_MAX_PCI_FUNC)
+               return QL_STATUS_INVALID_PARAM;
+
+       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+                                    QLCNIC_QUERY_RX_COUNTER);
+       if (ret)
+               return ret;
+
+       ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+                                    QLCNIC_QUERY_TX_COUNTER);
+       if (ret)
+               return ret;
+
+       return size;
+}
+
+/* sysfs read handler for "pci_config": translates firmware PCI info
+ * into an array of struct qlcnic_pci_func_cfg for user space.
+ * @size must match the whole array; returns @size or an error code.
+ */
+static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
+                                           struct kobject *kobj,
+                                           struct bin_attribute *attr,
+                                           char *buf, loff_t offset,
+                                           size_t size)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+       struct qlcnic_pci_info *pci_info;
+       int i, ret;
+
+       if (size != sizeof(pci_cfg))
+               return QL_STATUS_INVALID_PARAM;
+
+       pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+       if (!pci_info)
+               return -ENOMEM;
+
+       ret = qlcnic_get_pci_info(adapter, pci_info);
+       if (ret) {
+               kfree(pci_info);
+               return ret;
+       }
+
+       /* Zero the array first: the loop fills every member, but struct
+        * padding bytes would otherwise still carry stale stack data
+        * into the memcpy to user space below.
+        */
+       memset(&pci_cfg, 0, size);
+
+       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+               pci_cfg[i].pci_func = pci_info[i].id;
+               pci_cfg[i].func_type = pci_info[i].type;
+               pci_cfg[i].port_num = pci_info[i].default_port;
+               pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
+               pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
+               memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
+       }
+       memcpy(buf, &pci_cfg, size);
+       kfree(pci_info);
+       return size;
+}
+
+/* sysfs attribute descriptors for the qlcnic device.  They are
+ * registered/unregistered by the qlcnic_create_*/qlcnic_remove_*
+ * helpers below, gated on function privilege (op_mode), device state
+ * and eswitch capability.  All are root-writable, world-readable.
+ */
+static struct device_attribute dev_attr_bridged_mode = {
+       .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+       .show = qlcnic_show_bridged_mode,
+       .store = qlcnic_store_bridged_mode,
+};
+
+static struct device_attribute dev_attr_diag_mode = {
+       .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+       .show = qlcnic_show_diag_mode,
+       .store = qlcnic_store_diag_mode,
+};
+
+static struct device_attribute dev_attr_beacon = {
+       .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
+       .show = qlcnic_show_beacon,
+       .store = qlcnic_store_beacon,
+};
+
+static struct bin_attribute bin_attr_crb = {
+       .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_crb,
+       .write = qlcnic_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+       .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_mem,
+       .write = qlcnic_sysfs_write_mem,
+};
+
+static struct bin_attribute bin_attr_npar_config = {
+       .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_npar_config,
+       .write = qlcnic_sysfs_write_npar_config,
+};
+
+/* read-only: there is no PCI-config writer */
+static struct bin_attribute bin_attr_pci_config = {
+       .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_pci_config,
+       .write = NULL,
+};
+
+/* writing to the stats nodes clears the counters */
+static struct bin_attribute bin_attr_port_stats = {
+       .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_get_port_stats,
+       .write = qlcnic_sysfs_clear_port_stats,
+};
+
+static struct bin_attribute bin_attr_esw_stats = {
+       .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_get_esw_stats,
+       .write = qlcnic_sysfs_clear_esw_stats,
+};
+
+static struct bin_attribute bin_attr_esw_config = {
+       .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_esw_config,
+       .write = qlcnic_sysfs_write_esw_config,
+};
+
+static struct bin_attribute bin_attr_pm_config = {
+       .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
+       .size = 0,
+       .read = qlcnic_sysfs_read_pm_config,
+       .write = qlcnic_sysfs_write_pm_config,
+};
+
+/* Expose "bridged_mode" only when firmware advertises bridging
+ * (QLCNIC_FW_CAPABILITY_BDG); creation failure is logged, not fatal.
+ */
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+
+       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+               if (device_create_file(dev, &dev_attr_bridged_mode))
+                       dev_warn(dev,
+                                "failed to create bridged_mode sysfs entry\n");
+}
+
+/* Teardown counterpart of qlcnic_create_sysfs_entries(): remove
+ * "bridged_mode" under the same capability condition it was created.
+ */
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+
+       if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+               device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+/* Create the diagnostic sysfs nodes in layers: port_stats for all
+ * functions, debug/register nodes for privileged functions, then the
+ * eswitch/NPAR nodes only for management functions on healthy devices.
+ * Each gate below returns early, so later nodes imply all earlier ones.
+ * NOTE(review): some dev_info strings lack a trailing "\n" — harmless
+ * but inconsistent with the others.
+ */
+void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+       if (device_create_bin_file(dev, &bin_attr_port_stats))
+               dev_info(dev, "failed to create port stats sysfs entry");
+
+       /* non-privileged functions only get port_stats */
+       if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
+               return;
+       if (device_create_file(dev, &dev_attr_diag_mode))
+               dev_info(dev, "failed to create diag_mode sysfs entry\n");
+       if (device_create_bin_file(dev, &bin_attr_crb))
+               dev_info(dev, "failed to create crb sysfs entry\n")?;
+       if (device_create_bin_file(dev, &bin_attr_mem))
+               dev_info(dev, "failed to create mem sysfs entry\n");
+
+       /* skip firmware-interacting nodes on failed/bad devices */
+       if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
+               return;
+
+       if (device_create_bin_file(dev, &bin_attr_pci_config))
+               dev_info(dev, "failed to create pci config sysfs entry");
+       if (device_create_file(dev, &dev_attr_beacon))
+               dev_info(dev, "failed to create beacon sysfs entry");
+
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+               return;
+       if (device_create_bin_file(dev, &bin_attr_esw_config))
+               dev_info(dev, "failed to create esw config sysfs entry");
+       /* NPAR/PM/eswitch-stats nodes are management-function only */
+       if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+               return;
+       if (device_create_bin_file(dev, &bin_attr_npar_config))
+               dev_info(dev, "failed to create npar config sysfs entry");
+       if (device_create_bin_file(dev, &bin_attr_pm_config))
+               dev_info(dev, "failed to create pm config sysfs entry");
+       if (device_create_bin_file(dev, &bin_attr_esw_stats))
+               dev_info(dev, "failed to create eswitch stats sysfs entry");
+}
+
+/* Teardown counterpart of qlcnic_create_diag_entries(): removes nodes
+ * in the same order and bails out at the same privilege/state/eswitch
+ * gates, so exactly the nodes that were created get removed.
+ */
+void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+       device_remove_bin_file(dev, &bin_attr_port_stats);
+
+       if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
+               return;
+       device_remove_file(dev, &dev_attr_diag_mode);
+       device_remove_bin_file(dev, &bin_attr_crb);
+       device_remove_bin_file(dev, &bin_attr_mem);
+       if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
+               return;
+       device_remove_bin_file(dev, &bin_attr_pci_config);
+       device_remove_file(dev, &dev_attr_beacon);
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+               return;
+       device_remove_bin_file(dev, &bin_attr_esw_config);
+       if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+               return;
+       device_remove_bin_file(dev, &bin_attr_npar_config);
+       device_remove_bin_file(dev, &bin_attr_pm_config);
+       device_remove_bin_file(dev, &bin_attr_esw_stats);
+}
index 953c4f44d505c00d2a7c698fa22c46000269d70e..18238060f1c087ee0d5a8b4990144b841c56ff4b 100644 (file)
 #define USB_PRODUCT_ID_LAN7500         (0x7500)
 #define USB_PRODUCT_ID_LAN7505         (0x7505)
 #define RXW_PADDING                    2
-#define SUPPORTED_WAKE                 (WAKE_UCAST | WAKE_BCAST | \
+#define SUPPORTED_WAKE                 (WAKE_PHY | WAKE_UCAST | WAKE_BCAST | \
                                         WAKE_MCAST | WAKE_ARP | WAKE_MAGIC)
 
+#define SUSPEND_SUSPEND0               (0x01)
+#define SUSPEND_SUSPEND1               (0x02)
+#define SUSPEND_SUSPEND2               (0x04)
+#define SUSPEND_SUSPEND3               (0x08)
+#define SUSPEND_ALLMODES               (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
+                                        SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
+
 #define check_warn(ret, fmt, args...) \
        ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
 
@@ -74,6 +81,7 @@ struct smsc75xx_priv {
        struct mutex dataport_mutex;
        spinlock_t rfe_ctl_lock;
        struct work_struct set_multicast;
+       u8 suspend_flags;
 };
 
 struct usb_context {
@@ -163,36 +171,17 @@ static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
        return __smsc75xx_write_reg(dev, index, data, 0);
 }
 
-static int smsc75xx_set_feature(struct usbnet *dev, u32 feature)
-{
-       if (WARN_ON_ONCE(!dev))
-               return -EINVAL;
-
-       return usbnet_write_cmd_nopm(dev, USB_REQ_SET_FEATURE,
-                                    USB_DIR_OUT | USB_RECIP_DEVICE,
-                                    feature, 0, NULL, 0);
-}
-
-static int smsc75xx_clear_feature(struct usbnet *dev, u32 feature)
-{
-       if (WARN_ON_ONCE(!dev))
-               return -EINVAL;
-
-       return usbnet_write_cmd_nopm(dev, USB_REQ_CLEAR_FEATURE,
-                                    USB_DIR_OUT | USB_RECIP_DEVICE,
-                                    feature, 0, NULL, 0);
-}
-
 /* Loop until the read is completed with timeout
  * called with phy_mutex held */
-static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
+static __must_check int __smsc75xx_phy_wait_not_busy(struct usbnet *dev,
+                                                    int in_pm)
 {
        unsigned long start_time = jiffies;
        u32 val;
        int ret;
 
        do {
-               ret = smsc75xx_read_reg(dev, MII_ACCESS, &val);
+               ret = __smsc75xx_read_reg(dev, MII_ACCESS, &val, in_pm);
                check_warn_return(ret, "Error reading MII_ACCESS\n");
 
                if (!(val & MII_ACCESS_BUSY))
@@ -202,7 +191,8 @@ static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
        return -EIO;
 }
 
-static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+static int __smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
+                               int in_pm)
 {
        struct usbnet *dev = netdev_priv(netdev);
        u32 val, addr;
@@ -211,7 +201,7 @@ static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
        mutex_lock(&dev->phy_mutex);
 
        /* confirm MII not busy */
-       ret = smsc75xx_phy_wait_not_busy(dev);
+       ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
        check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read\n");
 
        /* set the address, index & direction (read from PHY) */
@@ -220,13 +210,13 @@ static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
        addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
                | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
                | MII_ACCESS_READ | MII_ACCESS_BUSY;
-       ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
+       ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm);
        check_warn_goto_done(ret, "Error writing MII_ACCESS\n");
 
-       ret = smsc75xx_phy_wait_not_busy(dev);
+       ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
        check_warn_goto_done(ret, "Timed out reading MII reg %02X\n", idx);
 
-       ret = smsc75xx_read_reg(dev, MII_DATA, &val);
+       ret = __smsc75xx_read_reg(dev, MII_DATA, &val, in_pm);
        check_warn_goto_done(ret, "Error reading MII_DATA\n");
 
        ret = (u16)(val & 0xFFFF);
@@ -236,8 +226,8 @@ done:
        return ret;
 }
 
-static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
-                               int regval)
+static void __smsc75xx_mdio_write(struct net_device *netdev, int phy_id,
+                                 int idx, int regval, int in_pm)
 {
        struct usbnet *dev = netdev_priv(netdev);
        u32 val, addr;
@@ -246,11 +236,11 @@ static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
        mutex_lock(&dev->phy_mutex);
 
        /* confirm MII not busy */
-       ret = smsc75xx_phy_wait_not_busy(dev);
+       ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
        check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write\n");
 
        val = regval;
-       ret = smsc75xx_write_reg(dev, MII_DATA, val);
+       ret = __smsc75xx_write_reg(dev, MII_DATA, val, in_pm);
        check_warn_goto_done(ret, "Error writing MII_DATA\n");
 
        /* set the address, index & direction (write to PHY) */
@@ -259,16 +249,39 @@ static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
        addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
                | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
                | MII_ACCESS_WRITE | MII_ACCESS_BUSY;
-       ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
+       ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm);
        check_warn_goto_done(ret, "Error writing MII_ACCESS\n");
 
-       ret = smsc75xx_phy_wait_not_busy(dev);
+       ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
        check_warn_goto_done(ret, "Timed out writing MII reg %02X\n", idx);
 
 done:
        mutex_unlock(&dev->phy_mutex);
 }
 
+/* Thin wrappers selecting the register-access path for MDIO ops:
+ * the _nopm variants pass in_pm=1 and are for suspend/resume code,
+ * the plain variants pass in_pm=0 for normal runtime use.
+ * NOTE(review): in_pm is forwarded down to __smsc75xx_read/write_reg —
+ * presumably it selects usbnet's *_nopm command helpers; confirm there.
+ */
+static int smsc75xx_mdio_read_nopm(struct net_device *netdev, int phy_id,
+                                  int idx)
+{
+       return __smsc75xx_mdio_read(netdev, phy_id, idx, 1);
+}
+
+static void smsc75xx_mdio_write_nopm(struct net_device *netdev, int phy_id,
+                                    int idx, int regval)
+{
+       __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 1);
+}
+
+static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+       return __smsc75xx_mdio_read(netdev, phy_id, idx, 0);
+}
+
+static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
+                               int regval)
+{
+       __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 0);
+}
+
 static int smsc75xx_wait_eeprom(struct usbnet *dev)
 {
        unsigned long start_time = jiffies;
@@ -640,8 +653,13 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
 {
        struct usbnet *dev = netdev_priv(net);
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       int ret;
 
        pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
+
+       ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
+       check_warn_return(ret, "device_set_wakeup_enable error %d\n", ret);
+
        return 0;
 }
 
@@ -1163,6 +1181,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
 
        /* Init all registers */
        ret = smsc75xx_reset(dev);
+       check_warn_return(ret, "smsc75xx_reset error %d\n", ret);
 
        dev->net->netdev_ops = &smsc75xx_netdev_ops;
        dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
@@ -1213,48 +1232,255 @@ static int smsc75xx_write_wuff(struct usbnet *dev, int filter, u32 wuf_cfg,
        return 0;
 }
 
+/* Enter SUSPEND0 (wake-on-LAN armed): clear the current suspend-mode
+ * and PHY-reset bits in PMT_CTL, then select mode 0 with WOL enabled
+ * and the wake-up status bits written back.  Records the mode in
+ * pdata->suspend_flags so resume knows what to undo.
+ * Returns 0 on success or a register-access error code.
+ */
+static int smsc75xx_enter_suspend0(struct usbnet *dev)
+{
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       u32 val;
+       int ret;
+
+       ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+       check_warn_return(ret, "Error reading PMT_CTL\n");
+
+       val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_PHY_RST));
+       val |= PMT_CTL_SUS_MODE_0 | PMT_CTL_WOL_EN | PMT_CTL_WUPS;
+
+       ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL\n");
+
+       pdata->suspend_flags |= SUSPEND_SUSPEND0;
+
+       return 0;
+}
+
+/* Enter SUSPEND1 (energy-detect wake): first PMT_CTL write selects
+ * mode 1; second write clears the wake-up status and enables energy
+ * detection (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN) so a cable plug-in can
+ * wake the device.  Records the mode in pdata->suspend_flags.
+ */
+static int smsc75xx_enter_suspend1(struct usbnet *dev)
+{
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       u32 val;
+       int ret;
+
+       ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+       check_warn_return(ret, "Error reading PMT_CTL\n");
+
+       val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+       val |= PMT_CTL_SUS_MODE_1;
+
+       ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL\n");
+
+       /* clear wol status, enable energy detection */
+       val &= ~PMT_CTL_WUPS;
+       val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN);
+
+       ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL\n");
+
+       pdata->suspend_flags |= SUSPEND_SUSPEND1;
+
+       return 0;
+}
+
+/* Enter SUSPEND2, the lowest-power mode used when no wake events are
+ * wanted (interface down or no WOL options): select mode 2 in PMT_CTL
+ * with wake-up status cleared, and record it in pdata->suspend_flags.
+ */
+static int smsc75xx_enter_suspend2(struct usbnet *dev)
+{
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       u32 val;
+       int ret;
+
+       ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+       check_warn_return(ret, "Error reading PMT_CTL\n");
+
+       val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+       val |= PMT_CTL_SUS_MODE_2;
+
+       ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL\n");
+
+       pdata->suspend_flags |= SUSPEND_SUSPEND2;
+
+       return 0;
+}
+
+/* Enter SUSPEND3 (autosuspend with link up).  Refuses with -EBUSY if
+ * the RX FIFO still holds data (FCT_RX_CTL_RXUSED), since suspending
+ * would strand those frames.  Then selects mode 3 with
+ * PMT_CTL_RES_CLR_WKP_EN set, and finally clears the wake-up status /
+ * arms WOL wake (PMT_CTL_WUPS_WOL) in a second write.
+ */
+static int smsc75xx_enter_suspend3(struct usbnet *dev)
+{
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       u32 val;
+       int ret;
+
+       ret = smsc75xx_read_reg_nopm(dev, FCT_RX_CTL, &val);
+       check_warn_return(ret, "Error reading FCT_RX_CTL\n");
+
+       if (val & FCT_RX_CTL_RXUSED) {
+               netdev_dbg(dev->net, "rx fifo not empty in autosuspend\n");
+               return -EBUSY;
+       }
+
+       ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+       check_warn_return(ret, "Error reading PMT_CTL\n");
+
+       val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+       val |= PMT_CTL_SUS_MODE_3 | PMT_CTL_RES_CLR_WKP_EN;
+
+       ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL\n");
+
+       /* clear wol status */
+       val &= ~PMT_CTL_WUPS;
+       val |= PMT_CTL_WUPS_WOL;
+
+       ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL\n");
+
+       pdata->suspend_flags |= SUSPEND_SUSPEND3;
+
+       return 0;
+}
+
+/* Arm PHY wake-up interrupts for the bits in @mask: read PHY_INT_SRC
+ * (read-to-clear) to drop stale events, then OR @mask into
+ * PHY_INT_MASK.  Uses _nopm MDIO accessors — callers are on the
+ * suspend path.  Returns 0 or a negative MDIO-read error.
+ * NOTE(review): the final mdio write's status is not checked; the
+ * _nopm write helper returns void, so a failed write is silent.
+ */
+static int smsc75xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
+{
+       struct mii_if_info *mii = &dev->mii;
+       int ret;
+
+       netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
+
+       /* read to clear */
+       ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
+       check_warn_return(ret, "Error reading PHY_INT_SRC\n");
+
+       /* enable interrupt source */
+       ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
+       check_warn_return(ret, "Error reading PHY_INT_MASK\n");
+
+       ret |= mask;
+
+       smsc75xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret);
+
+       return 0;
+}
+
+/* Report link state using only _nopm MDIO accessors, safe on the
+ * suspend path.  Returns 1 if BMSR_LSTATUS is set, 0 if not, or a
+ * negative error if the MDIO reads fail.
+ */
+static int smsc75xx_link_ok_nopm(struct usbnet *dev)
+{
+       struct mii_if_info *mii = &dev->mii;
+       int ret;
+
+       /* first, a dummy read, needed to latch some MII phys */
+       ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+       check_warn_return(ret, "Error reading MII_BMSR\n");
+
+       ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+       check_warn_return(ret, "Error reading MII_BMSR\n");
+
+       return !!(ret & BMSR_LSTATUS);
+}
+
+/* Pick the deepest safe suspend mode for USB autosuspend:
+ *   - interface down       -> SUSPEND2 (full power-down, no wake)
+ *   - link down            -> SUSPEND1 (EDPD, wake on aneg complete)
+ *   - link up              -> SUSPEND3 (wake on link-down PHY event)
+ * @link_up must come from smsc75xx_link_ok_nopm() since normal MDIO
+ * access is not usable here.  Returns 0 or a negative error (including
+ * -EBUSY from enter_suspend3 if the RX FIFO is not empty).
+ */
+static int smsc75xx_autosuspend(struct usbnet *dev, u32 link_up)
+{
+       int ret;
+
+       if (!netif_running(dev->net)) {
+               /* interface is ifconfig down so fully power down hw */
+               netdev_dbg(dev->net, "autosuspend entering SUSPEND2\n");
+               return smsc75xx_enter_suspend2(dev);
+       }
+
+       if (!link_up) {
+               /* link is down so enter EDPD mode */
+               netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n");
+
+               /* enable PHY wakeup events for if cable is attached */
+               ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
+                       PHY_INT_MASK_ANEG_COMP);
+               check_warn_return(ret, "error enabling PHY wakeup ints\n");
+
+               netdev_info(dev->net, "entering SUSPEND1 mode\n");
+               return smsc75xx_enter_suspend1(dev);
+       }
+
+       /* enable PHY wakeup events so we remote wakeup if cable is pulled */
+       ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
+               PHY_INT_MASK_LINK_DOWN);
+       check_warn_return(ret, "error enabling PHY wakeup ints\n");
+
+       netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n");
+       return smsc75xx_enter_suspend3(dev);
+}
+
 static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       u32 val, link_up;
        int ret;
-       u32 val;
 
        ret = usbnet_suspend(intf, message);
-       check_warn_return(ret, "usbnet_suspend error\n");
+       check_warn_goto_done(ret, "usbnet_suspend error\n");
+
+       if (pdata->suspend_flags) {
+               netdev_warn(dev->net, "error during last resume\n");
+               pdata->suspend_flags = 0;
+       }
+
+       /* determine if link is up using only _nopm functions */
+       link_up = smsc75xx_link_ok_nopm(dev);
+
+       if (message.event == PM_EVENT_AUTO_SUSPEND) {
+               ret = smsc75xx_autosuspend(dev, link_up);
+               goto done;
+       }
 
-       /* if no wol options set, enter lowest power SUSPEND2 mode */
-       if (!(pdata->wolopts & SUPPORTED_WAKE)) {
+       /* if we get this far we're not autosuspending */
+       /* if no wol options set, or if link is down and we're not waking on
+        * PHY activity, enter lowest power SUSPEND2 mode
+        */
+       if (!(pdata->wolopts & SUPPORTED_WAKE) ||
+               !(link_up || (pdata->wolopts & WAKE_PHY))) {
                netdev_info(dev->net, "entering SUSPEND2 mode\n");
 
                /* disable energy detect (link up) & wake up events */
                ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
-               check_warn_return(ret, "Error reading WUCSR\n");
+               check_warn_goto_done(ret, "Error reading WUCSR\n");
 
                val &= ~(WUCSR_MPEN | WUCSR_WUEN);
 
                ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
-               check_warn_return(ret, "Error writing WUCSR\n");
+               check_warn_goto_done(ret, "Error writing WUCSR\n");
 
                ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
-               check_warn_return(ret, "Error reading PMT_CTL\n");
+               check_warn_goto_done(ret, "Error reading PMT_CTL\n");
 
                val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN);
 
                ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
-               check_warn_return(ret, "Error writing PMT_CTL\n");
+               check_warn_goto_done(ret, "Error writing PMT_CTL\n");
 
-               /* enter suspend2 mode */
-               ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
-               check_warn_return(ret, "Error reading PMT_CTL\n");
+               ret = smsc75xx_enter_suspend2(dev);
+               goto done;
+       }
 
-               val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
-               val |= PMT_CTL_SUS_MODE_2;
+       if (pdata->wolopts & WAKE_PHY) {
+               ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
+                       (PHY_INT_MASK_ANEG_COMP | PHY_INT_MASK_LINK_DOWN));
+               check_warn_goto_done(ret, "error enabling PHY wakeup ints\n");
 
-               ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
-               check_warn_return(ret, "Error writing PMT_CTL\n");
+               /* if link is down then configure EDPD and enter SUSPEND1,
+                * otherwise enter SUSPEND0 below
+                */
+               if (!link_up) {
+                       struct mii_if_info *mii = &dev->mii;
+                       netdev_info(dev->net, "entering SUSPEND1 mode\n");
 
-               return 0;
+                       /* enable energy detect power-down mode */
+                       ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id,
+                               PHY_MODE_CTRL_STS);
+                       check_warn_goto_done(ret, "Error reading PHY_MODE_CTRL_STS\n");
+
+                       ret |= MODE_CTRL_STS_EDPWRDOWN;
+
+                       smsc75xx_mdio_write_nopm(dev->net, mii->phy_id,
+                               PHY_MODE_CTRL_STS, ret);
+
+                       /* enter SUSPEND1 mode */
+                       ret = smsc75xx_enter_suspend1(dev);
+                       goto done;
+               }
        }
 
        if (pdata->wolopts & (WAKE_MCAST | WAKE_ARP)) {
@@ -1263,7 +1489,7 @@ static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
                /* disable all filters */
                for (i = 0; i < WUF_NUM; i++) {
                        ret = smsc75xx_write_reg_nopm(dev, WUF_CFGX + i * 4, 0);
-                       check_warn_return(ret, "Error writing WUF_CFGX\n");
+                       check_warn_goto_done(ret, "Error writing WUF_CFGX\n");
                }
 
                if (pdata->wolopts & WAKE_MCAST) {
@@ -1273,7 +1499,7 @@ static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
                        val = WUF_CFGX_EN | WUF_CFGX_ATYPE_MULTICAST
                                | smsc_crc(mcast, 3);
                        ret = smsc75xx_write_wuff(dev, filter++, val, 0x0007);
-                       check_warn_return(ret, "Error writing wakeup filter\n");
+                       check_warn_goto_done(ret, "Error writing wakeup filter\n");
                }
 
                if (pdata->wolopts & WAKE_ARP) {
@@ -1283,118 +1509,127 @@ static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
                        val = WUF_CFGX_EN | WUF_CFGX_ATYPE_ALL | (0x0C << 16)
                                | smsc_crc(arp, 2);
                        ret = smsc75xx_write_wuff(dev, filter++, val, 0x0003);
-                       check_warn_return(ret, "Error writing wakeup filter\n");
+                       check_warn_goto_done(ret, "Error writing wakeup filter\n");
                }
 
                /* clear any pending pattern match packet status */
                ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
-               check_warn_return(ret, "Error reading WUCSR\n");
+               check_warn_goto_done(ret, "Error reading WUCSR\n");
 
                val |= WUCSR_WUFR;
 
                ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
-               check_warn_return(ret, "Error writing WUCSR\n");
+               check_warn_goto_done(ret, "Error writing WUCSR\n");
 
                netdev_info(dev->net, "enabling packet match detection\n");
                ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
-               check_warn_return(ret, "Error reading WUCSR\n");
+               check_warn_goto_done(ret, "Error reading WUCSR\n");
 
                val |= WUCSR_WUEN;
 
                ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
-               check_warn_return(ret, "Error writing WUCSR\n");
+               check_warn_goto_done(ret, "Error writing WUCSR\n");
        } else {
                netdev_info(dev->net, "disabling packet match detection\n");
                ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
-               check_warn_return(ret, "Error reading WUCSR\n");
+               check_warn_goto_done(ret, "Error reading WUCSR\n");
 
                val &= ~WUCSR_WUEN;
 
                ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
-               check_warn_return(ret, "Error writing WUCSR\n");
+               check_warn_goto_done(ret, "Error writing WUCSR\n");
        }
 
        /* disable magic, bcast & unicast wakeup sources */
        ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
-       check_warn_return(ret, "Error reading WUCSR\n");
+       check_warn_goto_done(ret, "Error reading WUCSR\n");
 
        val &= ~(WUCSR_MPEN | WUCSR_BCST_EN | WUCSR_PFDA_EN);
 
        ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
-       check_warn_return(ret, "Error writing WUCSR\n");
+       check_warn_goto_done(ret, "Error writing WUCSR\n");
+
+       if (pdata->wolopts & WAKE_PHY) {
+               netdev_info(dev->net, "enabling PHY wakeup\n");
+
+               ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+               check_warn_goto_done(ret, "Error reading PMT_CTL\n");
+
+               /* clear wol status, enable energy detection */
+               val &= ~PMT_CTL_WUPS;
+               val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN);
+
+               ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+               check_warn_goto_done(ret, "Error writing PMT_CTL\n");
+       }
 
        if (pdata->wolopts & WAKE_MAGIC) {
                netdev_info(dev->net, "enabling magic packet wakeup\n");
                ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
-               check_warn_return(ret, "Error reading WUCSR\n");
+               check_warn_goto_done(ret, "Error reading WUCSR\n");
 
                /* clear any pending magic packet status */
                val |= WUCSR_MPR | WUCSR_MPEN;
 
                ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
-               check_warn_return(ret, "Error writing WUCSR\n");
+               check_warn_goto_done(ret, "Error writing WUCSR\n");
        }
 
        if (pdata->wolopts & WAKE_BCAST) {
                netdev_info(dev->net, "enabling broadcast detection\n");
                ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
-               check_warn_return(ret, "Error reading WUCSR\n");
+               check_warn_goto_done(ret, "Error reading WUCSR\n");
 
                val |= WUCSR_BCAST_FR | WUCSR_BCST_EN;
 
                ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
-               check_warn_return(ret, "Error writing WUCSR\n");
+               check_warn_goto_done(ret, "Error writing WUCSR\n");
        }
 
        if (pdata->wolopts & WAKE_UCAST) {
                netdev_info(dev->net, "enabling unicast detection\n");
                ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
-               check_warn_return(ret, "Error reading WUCSR\n");
+               check_warn_goto_done(ret, "Error reading WUCSR\n");
 
                val |= WUCSR_WUFR | WUCSR_PFDA_EN;
 
                ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
-               check_warn_return(ret, "Error writing WUCSR\n");
+               check_warn_goto_done(ret, "Error writing WUCSR\n");
        }
 
        /* enable receiver to enable frame reception */
        ret = smsc75xx_read_reg_nopm(dev, MAC_RX, &val);
-       check_warn_return(ret, "Failed to read MAC_RX: %d\n", ret);
+       check_warn_goto_done(ret, "Failed to read MAC_RX: %d\n", ret);
 
        val |= MAC_RX_RXEN;
 
        ret = smsc75xx_write_reg_nopm(dev, MAC_RX, val);
-       check_warn_return(ret, "Failed to write MAC_RX: %d\n", ret);
+       check_warn_goto_done(ret, "Failed to write MAC_RX: %d\n", ret);
 
        /* some wol options are enabled, so enter SUSPEND0 */
        netdev_info(dev->net, "entering SUSPEND0 mode\n");
+       ret = smsc75xx_enter_suspend0(dev);
 
-       ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
-       check_warn_return(ret, "Error reading PMT_CTL\n");
-
-       val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_PHY_RST));
-       val |= PMT_CTL_SUS_MODE_0 | PMT_CTL_WOL_EN | PMT_CTL_WUPS;
-
-       ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
-       check_warn_return(ret, "Error writing PMT_CTL\n");
-
-       smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
-
-       return 0;
+done:
+       if (ret)
+               usbnet_resume(intf);
+       return ret;
 }
 
 static int smsc75xx_resume(struct usb_interface *intf)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       u8 suspend_flags = pdata->suspend_flags;
        int ret;
        u32 val;
 
-       if (pdata->wolopts) {
-               netdev_info(dev->net, "resuming from SUSPEND0\n");
+       netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags);
 
-               smsc75xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+       /* do this first to ensure it's cleared even in error case */
+       pdata->suspend_flags = 0;
 
+       if (suspend_flags & SUSPEND_ALLMODES) {
                /* Disable wakeup sources */
                ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
                check_warn_return(ret, "Error reading WUCSR\n");
@@ -1414,7 +1649,9 @@ static int smsc75xx_resume(struct usb_interface *intf)
 
                ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
                check_warn_return(ret, "Error writing PMT_CTL\n");
-       } else {
+       }
+
+       if (suspend_flags & SUSPEND_SUSPEND2) {
                netdev_info(dev->net, "resuming from SUSPEND2\n");
 
                ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
@@ -1570,6 +1807,12 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
        return skb;
 }
 
+static int smsc75xx_manage_power(struct usbnet *dev, int on)
+{
+       dev->intf->needs_remote_wakeup = on;
+       return 0;
+}
+
 static const struct driver_info smsc75xx_info = {
        .description    = "smsc75xx USB 2.0 Gigabit Ethernet",
        .bind           = smsc75xx_bind,
@@ -1579,6 +1822,7 @@ static const struct driver_info smsc75xx_info = {
        .rx_fixup       = smsc75xx_rx_fixup,
        .tx_fixup       = smsc75xx_tx_fixup,
        .status         = smsc75xx_status,
+       .manage_power   = smsc75xx_manage_power,
        .flags          = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
 };
 
@@ -1606,6 +1850,7 @@ static struct usb_driver smsc75xx_driver = {
        .reset_resume   = smsc75xx_resume,
        .disconnect     = usbnet_disconnect,
        .disable_hub_initiated_lpm = 1,
+       .supports_autosuspend = 1,
 };
 
 module_usb_driver(smsc75xx_driver);
index fb828e9fe8e0c8c8dbf04e99d0fe0bfc9dcdd8ff..a14f28b280f57a040e3d499955419f6c949c5c93 100644 (file)
@@ -74,6 +74,10 @@ MODULE_ALIAS_NETDEV("ip6tnl0");
 #define HASH_SIZE_SHIFT  5
 #define HASH_SIZE (1 << HASH_SIZE_SHIFT)
 
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
 {
        u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
@@ -683,28 +687,26 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        return 0;
 }
 
-static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
-                                       const struct ipv6hdr *ipv6h,
-                                       struct sk_buff *skb)
+static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
+                                      const struct ipv6hdr *ipv6h,
+                                      struct sk_buff *skb)
 {
        __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
 
        if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
                ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
 
-       if (INET_ECN_is_ce(dsfield))
-               IP_ECN_set_ce(ip_hdr(skb));
+       return IP6_ECN_decapsulate(ipv6h, skb);
 }
 
-static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
-                                       const struct ipv6hdr *ipv6h,
-                                       struct sk_buff *skb)
+static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
+                                      const struct ipv6hdr *ipv6h,
+                                      struct sk_buff *skb)
 {
        if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
                ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
 
-       if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
-               IP6_ECN_set_ce(ipv6_hdr(skb));
+       return IP6_ECN_decapsulate(ipv6h, skb);
 }
 
 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
@@ -768,12 +770,13 @@ EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
 
 static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
                       __u8 ipproto,
-                      void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
-                                                   const struct ipv6hdr *ipv6h,
-                                                   struct sk_buff *skb))
+                      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+                                                  const struct ipv6hdr *ipv6h,
+                                                  struct sk_buff *skb))
 {
        struct ip6_tnl *t;
        const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       int err;
 
        rcu_read_lock();
 
@@ -803,14 +806,26 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
                skb->pkt_type = PACKET_HOST;
                memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 
+               __skb_tunnel_rx(skb, t->dev);
+
+               err = dscp_ecn_decapsulate(t, ipv6h, skb);
+               if (unlikely(err)) {
+                       if (log_ecn_error)
+                               net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
+                                                    &ipv6h->saddr,
+                                                    ipv6_get_dsfield(ipv6h));
+                       if (err > 1) {
+                               ++t->dev->stats.rx_frame_errors;
+                               ++t->dev->stats.rx_errors;
+                               rcu_read_unlock();
+                               goto discard;
+                       }
+               }
+
                tstats = this_cpu_ptr(t->dev->tstats);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
 
-               __skb_tunnel_rx(skb, t->dev);
-
-               dscp_ecn_decapsulate(t, ipv6h, skb);
-
                netif_rx(skb);
 
                rcu_read_unlock();
index 80cb3829831ce1ead743992c0febb9ec2bfefc13..cfba99b2c2a4958144ff4c2a2faa1d98192d7310 100644 (file)
 #define HASH_SIZE  16
 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
 
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
 static int ipip6_tunnel_init(struct net_device *dev);
 static void ipip6_tunnel_setup(struct net_device *dev);
 static void ipip6_dev_free(struct net_device *dev);
@@ -106,6 +110,7 @@ static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
        }
 
        tot->rx_errors = dev->stats.rx_errors;
+       tot->rx_frame_errors = dev->stats.rx_frame_errors;
        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
        tot->tx_dropped = dev->stats.tx_dropped;
@@ -585,16 +590,11 @@ out:
        return err;
 }
 
-static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
-{
-       if (INET_ECN_is_ce(iph->tos))
-               IP6_ECN_set_ce(ipv6_hdr(skb));
-}
-
 static int ipip6_rcv(struct sk_buff *skb)
 {
        const struct iphdr *iph;
        struct ip_tunnel *tunnel;
+       int err;
 
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto out;
@@ -616,18 +616,27 @@ static int ipip6_rcv(struct sk_buff *skb)
                if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
                    !isatap_chksrc(skb, iph, tunnel)) {
                        tunnel->dev->stats.rx_errors++;
-                       kfree_skb(skb);
-                       return 0;
+                       goto out;
+               }
+
+               __skb_tunnel_rx(skb, tunnel->dev);
+
+               err = IP_ECN_decapsulate(iph, skb);
+               if (unlikely(err)) {
+                       if (log_ecn_error)
+                               net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                                    &iph->saddr, iph->tos);
+                       if (err > 1) {
+                               ++tunnel->dev->stats.rx_frame_errors;
+                               ++tunnel->dev->stats.rx_errors;
+                               goto out;
+                       }
                }
 
                tstats = this_cpu_ptr(tunnel->dev->tstats);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
 
-               __skb_tunnel_rx(skb, tunnel->dev);
-
-               ipip6_ecn_decapsulate(iph, skb);
-
                netif_rx(skb);
 
                return 0;
index 9687fa1c2275c76cb7033a83657068406d716de7..6ed37652a4c388df5f96dde4f4e865d86e1868f6 100644 (file)
@@ -1,7 +1,8 @@
 /*
- * net/sched/sch_qfq.c         Quick Fair Queueing Scheduler.
+ * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
  *
  * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
+ * Copyright (c) 2012 Paolo Valente.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
 #include <net/pkt_cls.h>
 
 
-/*  Quick Fair Queueing
-    ===================
+/*  Quick Fair Queueing Plus
+    ========================
 
     Sources:
 
-    Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
+    [1] Paolo Valente,
+    "Reducing the Execution Time of Fair-Queueing Schedulers."
+    http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf
+
+    Sources for QFQ:
+
+    [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
     Packet Scheduling with Tight Bandwidth Distribution Guarantees."
 
     See also:
 
 /*
 
+  QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
+  classes. Each aggregate is timestamped with a virtual start time S
+  and a virtual finish time F, and scheduled according to its
+  timestamps. S and F are computed as a function of a system virtual
+  time function V. The classes within each aggregate are instead
+  scheduled with DRR.
+
+  To speed up operations, QFQ+ divides also aggregates into a limited
+  number of groups. Which group a class belongs to depends on the
+  ratio between the maximum packet length for the class and the weight
+  of the class. Groups have their own S and F. In the end, QFQ+
+  schedules groups, then aggregates within groups, then classes within
+  aggregates. See [1] and [2] for a full description.
+
   Virtual time computations.
 
   S, F and V are all computed in fixed point arithmetic with
 #define QFQ_MAX_SLOTS  32
 
 /*
- * Shifts used for class<->group mapping.  We allow class weights that are
- * in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the
+ * Shifts used for aggregate<->group mapping.  We allow class weights that are
+ * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
  * group with the smallest index that can support the L_i / r_i configured
- * for the class.
+ * for the classes in the aggregate.
  *
  * grp->index is the index of the group; and grp->slot_shift
  * is the shift for the corresponding (scaled) sigma_i.
  */
 #define QFQ_MAX_INDEX          24
-#define QFQ_MAX_WSHIFT         12
+#define QFQ_MAX_WSHIFT         10
 
-#define        QFQ_MAX_WEIGHT          (1<<QFQ_MAX_WSHIFT)
-#define QFQ_MAX_WSUM           (16*QFQ_MAX_WEIGHT)
+#define        QFQ_MAX_WEIGHT          (1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
+#define QFQ_MAX_WSUM           (64*QFQ_MAX_WEIGHT)
 
 #define FRAC_BITS              30      /* fixed point arithmetic */
 #define ONE_FP                 (1UL << FRAC_BITS)
 #define IWSUM                  (ONE_FP/QFQ_MAX_WSUM)
 
 #define QFQ_MTU_SHIFT          16      /* to support TSO/GSO */
-#define QFQ_MIN_SLOT_SHIFT     (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
-#define QFQ_MIN_LMAX           256     /* min possible lmax for a class */
+#define QFQ_MIN_LMAX           512     /* see qfq_slot_insert */
+
+#define QFQ_MAX_AGG_CLASSES    8 /* max num classes per aggregate allowed */
 
 /*
  * Possible group states.  These values are used as indexes for the bitmaps
@@ -106,6 +128,8 @@ enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
 
 struct qfq_group;
 
+struct qfq_aggregate;
+
 struct qfq_class {
        struct Qdisc_class_common common;
 
@@ -116,7 +140,12 @@ struct qfq_class {
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        struct Qdisc *qdisc;
+       struct list_head alist;         /* Link for active-classes list. */
+       struct qfq_aggregate *agg;      /* Parent aggregate. */
+       int deficit;                    /* DRR deficit counter. */
+};
 
+struct qfq_aggregate {
        struct hlist_node next; /* Link for the slot list. */
        u64 S, F;               /* flow timestamps (exact) */
 
@@ -127,8 +156,18 @@ struct qfq_class {
        struct qfq_group *grp;
 
        /* these are copied from the flowset. */
-       u32     inv_w;          /* ONE_FP/weight */
-       u32     lmax;           /* Max packet size for this flow. */
+       u32     class_weight; /* Weight of each class in this aggregate. */
+       /* Max pkt size for the classes in this aggregate, DRR quantum. */
+       int     lmax;
+
+       u32     inv_w;      /* ONE_FP/(sum of weights of classes in aggr.). */
+       u32     budgetmax;  /* Max budget for this aggregate. */
+       u32     initial_budget, budget;     /* Initial and current budget. */
+
+       int               num_classes;  /* Number of classes in this aggr. */
+       struct list_head  active;       /* DRR queue of active classes. */
+
+       struct hlist_node nonfull_next; /* See nonfull_aggs in qfq_sched. */
 };
 
 struct qfq_group {
@@ -138,7 +177,7 @@ struct qfq_group {
        unsigned int front;             /* Index of the front slot. */
        unsigned long full_slots;       /* non-empty slots */
 
-       /* Array of RR lists of active classes. */
+       /* Array of RR lists of active aggregates. */
        struct hlist_head slots[QFQ_MAX_SLOTS];
 };
 
@@ -146,13 +185,28 @@ struct qfq_sched {
        struct tcf_proto *filter_list;
        struct Qdisc_class_hash clhash;
 
-       u64             V;              /* Precise virtual time. */
-       u32             wsum;           /* weight sum */
+       u64                     oldV, V;        /* Precise virtual times. */
+       struct qfq_aggregate    *in_serv_agg;   /* Aggregate being served. */
+       u32                     num_active_agg; /* Num. of active aggregates */
+       u32                     wsum;           /* weight sum */
 
        unsigned long bitmaps[QFQ_MAX_STATE];       /* Group bitmaps. */
        struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
+       u32 min_slot_shift;     /* Index of the group-0 bit in the bitmaps. */
+
+       u32 max_agg_classes;            /* Max number of classes per aggr. */
+       struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
 };
 
+/*
+ * Possible reasons why the timestamps of an aggregate are updated
+ * enqueue: the aggregate switches from idle to active and must scheduled
+ *         for service
+ * requeue: the aggregate finishes its budget, so it stops being served and
+ *         must be rescheduled for service
+ */
+enum update_reason {enqueue, requeue};
+
 static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
 {
        struct qfq_sched *q = qdisc_priv(sch);
@@ -182,18 +236,18 @@ static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
  * index = log_2(maxlen/weight) but we need to apply the scaling.
  * This is used only once at flow creation.
  */
-static int qfq_calc_index(u32 inv_w, unsigned int maxlen)
+static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
 {
        u64 slot_size = (u64)maxlen * inv_w;
        unsigned long size_map;
        int index = 0;
 
-       size_map = slot_size >> QFQ_MIN_SLOT_SHIFT;
+       size_map = slot_size >> min_slot_shift;
        if (!size_map)
                goto out;
 
        index = __fls(size_map) + 1;    /* basically a log_2 */
-       index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));
+       index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));
 
        if (index < 0)
                index = 0;
@@ -204,66 +258,150 @@ out:
        return index;
 }
 
-/* Length of the next packet (0 if the queue is empty). */
-static unsigned int qdisc_peek_len(struct Qdisc *sch)
+static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
+static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
+                            enum update_reason);
+
+static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
+                        u32 lmax, u32 weight)
 {
-       struct sk_buff *skb;
+       INIT_LIST_HEAD(&agg->active);
+       hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
+
+       agg->lmax = lmax;
+       agg->class_weight = weight;
+}
+
+static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
+                                         u32 lmax, u32 weight)
+{
+       struct qfq_aggregate *agg;
+       struct hlist_node *n;
+
+       hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next)
+               if (agg->lmax == lmax && agg->class_weight == weight)
+                       return agg;
+
+       return NULL;
+}
+
 
-       skb = sch->ops->peek(sch);
-       return skb ? qdisc_pkt_len(skb) : 0;
+/* Update aggregate as a function of the new number of classes. */
+static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
+                          int new_num_classes)
+{
+       u32 new_agg_weight;
+
+       if (new_num_classes == q->max_agg_classes)
+               hlist_del_init(&agg->nonfull_next);
+
+       if (agg->num_classes > new_num_classes &&
+           new_num_classes == q->max_agg_classes - 1) /* agg no more full */
+               hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
+
+       agg->budgetmax = new_num_classes * agg->lmax;
+       new_agg_weight = agg->class_weight * new_num_classes;
+       agg->inv_w = ONE_FP/new_agg_weight;
+
+       if (agg->grp == NULL) {
+               int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
+                                      q->min_slot_shift);
+               agg->grp = &q->groups[i];
+       }
+
+       q->wsum +=
+               (int) agg->class_weight * (new_num_classes - agg->num_classes);
+
+       agg->num_classes = new_num_classes;
+}
+
+/* Add class to aggregate. */
+static void qfq_add_to_agg(struct qfq_sched *q,
+                          struct qfq_aggregate *agg,
+                          struct qfq_class *cl)
+{
+       cl->agg = agg;
+
+       qfq_update_agg(q, agg, agg->num_classes+1);
+       if (cl->qdisc->q.qlen > 0) { /* adding an active class */
+               list_add_tail(&cl->alist, &agg->active);
+               if (list_first_entry(&agg->active, struct qfq_class, alist) ==
+                   cl && q->in_serv_agg != agg) /* agg was inactive */
+                       qfq_activate_agg(q, agg, enqueue); /* schedule agg */
+       }
 }
 
-static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
-static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
-                              unsigned int len);
+static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);
 
-static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
-                                   u32 lmax, u32 inv_w, int delta_w)
+static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
 {
-       int i;
+       if (!hlist_unhashed(&agg->nonfull_next))
+               hlist_del_init(&agg->nonfull_next);
+       if (q->in_serv_agg == agg)
+               q->in_serv_agg = qfq_choose_next_agg(q);
+       kfree(agg);
+}
 
-       /* update qfq-specific data */
-       cl->lmax = lmax;
-       cl->inv_w = inv_w;
-       i = qfq_calc_index(cl->inv_w, cl->lmax);
+/* Deschedule class from within its parent aggregate. */
+static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
+{
+       struct qfq_aggregate *agg = cl->agg;
 
-       cl->grp = &q->groups[i];
 
-       q->wsum += delta_w;
+       list_del(&cl->alist); /* remove from RR queue of the aggregate */
+       if (list_empty(&agg->active)) /* agg is now inactive */
+               qfq_deactivate_agg(q, agg);
 }
 
-static void qfq_update_reactivate_class(struct qfq_sched *q,
-                                       struct qfq_class *cl,
-                                       u32 inv_w, u32 lmax, int delta_w)
+/* Remove class from its parent aggregate. */
+static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
 {
-       bool need_reactivation = false;
-       int i = qfq_calc_index(inv_w, lmax);
+       struct qfq_aggregate *agg = cl->agg;
 
-       if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
-               /*
-                * shift cl->F back, to not charge the
-                * class for the not-yet-served head
-                * packet
-                */
-               cl->F = cl->S;
-               /* remove class from its slot in the old group */
-               qfq_deactivate_class(q, cl);
-               need_reactivation = true;
+       cl->agg = NULL;
+       if (agg->num_classes == 1) { /* agg being emptied, destroy it */
+               qfq_destroy_agg(q, agg);
+               return;
        }
+       qfq_update_agg(q, agg, agg->num_classes-1);
+}
 
-       qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+/* Deschedule class and remove it from its parent aggregate. */
+static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
+{
+       if (cl->qdisc->q.qlen > 0) /* class is active */
+               qfq_deactivate_class(q, cl);
 
-       if (need_reactivation) /* activate in new group */
-               qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+       qfq_rm_from_agg(q, cl);
 }
 
+/* Move class to a new aggregate, matching the new class weight and/or lmax */
+static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
+                          u32 lmax)
+{
+       struct qfq_sched *q = qdisc_priv(sch);
+       struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);
+
+       if (new_agg == NULL) { /* create new aggregate */
+               new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
+               if (new_agg == NULL)
+                       return -ENOBUFS;
+               qfq_init_agg(q, new_agg, lmax, weight);
+       }
+       qfq_deact_rm_from_agg(q, cl);
+       qfq_add_to_agg(q, new_agg, cl);
+
+       return 0;
+}
 
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                            struct nlattr **tca, unsigned long *arg)
 {
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl = (struct qfq_class *)*arg;
+       bool existing = false;
        struct nlattr *tb[TCA_QFQ_MAX + 1];
+       struct qfq_aggregate *new_agg = NULL;
        u32 weight, lmax, inv_w;
        int err;
        int delta_w;
@@ -286,15 +424,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        } else
                weight = 1;
 
-       inv_w = ONE_FP / weight;
-       weight = ONE_FP / inv_w;
-       delta_w = weight - (cl ? ONE_FP / cl->inv_w : 0);
-       if (q->wsum + delta_w > QFQ_MAX_WSUM) {
-               pr_notice("qfq: total weight out of range (%u + %u)\n",
-                         delta_w, q->wsum);
-               return -EINVAL;
-       }
-
        if (tb[TCA_QFQ_LMAX]) {
                lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
                if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
@@ -304,7 +433,23 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        } else
                lmax = psched_mtu(qdisc_dev(sch));
 
-       if (cl != NULL) {
+       inv_w = ONE_FP / weight;
+       weight = ONE_FP / inv_w;
+
+       if (cl != NULL &&
+           lmax == cl->agg->lmax &&
+           weight == cl->agg->class_weight)
+               return 0; /* nothing to change */
+
+       delta_w = weight - (cl ? cl->agg->class_weight : 0);
+
+       if (q->wsum + delta_w > QFQ_MAX_WSUM) {
+               pr_notice("qfq: total weight out of range (%d + %u)\n",
+                         delta_w, q->wsum);
+               return -EINVAL;
+       }
+
+       if (cl != NULL) { /* modify existing class */
                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                                    qdisc_root_sleeping_lock(sch),
@@ -312,25 +457,18 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                        if (err)
                                return err;
                }
-
-               if (lmax == cl->lmax && inv_w == cl->inv_w)
-                       return 0; /* nothing to update */
-
-               sch_tree_lock(sch);
-               qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w);
-               sch_tree_unlock(sch);
-
-               return 0;
+               existing = true;
+               goto set_change_agg;
        }
 
+       /* create and init new class */
        cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
        if (cl == NULL)
                return -ENOBUFS;
 
        cl->refcnt = 1;
        cl->common.classid = classid;
-
-       qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+       cl->deficit = lmax;
 
        cl->qdisc = qdisc_create_dflt(sch->dev_queue,
                                      &pfifo_qdisc_ops, classid);
@@ -341,11 +479,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                err = gen_new_estimator(&cl->bstats, &cl->rate_est,
                                        qdisc_root_sleeping_lock(sch),
                                        tca[TCA_RATE]);
-               if (err) {
-                       qdisc_destroy(cl->qdisc);
-                       kfree(cl);
-                       return err;
-               }
+               if (err)
+                       goto destroy_class;
        }
 
        sch_tree_lock(sch);
@@ -354,19 +489,39 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
        qdisc_class_hash_grow(sch, &q->clhash);
 
+set_change_agg:
+       sch_tree_lock(sch);
+       new_agg = qfq_find_agg(q, lmax, weight);
+       if (new_agg == NULL) { /* create new aggregate */
+               sch_tree_unlock(sch);
+               new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
+               if (new_agg == NULL) {
+                       err = -ENOBUFS;
+                       gen_kill_estimator(&cl->bstats, &cl->rate_est);
+                       goto destroy_class;
+               }
+               sch_tree_lock(sch);
+               qfq_init_agg(q, new_agg, lmax, weight);
+       }
+       if (existing)
+               qfq_deact_rm_from_agg(q, cl);
+       qfq_add_to_agg(q, new_agg, cl);
+       sch_tree_unlock(sch);
+
        *arg = (unsigned long)cl;
        return 0;
+
+destroy_class:
+       qdisc_destroy(cl->qdisc);
+       kfree(cl);
+       return err;
 }
 
 static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
 {
        struct qfq_sched *q = qdisc_priv(sch);
 
-       if (cl->inv_w) {
-               q->wsum -= ONE_FP / cl->inv_w;
-               cl->inv_w = 0;
-       }
-
+       qfq_rm_from_agg(q, cl);
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
        qdisc_destroy(cl->qdisc);
        kfree(cl);
@@ -481,8 +636,8 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
-       if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) ||
-           nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax))
+       if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
+           nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
                goto nla_put_failure;
        return nla_nest_end(skb, nest);
 
@@ -500,8 +655,8 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        memset(&xstats, 0, sizeof(xstats));
        cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
 
-       xstats.weight = ONE_FP/cl->inv_w;
-       xstats.lmax = cl->lmax;
+       xstats.weight = cl->agg->class_weight;
+       xstats.lmax = cl->agg->lmax;
 
        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
@@ -652,16 +807,16 @@ static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
  * perhaps
  *
        old_V ^= q->V;
-       old_V >>= QFQ_MIN_SLOT_SHIFT;
+       old_V >>= q->min_slot_shift;
        if (old_V) {
                ...
        }
  *
  */
-static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
+static void qfq_make_eligible(struct qfq_sched *q)
 {
-       unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
-       unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;
+       unsigned long vslot = q->V >> q->min_slot_shift;
+       unsigned long old_vslot = q->oldV >> q->min_slot_shift;
 
        if (vslot != old_vslot) {
                unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
@@ -672,34 +827,38 @@ static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
 
 
 /*
- * If the weight and lmax (max_pkt_size) of the classes do not change,
- * then QFQ guarantees that the slot index is never higher than
- * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM).
+ * The index of the slot in which the aggregate is to be inserted must
+ * not be higher than QFQ_MAX_SLOTS-2. There is a '-2' and not a '-1'
+ * because the start time of the group may be moved backward by one
+ * slot after the aggregate has been inserted, and this would cause
+ * non-empty slots to be right-shifted by one position.
  *
- * With the current values of the above constants, the index is
- * then guaranteed to never be higher than 2 + 256 * (1 / 16) = 18.
+ * If the weight and lmax (max_pkt_size) of the classes do not change,
+ * then QFQ+ does meet the above constraint according to the current
+ * values of its parameters. In fact, if the weight and lmax of the
+ * classes do not change, then, from the theory, QFQ+ guarantees that
+ * the slot index is never higher than
+ * 2 + QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
+ * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM) = 2 + 8 * 128 * (1 / 64) = 18
  *
  * When the weight of a class is increased or the lmax of the class is
- * decreased, a new class with smaller slot size may happen to be
- * activated. The activation of this class should be properly delayed
- * to when the service of the class has finished in the ideal system
- * tracked by QFQ. If the activation of the class is not delayed to
- * this reference time instant, then this class may be unjustly served
- * before other classes waiting for service. This may cause
- * (unfrequently) the above bound to the slot index to be violated for
- * some of these unlucky classes.
+ * decreased, a new aggregate with smaller slot size than the original
+ * parent aggregate of the class may happen to be activated. The
+ * activation of this aggregate should be properly delayed to when the
+ * service of the class has finished in the ideal system tracked by
+ * QFQ+. If the activation of the aggregate is not delayed to this
+ * reference time instant, then this aggregate may be unjustly served
+ * before other aggregates waiting for service. This may cause the
+ * above bound to the slot index to be violated for some of these
+ * unlucky aggregates.
  *
- * Instead of delaying the activation of the new class, which is quite
- * complex, the following inaccurate but simple solution is used: if
- * the slot index is higher than QFQ_MAX_SLOTS-2, then the timestamps
- * of the class are shifted backward so as to let the slot index
- * become equal to QFQ_MAX_SLOTS-2. This threshold is used because, if
- * the slot index is above it, then the data structure implementing
- * the bucket list either gets immediately corrupted or may get
- * corrupted on a possible next packet arrival that causes the start
- * time of the group to be shifted backward.
+ * Instead of delaying the activation of the new aggregate, which is
+ * quite complex, the following inaccurate but simple solution is used:
+ * if the slot index is higher than QFQ_MAX_SLOTS-2, then the
+ * timestamps of the aggregate are shifted backward so as to let the
+ * slot index become equal to QFQ_MAX_SLOTS-2.
  */
-static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
+static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
                            u64 roundedS)
 {
        u64 slot = (roundedS - grp->S) >> grp->slot_shift;
@@ -708,22 +867,22 @@ static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
        if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
                u64 deltaS = roundedS - grp->S -
                        ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
-               cl->S -= deltaS;
-               cl->F -= deltaS;
+               agg->S -= deltaS;
+               agg->F -= deltaS;
                slot = QFQ_MAX_SLOTS - 2;
        }
 
        i = (grp->front + slot) % QFQ_MAX_SLOTS;
 
-       hlist_add_head(&cl->next, &grp->slots[i]);
+       hlist_add_head(&agg->next, &grp->slots[i]);
        __set_bit(slot, &grp->full_slots);
 }
 
 /* Maybe introduce hlist_first_entry?? */
-static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
+static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
 {
        return hlist_entry(grp->slots[grp->front].first,
-                          struct qfq_class, next);
+                          struct qfq_aggregate, next);
 }
 
 /*
@@ -731,20 +890,20 @@ static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
  */
 static void qfq_front_slot_remove(struct qfq_group *grp)
 {
-       struct qfq_class *cl = qfq_slot_head(grp);
+       struct qfq_aggregate *agg = qfq_slot_head(grp);
 
-       BUG_ON(!cl);
-       hlist_del(&cl->next);
+       BUG_ON(!agg);
+       hlist_del(&agg->next);
        if (hlist_empty(&grp->slots[grp->front]))
                __clear_bit(0, &grp->full_slots);
 }
 
 /*
- * Returns the first full queue in a group. As a side effect,
- * adjust the bucket list so the first non-empty bucket is at
- * position 0 in full_slots.
+ * Returns the first aggregate in the first non-empty bucket of the
+ * group. As a side effect, adjusts the bucket list so the first
+ * non-empty bucket is at position 0 in full_slots.
  */
-static struct qfq_class *qfq_slot_scan(struct qfq_group *grp)
+static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
 {
        unsigned int i;
 
@@ -780,7 +939,7 @@ static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
        grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
 }
 
-static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
+static void qfq_update_eligible(struct qfq_sched *q)
 {
        struct qfq_group *grp;
        unsigned long ineligible;
@@ -792,137 +951,226 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
                        if (qfq_gt(grp->S, q->V))
                                q->V = grp->S;
                }
-               qfq_make_eligible(q, old_V);
+               qfq_make_eligible(q);
        }
 }
 
-/*
- * Updates the class, returns true if also the group needs to be updated.
- */
-static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl)
+/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
+static void agg_dequeue(struct qfq_aggregate *agg,
+                       struct qfq_class *cl, unsigned int len)
 {
-       unsigned int len = qdisc_peek_len(cl->qdisc);
+       qdisc_dequeue_peeked(cl->qdisc);
 
-       cl->S = cl->F;
-       if (!len)
-               qfq_front_slot_remove(grp);     /* queue is empty */
-       else {
-               u64 roundedS;
+       cl->deficit -= (int) len;
 
-               cl->F = cl->S + (u64)len * cl->inv_w;
-               roundedS = qfq_round_down(cl->S, grp->slot_shift);
-               if (roundedS == grp->S)
-                       return false;
-
-               qfq_front_slot_remove(grp);
-               qfq_slot_insert(grp, cl, roundedS);
+       if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
+               list_del(&cl->alist);
+       else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
+               cl->deficit += agg->lmax;
+               list_move_tail(&cl->alist, &agg->active);
        }
+}
+
+static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
+                                          struct qfq_class **cl,
+                                          unsigned int *len)
+{
+       struct sk_buff *skb;
 
-       return true;
+       *cl = list_first_entry(&agg->active, struct qfq_class, alist);
+       skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
+       if (skb == NULL)
+               WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
+       else
+               *len = qdisc_pkt_len(skb);
+
+       return skb;
+}
+
+/* Update F according to the actual service received by the aggregate. */
+static inline void charge_actual_service(struct qfq_aggregate *agg)
+{
+       /* compute the service received by the aggregate */
+       u32 service_received = agg->initial_budget - agg->budget;
+
+       agg->F = agg->S + (u64)service_received * agg->inv_w;
 }
 
 static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
 {
        struct qfq_sched *q = qdisc_priv(sch);
-       struct qfq_group *grp;
+       struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
        struct qfq_class *cl;
-       struct sk_buff *skb;
-       unsigned int len;
-       u64 old_V;
+       struct sk_buff *skb = NULL;
+       /* next-packet len, 0 means no more active classes in in-service agg */
+       unsigned int len = 0;
 
-       if (!q->bitmaps[ER])
+       if (in_serv_agg == NULL)
                return NULL;
 
-       grp = qfq_ffs(q, q->bitmaps[ER]);
+       if (!list_empty(&in_serv_agg->active))
+               skb = qfq_peek_skb(in_serv_agg, &cl, &len);
 
-       cl = qfq_slot_head(grp);
-       skb = qdisc_dequeue_peeked(cl->qdisc);
-       if (!skb) {
-               WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
-               return NULL;
+       /*
+        * If there are no active classes in the in-service aggregate,
+        * or if the aggregate has not enough budget to serve its next
+        * class, then choose the next aggregate to serve.
+        */
+       if (len == 0 || in_serv_agg->budget < len) {
+               charge_actual_service(in_serv_agg);
+
+               /* recharge the budget of the aggregate */
+               in_serv_agg->initial_budget = in_serv_agg->budget =
+                       in_serv_agg->budgetmax;
+
+               if (!list_empty(&in_serv_agg->active))
+                       /*
+                        * Still active: reschedule for
+                        * service. Possible optimization: if no other
+                        * aggregate is active, then there is no point
+                        * in rescheduling this aggregate, and we can
+                        * just keep it as the in-service one. This
+                        * should be however a corner case, and to
+                        * handle it, we would need to maintain an
+                        * extra num_active_aggs field.
+                       */
+                       qfq_activate_agg(q, in_serv_agg, requeue);
+               else if (sch->q.qlen == 0) { /* no aggregate to serve */
+                       q->in_serv_agg = NULL;
+                       return NULL;
+               }
+
+               /*
+                * If we get here, there are other aggregates queued:
+                * choose the new aggregate to serve.
+                */
+               in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
+               skb = qfq_peek_skb(in_serv_agg, &cl, &len);
        }
+       if (!skb)
+               return NULL;
 
        sch->q.qlen--;
        qdisc_bstats_update(sch, skb);
 
-       old_V = q->V;
-       len = qdisc_pkt_len(skb);
+       agg_dequeue(in_serv_agg, cl, len);
+       in_serv_agg->budget -= len;
        q->V += (u64)len * IWSUM;
        pr_debug("qfq dequeue: len %u F %lld now %lld\n",
-                len, (unsigned long long) cl->F, (unsigned long long) q->V);
+                len, (unsigned long long) in_serv_agg->F,
+                (unsigned long long) q->V);
 
-       if (qfq_update_class(grp, cl)) {
-               u64 old_F = grp->F;
+       return skb;
+}
 
-               cl = qfq_slot_scan(grp);
-               if (!cl)
-                       __clear_bit(grp->index, &q->bitmaps[ER]);
-               else {
-                       u64 roundedS = qfq_round_down(cl->S, grp->slot_shift);
-                       unsigned int s;
+static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
+{
+       struct qfq_group *grp;
+       struct qfq_aggregate *agg, *new_front_agg;
+       u64 old_F;
 
-                       if (grp->S == roundedS)
-                               goto skip_unblock;
-                       grp->S = roundedS;
-                       grp->F = roundedS + (2ULL << grp->slot_shift);
-                       __clear_bit(grp->index, &q->bitmaps[ER]);
-                       s = qfq_calc_state(q, grp);
-                       __set_bit(grp->index, &q->bitmaps[s]);
-               }
+       qfq_update_eligible(q);
+       q->oldV = q->V;
+
+       if (!q->bitmaps[ER])
+               return NULL;
+
+       grp = qfq_ffs(q, q->bitmaps[ER]);
+       old_F = grp->F;
+
+       agg = qfq_slot_head(grp);
 
-               qfq_unblock_groups(q, grp->index, old_F);
+       /* agg starts to be served, remove it from schedule */
+       qfq_front_slot_remove(grp);
+
+       new_front_agg = qfq_slot_scan(grp);
+
+       if (new_front_agg == NULL) /* group is now inactive, remove from ER */
+               __clear_bit(grp->index, &q->bitmaps[ER]);
+       else {
+               u64 roundedS = qfq_round_down(new_front_agg->S,
+                                             grp->slot_shift);
+               unsigned int s;
+
+               if (grp->S == roundedS)
+                       return agg;
+               grp->S = roundedS;
+               grp->F = roundedS + (2ULL << grp->slot_shift);
+               __clear_bit(grp->index, &q->bitmaps[ER]);
+               s = qfq_calc_state(q, grp);
+               __set_bit(grp->index, &q->bitmaps[s]);
        }
 
-skip_unblock:
-       qfq_update_eligible(q, old_V);
+       qfq_unblock_groups(q, grp->index, old_F);
 
-       return skb;
+       return agg;
 }
 
 /*
- * Assign a reasonable start time for a new flow k in group i.
+ * Assign a reasonable start time for a new aggregate in group i.
  * Admissible values for \hat(F) are multiples of \sigma_i
  * no greater than V+\sigma_i . Larger values mean that
  * we had a wraparound so we consider the timestamp to be stale.
  *
  * If F is not stale and F >= V then we set S = F.
  * Otherwise we should assign S = V, but this may violate
- * the ordering in ER. So, if we have groups in ER, set S to
- * the F_j of the first group j which would be blocking us.
+ * the ordering in EB (see [2]). So, if we have groups in ER,
+ * set S to the F_j of the first group j which would be blocking us.
  * We are guaranteed not to move S backward because
  * otherwise our group i would still be blocked.
  */
-static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
+static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
 {
        unsigned long mask;
        u64 limit, roundedF;
-       int slot_shift = cl->grp->slot_shift;
+       int slot_shift = agg->grp->slot_shift;
 
-       roundedF = qfq_round_down(cl->F, slot_shift);
+       roundedF = qfq_round_down(agg->F, slot_shift);
        limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
 
-       if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
+       if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
                /* timestamp was stale */
-               mask = mask_from(q->bitmaps[ER], cl->grp->index);
+               mask = mask_from(q->bitmaps[ER], agg->grp->index);
                if (mask) {
                        struct qfq_group *next = qfq_ffs(q, mask);
                        if (qfq_gt(roundedF, next->F)) {
                                if (qfq_gt(limit, next->F))
-                                       cl->S = next->F;
+                                       agg->S = next->F;
                                else /* preserve timestamp correctness */
-                                       cl->S = limit;
+                                       agg->S = limit;
                                return;
                        }
                }
-               cl->S = q->V;
+               agg->S = q->V;
        } else  /* timestamp is not stale */
-               cl->S = cl->F;
+               agg->S = agg->F;
 }
 
+/*
+ * Update the timestamps of agg before scheduling/rescheduling it for
+ * service.  In particular, assign to agg->F its maximum possible
+ * value, i.e., the virtual finish time with which the aggregate
+ * should be labeled if it used all its budget once in service.
+ */
+static inline void
+qfq_update_agg_ts(struct qfq_sched *q,
+                   struct qfq_aggregate *agg, enum update_reason reason)
+{
+       if (reason != requeue)
+               qfq_update_start(q, agg);
+       else /* just charge agg for the service received */
+               agg->S = agg->F;
+
+       agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
+}
+
+static void qfq_schedule_agg(struct qfq_sched *, struct qfq_aggregate *);
+
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
+       struct qfq_aggregate *agg;
        int err = 0;
 
        cl = qfq_classify(skb, sch, &err);
@@ -934,11 +1182,13 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        }
        pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
 
-       if (unlikely(cl->lmax < qdisc_pkt_len(skb))) {
+       if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
                pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
-                         cl->lmax, qdisc_pkt_len(skb), cl->common.classid);
-               qfq_update_reactivate_class(q, cl, cl->inv_w,
-                                           qdisc_pkt_len(skb), 0);
+                        cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
+               err = qfq_change_agg(sch, cl, cl->agg->class_weight,
+                                    qdisc_pkt_len(skb));
+               if (err)
+                       return err;
        }
 
        err = qdisc_enqueue(skb, cl->qdisc);
@@ -954,35 +1204,50 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        bstats_update(&cl->bstats, skb);
        ++sch->q.qlen;
 
-       /* If the new skb is not the head of queue, then done here. */
-       if (cl->qdisc->q.qlen != 1)
+       agg = cl->agg;
+       /* if the queue was not empty, then done here */
+       if (cl->qdisc->q.qlen != 1) {
+               if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
+                   list_first_entry(&agg->active, struct qfq_class, alist)
+                   == cl && cl->deficit < qdisc_pkt_len(skb))
+                       list_move_tail(&cl->alist, &agg->active);
+
                return err;
+       }
+
+       /* schedule class for service within the aggregate */
+       cl->deficit = agg->lmax;
+       list_add_tail(&cl->alist, &agg->active);
 
-       /* If reach this point, queue q was idle */
-       qfq_activate_class(q, cl, qdisc_pkt_len(skb));
+       if (list_first_entry(&agg->active, struct qfq_class, alist) != cl)
+               return err; /* aggregate was not empty, nothing else to do */
+
+       /* recharge budget */
+       agg->initial_budget = agg->budget = agg->budgetmax;
+
+       qfq_update_agg_ts(q, agg, enqueue);
+       if (q->in_serv_agg == NULL)
+               q->in_serv_agg = agg;
+       else if (agg != q->in_serv_agg)
+               qfq_schedule_agg(q, agg);
 
        return err;
 }
 
 /*
- * Handle class switch from idle to backlogged.
+ * Schedule aggregate according to its timestamps.
  */
-static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
-                              unsigned int pkt_len)
+static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
 {
-       struct qfq_group *grp = cl->grp;
+       struct qfq_group *grp = agg->grp;
        u64 roundedS;
        int s;
 
-       qfq_update_start(q, cl);
-
-       /* compute new finish time and rounded start. */
-       cl->F = cl->S + (u64)pkt_len * cl->inv_w;
-       roundedS = qfq_round_down(cl->S, grp->slot_shift);
+       roundedS = qfq_round_down(agg->S, grp->slot_shift);
 
        /*
-        * insert cl in the correct bucket.
-        * If cl->S >= grp->S we don't need to adjust the
+        * Insert agg in the correct bucket.
+        * If agg->S >= grp->S we don't need to adjust the
         * bucket list and simply go to the insertion phase.
         * Otherwise grp->S is decreasing, we must make room
         * in the bucket list, and also recompute the group state.
@@ -990,10 +1255,10 @@ static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
         * was in ER make sure to adjust V.
         */
        if (grp->full_slots) {
-               if (!qfq_gt(grp->S, cl->S))
+               if (!qfq_gt(grp->S, agg->S))
                        goto skip_update;
 
-               /* create a slot for this cl->S */
+               /* create a slot for this agg->S */
                qfq_slot_rotate(grp, roundedS);
                /* group was surely ineligible, remove */
                __clear_bit(grp->index, &q->bitmaps[IR]);
@@ -1008,46 +1273,61 @@ static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
 
        pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
                 s, q->bitmaps[s],
-                (unsigned long long) cl->S,
-                (unsigned long long) cl->F,
+                (unsigned long long) agg->S,
+                (unsigned long long) agg->F,
                 (unsigned long long) q->V);
 
 skip_update:
-       qfq_slot_insert(grp, cl, roundedS);
+       qfq_slot_insert(grp, agg, roundedS);
 }
 
 
+/* Update agg ts and schedule agg for service */
+static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
+                            enum update_reason reason)
+{
+       qfq_update_agg_ts(q, agg, reason);
+       qfq_schedule_agg(q, agg);
+}
+
 static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
-                           struct qfq_class *cl)
+                           struct qfq_aggregate *agg)
 {
        unsigned int i, offset;
        u64 roundedS;
 
-       roundedS = qfq_round_down(cl->S, grp->slot_shift);
+       roundedS = qfq_round_down(agg->S, grp->slot_shift);
        offset = (roundedS - grp->S) >> grp->slot_shift;
+
        i = (grp->front + offset) % QFQ_MAX_SLOTS;
 
-       hlist_del(&cl->next);
+       hlist_del(&agg->next);
        if (hlist_empty(&grp->slots[i]))
                __clear_bit(offset, &grp->full_slots);
 }
 
 /*
- * called to forcibly destroy a queue.
- * If the queue is not in the front bucket, or if it has
- * other queues in the front bucket, we can simply remove
- * the queue with no other side effects.
+ * Called to forcibly deschedule an aggregate.  If the aggregate is
+ * not in the front bucket, or if the latter has other aggregates in
+ * the front bucket, we can simply remove the aggregate with no other
+ * side effects.
  * Otherwise we must propagate the event up.
  */
-static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
+static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
 {
-       struct qfq_group *grp = cl->grp;
+       struct qfq_group *grp = agg->grp;
        unsigned long mask;
        u64 roundedS;
        int s;
 
-       cl->F = cl->S;
-       qfq_slot_remove(q, grp, cl);
+       if (agg == q->in_serv_agg) {
+               charge_actual_service(agg);
+               q->in_serv_agg = qfq_choose_next_agg(q);
+               return;
+       }
+
+       agg->F = agg->S;
+       qfq_slot_remove(q, grp, agg);
 
        if (!grp->full_slots) {
                __clear_bit(grp->index, &q->bitmaps[IR]);
@@ -1066,8 +1346,8 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
                }
                __clear_bit(grp->index, &q->bitmaps[ER]);
        } else if (hlist_empty(&grp->slots[grp->front])) {
-               cl = qfq_slot_scan(grp);
-               roundedS = qfq_round_down(cl->S, grp->slot_shift);
+               agg = qfq_slot_scan(grp);
+               roundedS = qfq_round_down(agg->S, grp->slot_shift);
                if (grp->S != roundedS) {
                        __clear_bit(grp->index, &q->bitmaps[ER]);
                        __clear_bit(grp->index, &q->bitmaps[IR]);
@@ -1080,7 +1360,7 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
                }
        }
 
-       qfq_update_eligible(q, q->V);
+       qfq_update_eligible(q);
 }
 
 static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
@@ -1092,6 +1372,32 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
                qfq_deactivate_class(q, cl);
 }
 
+static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
+                                      struct hlist_head *slot)
+{
+       struct qfq_aggregate *agg;
+       struct hlist_node *n;
+       struct qfq_class *cl;
+       unsigned int len;
+
+       hlist_for_each_entry(agg, n, slot, next) {
+               list_for_each_entry(cl, &agg->active, alist) {
+
+                       if (!cl->qdisc->ops->drop)
+                               continue;
+
+                       len = cl->qdisc->ops->drop(cl->qdisc);
+                       if (len > 0) {
+                               if (cl->qdisc->q.qlen == 0)
+                                       qfq_deactivate_class(q, cl);
+
+                               return len;
+                       }
+               }
+       }
+       return 0;
+}
+
 static unsigned int qfq_drop(struct Qdisc *sch)
 {
        struct qfq_sched *q = qdisc_priv(sch);
@@ -1101,24 +1407,13 @@ static unsigned int qfq_drop(struct Qdisc *sch)
        for (i = 0; i <= QFQ_MAX_INDEX; i++) {
                grp = &q->groups[i];
                for (j = 0; j < QFQ_MAX_SLOTS; j++) {
-                       struct qfq_class *cl;
-                       struct hlist_node *n;
-
-                       hlist_for_each_entry(cl, n, &grp->slots[j], next) {
-
-                               if (!cl->qdisc->ops->drop)
-                                       continue;
-
-                               len = cl->qdisc->ops->drop(cl->qdisc);
-                               if (len > 0) {
-                                       sch->q.qlen--;
-                                       if (!cl->qdisc->q.qlen)
-                                               qfq_deactivate_class(q, cl);
-
-                                       return len;
-                               }
+                       len = qfq_drop_from_slot(q, &grp->slots[j]);
+                       if (len > 0) {
+                               sch->q.qlen--;
+                               return len;
                        }
                }
+
        }
 
        return 0;
@@ -1129,44 +1424,51 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_group *grp;
        int i, j, err;
+       u32 max_cl_shift, maxbudg_shift, max_classes;
 
        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                return err;
 
+       if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
+               max_classes = QFQ_MAX_AGG_CLASSES;
+       else
+               max_classes = qdisc_dev(sch)->tx_queue_len + 1;
+       /* max_cl_shift = floor(log_2(max_classes)) */
+       max_cl_shift = __fls(max_classes);
+       q->max_agg_classes = 1<<max_cl_shift;
+
+       /* maxbudg_shift = log2(max_len * max_classes_per_agg) */
+       maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
+       q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;
+
        for (i = 0; i <= QFQ_MAX_INDEX; i++) {
                grp = &q->groups[i];
                grp->index = i;
-               grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS
-                                  - (QFQ_MAX_INDEX - i);
+               grp->slot_shift = q->min_slot_shift + i;
                for (j = 0; j < QFQ_MAX_SLOTS; j++)
                        INIT_HLIST_HEAD(&grp->slots[j]);
        }
 
+       INIT_HLIST_HEAD(&q->nonfull_aggs);
+
        return 0;
 }
 
 static void qfq_reset_qdisc(struct Qdisc *sch)
 {
        struct qfq_sched *q = qdisc_priv(sch);
-       struct qfq_group *grp;
        struct qfq_class *cl;
-       struct hlist_node *n, *tmp;
-       unsigned int i, j;
+       struct hlist_node *n;
+       unsigned int i;
 
-       for (i = 0; i <= QFQ_MAX_INDEX; i++) {
-               grp = &q->groups[i];
-               for (j = 0; j < QFQ_MAX_SLOTS; j++) {
-                       hlist_for_each_entry_safe(cl, n, tmp,
-                                                 &grp->slots[j], next) {
+       for (i = 0; i < q->clhash.hashsize; i++) {
+               hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+                       if (cl->qdisc->q.qlen > 0)
                                qfq_deactivate_class(q, cl);
-                       }
-               }
-       }
 
-       for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
                        qdisc_reset(cl->qdisc);
+               }
        }
        sch->q.qlen = 0;
 }