Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author     David S. Miller <davem@davemloft.net>
Mon, 8 Sep 2014 04:41:53 +0000 (21:41 -0700)
committer  David S. Miller <davem@davemloft.net>
Mon, 8 Sep 2014 04:41:53 +0000 (21:41 -0700)
24 files changed:
MAINTAINERS
drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/phy/phy.c
include/linux/mlx4/device.h
include/linux/netdevice.h
include/net/sock.h
net/core/dev.c
net/core/skbuff.c
net/core/sock.c
net/ipv6/addrconf.c
net/ipv6/mcast.c

diff --combined MAINTAINERS
index c9b4b55bcbb33f45804b2f58eedecbc72b5dac7d,5e7866a486b0c3310b64ca5c9aa6cd6d9c16d354..fd86604f3a9cfb190be54195541a11fbbb527bee
@@@ -1277,9 -1277,15 +1277,15 @@@ F:    drivers/scsi/arm
  ARM/Rockchip SoC support
  M:    Heiko Stuebner <heiko@sntech.de>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+ L:    linux-rockchip@lists.infradead.org
  S:    Maintained
+ F:    arch/arm/boot/dts/rk3*
  F:    arch/arm/mach-rockchip/
+ F:    drivers/clk/rockchip/
+ F:    drivers/i2c/busses/i2c-rk3x.c
  F:    drivers/*/*rockchip*
+ F:    drivers/*/*/*rockchip*
+ F:    sound/soc/rockchip/
  
  ARM/SAMSUNG ARM ARCHITECTURES
  M:    Ben Dooks <ben-linux@fluff.org>
@@@ -2065,7 -2071,7 +2071,7 @@@ S:      Supported
  F:    drivers/scsi/bnx2i/
  
  BROADCOM KONA GPIO DRIVER
- M:    Markus Mayer <markus.mayer@linaro.org>
+ M:    Ray Jui <rjui@broadcom.com>
  L:    bcm-kernel-feedback-list@broadcom.com
  S:    Supported
  F:    drivers/gpio/gpio-bcm-kona.c
@@@ -3121,6 -3127,17 +3127,17 @@@ F:    include/linux/host1x.
  F:    include/uapi/drm/tegra_drm.h
  F:    Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
  
+ DRM DRIVERS FOR RENESAS
+ M:    Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ L:    dri-devel@lists.freedesktop.org
+ L:    linux-sh@vger.kernel.org
+ T:    git git://people.freedesktop.org/~airlied/linux
+ S:    Supported
+ F:    drivers/gpu/drm/rcar-du/
+ F:    drivers/gpu/drm/shmobile/
+ F:    include/linux/platform_data/rcar-du.h
+ F:    include/linux/platform_data/shmob_drm.h
+
  DSBR100 USB FM RADIO DRIVER
  M:    Alexey Klimov <klimov.linux@gmail.com>
  L:    linux-media@vger.kernel.org
@@@ -7361,7 -7378,7 +7378,7 @@@ F:      drivers/net/ethernet/qlogic/qla3xxx.
  
  QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
  M:    Shahed Shaikh <shahed.shaikh@qlogic.com>
 -M:    Dept-HSGLinuxNICDev@qlogic.com
 +M:    Dept-GELinuxNICDev@qlogic.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/qlogic/qlcnic/
@@@ -9545,6 -9562,14 +9562,14 @@@ S:    Maintained
  F:    Documentation/usb/ohci.txt
  F:    drivers/usb/host/ohci*
  
+ USB OVER IP DRIVER
+ M:    Valentina Manea <valentina.manea.m@gmail.com>
+ M:    Shuah Khan <shuah.kh@samsung.com>
+ L:    linux-usb@vger.kernel.org
+ S:    Maintained
+ F:    drivers/usb/usbip/
+ F:    tools/usb/usbip/
+
  USB PEGASUS DRIVER
  M:    Petko Manolov <petkan@nucleusys.com>
  L:    linux-usb@vger.kernel.org
@@@ -10045,9 -10070,9 +10070,9 @@@ F:   Documentation/x86
  F:    arch/x86/
  
  X86 PLATFORM DRIVERS
- M:    Matthew Garrett <matthew.garrett@nebula.com>
+ M:    Darren Hart <dvhart@infradead.org>
  L:    platform-driver-x86@vger.kernel.org
- T:    git git://cavan.codon.org.uk/platform-drivers-x86.git
+ T:    git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
  S:    Maintained
  F:    drivers/platform/x86/
  
diff --combined drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 978f9ec961aeebb6a8dbdf14abd3e6d706f44964,a3c11355a34dd199a40252dab901ab23cb908ffb..76479d04b9037370ebb91cd161574c793eaf881b
  #include "xgbe.h"
  #include "xgbe-common.h"
  
 -
  static ssize_t xgbe_common_read(char __user *buffer, size_t count,
                                loff_t *ppos, unsigned int value)
  {
@@@ -271,8 -272,8 +271,8 @@@ static ssize_t xpcs_reg_value_read(stru
        struct xgbe_prv_data *pdata = filp->private_data;
        unsigned int value;
  
-       value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
-                                          pdata->debugfs_xpcs_reg);
+       value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
+                          pdata->debugfs_xpcs_reg);
  
        return xgbe_common_read(buffer, count, ppos, value);
  }
@@@ -289,8 -290,8 +289,8 @@@ static ssize_t xpcs_reg_value_write(str
        if (len < 0)
                return len;
  
-       pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
-                                   pdata->debugfs_xpcs_reg, value);
+       XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
+                   value);
  
        return len;
  }
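
The two hunks above swap direct calls through pdata->hw_if.read_mmd_regs/write_mmd_regs for the XMDIO_READ/XMDIO_WRITE wrappers. The macros themselves live in xgbe.h and are not part of this diff; the sketch below (all names hypothetical) only illustrates the call-site effect of such a wrapper, which is that debugfs code no longer spells out the indirect hw_if accessor:

#include <stdio.h>

struct demo_prv_data;   /* stand-in for struct xgbe_prv_data */

/* stub accessor standing in for pdata->hw_if.read_mmd_regs() */
static unsigned int demo_read_mmd(struct demo_prv_data *pdata,
                                  unsigned int mmd, unsigned int reg)
{
        (void)pdata;
        return (mmd << 16) | reg;       /* fabricated value for the demo */
}

/* hypothetical wrapper in the spirit of XMDIO_READ */
#define DEMO_MDIO_READ(pdata, mmd, reg) demo_read_mmd((pdata), (mmd), (reg))

int main(void)
{
        unsigned int value = DEMO_MDIO_READ((struct demo_prv_data *)0, 1, 0x20);

        printf("0x%x\n", value);
        return 0;
}
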
diff --combined drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 3be98e505001845ad2b02fa307ddc45877aed81c,ea273836d999c854d89d45789e33d1ff81afc92b..9da3a03e8c0777595a7f99774be62c9528e55c6a
  #include "xgbe.h"
  #include "xgbe-common.h"
  
 -
  static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
                                      unsigned int usec)
  {
@@@ -347,7 -348,7 +347,7 @@@ static int xgbe_disable_tx_flow_control
  
        /* Clear MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-       q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+       q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
@@@ -372,7 -373,7 +372,7 @@@ static int xgbe_enable_tx_flow_control(
  
        /* Set MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-       q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+       q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
@@@ -508,8 -509,8 +508,8 @@@ static void xgbe_enable_mac_interrupts(
        XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
  
        /* Enable all counter interrupts */
-       XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
-       XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
+       XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+       XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
  }
  
  static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
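
The 0xff -> 0xffffffff change above matters if, as the fix implies, the ALL_INTERRUPTS field of MMC_RIER/MMC_TIER spans the full 32-bit register (the exact field width comes from XGMAC defines outside this diff). A minimal sketch of what the old constant left disabled:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t old_mask = 0xff;               /* previous value: bits 0-7 */
        uint32_t new_mask = 0xffffffff;         /* all 32 interrupt enables */

        printf("counter interrupts left disabled by 0xff: 0x%08x\n",
               new_mask & ~old_mask);           /* prints 0xffffff00 */
        return 0;
}
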
@@@ -1632,6 -1633,9 +1632,9 @@@ static int xgbe_flush_tx_queues(struct 
  {
        unsigned int i, count;
  
+       if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+               return 0;
+
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
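
The early return added above gates the queue flush on the Synopsys IP version parsed out of the cached MAC_VR register: flushing the Tx queues is only attempted on version 2.1 and later. A self-contained sketch of that gate, assuming the conventional (value >> index) & ((1 << width) - 1) expansion of GET_BITS-style macros and an assumed SNPSVER field in the low byte:

#include <stdio.h>

#define DEMO_GET_BITS(val, index, width) \
        (((val) >> (index)) & ((1u << (width)) - 1))

int main(void)
{
        unsigned int mac_vr = 0x21;     /* made-up register value: IP 2.1 */
        unsigned int snpsver = DEMO_GET_BITS(mac_vr, 0, 8);

        if (snpsver < 0x21)
                printf("pre-2.1 hardware: skip tx queue flush\n");
        else
                printf("flush tx queues\n");
        return 0;
}
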
  
@@@ -1702,8 -1706,8 +1705,8 @@@ static void xgbe_config_mtl_mode(struc
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
  }
  
- static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
-                                                 unsigned char queue_count)
+ static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
+                                                 unsigned int queue_count)
  {
        unsigned int q_fifo_size = 0;
        enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
                q_fifo_size = XGBE_FIFO_SIZE_KB(256);
                break;
        }
+       /* The configured value is not the actual amount of fifo RAM */
+       q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
        q_fifo_size = q_fifo_size / queue_count;
  
        /* Set the queue fifo size programmable value */
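
A worked example of the clamp introduced above: the hardware's fifo-size encoding can report more memory than the device actually has, so the decoded size is capped at XGBE_FIFO_MAX (defined as 81920 later in this merge, in xgbe.h) before being split across queues. Sketch with a made-up queue count:

#include <stdio.h>

#define DEMO_FIFO_MAX           81920   /* mirrors XGBE_FIFO_MAX */
#define DEMO_FIFO_SIZE_KB(x)    ((x) * 1024)

int main(void)
{
        unsigned int q_fifo_size = DEMO_FIFO_SIZE_KB(256);      /* 262144 */
        unsigned int queue_count = 8;

        if (q_fifo_size > DEMO_FIFO_MAX)
                q_fifo_size = DEMO_FIFO_MAX;    /* the added clamp */

        /* 10240 bytes per queue instead of an overcommitted 32768 */
        printf("%u\n", q_fifo_size / queue_count);
        return 0;
}
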
@@@ -1946,6 -1954,32 +1953,32 @@@ static void xgbe_config_vlan_support(st
                xgbe_disable_rx_vlan_stripping(pdata);
  }
  
+ static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+ {
+       bool read_hi;
+       u64 val;
+
+       switch (reg_lo) {
+       /* These registers are always 64 bit */
+       case MMC_TXOCTETCOUNT_GB_LO:
+       case MMC_TXOCTETCOUNT_G_LO:
+       case MMC_RXOCTETCOUNT_GB_LO:
+       case MMC_RXOCTETCOUNT_G_LO:
+               read_hi = true;
+               break;
+
+       default:
+               read_hi = false;
+       };
+
+       val = XGMAC_IOREAD(pdata, reg_lo);
+       if (read_hi)
+               val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+
+       return val;
+ }
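
xgbe_mmc_read above folds the optional high word into a 64-bit value; only the four octet counters listed in the switch have a second 32-bit register at reg_lo + 4. The composition itself, isolated with fabricated register contents:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t lo = 0x89abcdef;       /* read from reg_lo */
        uint32_t hi = 0x12;             /* read from reg_lo + 4 */
        uint64_t val = lo | ((uint64_t)hi << 32);

        printf("0x%016llx\n", (unsigned long long)val);
        return 0;                       /* prints 0x0000001289abcdef */
}
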
  static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
  {
        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
                stats->txoctetcount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
                stats->txframecount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
                stats->txbroadcastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
                stats->txmulticastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
                stats->tx64octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
                stats->tx65to127octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
                stats->tx128to255octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
                stats->tx256to511octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
                stats->tx512to1023octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
                stats->tx1024tomaxoctets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
                stats->txunicastframes_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
                stats->txmulticastframes_gb +=
-                       XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
                stats->txbroadcastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
                stats->txunderflowerror +=
-                       XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+                       xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
                stats->txoctetcount_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
                stats->txframecount_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
                stats->txpauseframes +=
-                       XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+                       xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
                stats->txvlanframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
  }
  
  static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
                stats->rxframecount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
                stats->rxoctetcount_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
                stats->rxoctetcount_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
                stats->rxbroadcastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
                stats->rxmulticastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
                stats->rxcrcerror +=
-                       XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+                       xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
                stats->rxrunterror +=
-                       XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+                       xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
                stats->rxjabbererror +=
-                       XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+                       xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
                stats->rxundersize_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+                       xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
                stats->rxoversize_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+                       xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
                stats->rx64octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
                stats->rx65to127octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
                stats->rx128to255octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
                stats->rx256to511octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
                stats->rx512to1023octets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
                stats->rx1024tomaxoctets_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
                stats->rxunicastframes_g +=
-                       XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+                       xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
                stats->rxlengtherror +=
-                       XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+                       xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
                stats->rxoutofrangetype +=
-                       XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+                       xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
                stats->rxpauseframes +=
-                       XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+                       xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
                stats->rxfifooverflow +=
-                       XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+                       xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
                stats->rxvlanframes_gb +=
-                       XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+                       xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
  
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
                stats->rxwatchdogerror +=
-                       XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+                       xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
  }
  
  static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
  
        stats->txoctetcount_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
  
        stats->txframecount_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
  
        stats->txbroadcastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
  
        stats->txmulticastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
  
        stats->tx64octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
  
        stats->tx65to127octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
  
        stats->tx128to255octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
  
        stats->tx256to511octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
  
        stats->tx512to1023octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
  
        stats->tx1024tomaxoctets_gb +=
-               XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
  
        stats->txunicastframes_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
  
        stats->txmulticastframes_gb +=
-               XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
  
        stats->txbroadcastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
  
        stats->txunderflowerror +=
-               XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+               xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
  
        stats->txoctetcount_g +=
-               XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
  
        stats->txframecount_g +=
-               XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
  
        stats->txpauseframes +=
-               XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+               xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
  
        stats->txvlanframes_g +=
-               XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
  
        stats->rxframecount_gb +=
-               XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
  
        stats->rxoctetcount_gb +=
-               XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
  
        stats->rxoctetcount_g +=
-               XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
  
        stats->rxbroadcastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
  
        stats->rxmulticastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
  
        stats->rxcrcerror +=
-               XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+               xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
  
        stats->rxrunterror +=
-               XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+               xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
  
        stats->rxjabbererror +=
-               XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+               xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
  
        stats->rxundersize_g +=
-               XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+               xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
  
        stats->rxoversize_g +=
-               XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+               xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
  
        stats->rx64octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
  
        stats->rx65to127octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
  
        stats->rx128to255octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
  
        stats->rx256to511octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
  
        stats->rx512to1023octets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
  
        stats->rx1024tomaxoctets_gb +=
-               XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
  
        stats->rxunicastframes_g +=
-               XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+               xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
  
        stats->rxlengtherror +=
-               XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+               xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
  
        stats->rxoutofrangetype +=
-               XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+               xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
  
        stats->rxpauseframes +=
-               XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+               xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
  
        stats->rxfifooverflow +=
-               XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+               xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
  
        stats->rxvlanframes_gb +=
-               XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+               xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
  
        stats->rxwatchdogerror +=
-               XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+               xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
  
        /* Un-freeze counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
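
All of the reads above sit between MCF=1 and MCF=0 writes to MMC_CR: the counters are frozen, accumulated into the stats structure, then un-frozen, so the snapshot stays internally consistent while traffic continues. A stripped-down sketch of that bracket, with the register accessors stubbed out:

#include <stdint.h>

static void demo_set_freeze(int on)        { (void)on; /* MMC_CR.MCF stub */ }
static uint64_t demo_read_counter(int i)   { return (uint64_t)i; /* stub */ }

static void demo_read_mmc_stats(uint64_t *stats, int n)
{
        int i;

        demo_set_freeze(1);             /* freeze counters */
        for (i = 0; i < n; i++)
                stats[i] += demo_read_counter(i);
        demo_set_freeze(0);             /* un-freeze counters */
}

int main(void)
{
        uint64_t stats[4] = { 0 };

        demo_read_mmc_stats(stats, 4);
        return 0;
}
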
diff --combined drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 847da66d15485bf39beec98590bca2f3cd03befa,b26d75856553bf62c975e533dc815051f7968de5..29554992215aa18301f26b7e431b1fc19dfff9cb
  #include "xgbe.h"
  #include "xgbe-common.h"
  
 -
  static int xgbe_poll(struct napi_struct *, int);
  static void xgbe_set_rx_mode(struct net_device *);
  
@@@ -360,6 -361,8 +360,8 @@@ void xgbe_get_all_hw_features(struct xg
  
        memset(hw_feat, 0, sizeof(*hw_feat));
  
+       hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
+
        /* Hardware feature register 0 */
        hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
        hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
diff --combined drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 2289526b6f1cef4d16b9152cdd71e4e67b395222,46f613028e9c00ba9b659a4b8ccc7a8f5f47c644..49508ec98b72c1c01fdd189fc71e8c7465c77d2c
  #include "xgbe.h"
  #include "xgbe-common.h"
  
 -
  struct xgbe_stats {
        char stat_string[ETH_GSTRING_LEN];
        int stat_size;
@@@ -172,7 -173,6 +172,7 @@@ static const struct xgbe_stats xgbe_gst
        XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
        XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
  };
 +
  #define XGBE_STATS_COUNT      ARRAY_SIZE(xgbe_gstring_stats)
  
  static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
@@@ -361,15 -361,16 +361,16 @@@ static void xgbe_get_drvinfo(struct net
                             struct ethtool_drvinfo *drvinfo)
  {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
  
        strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
                sizeof(drvinfo->bus_info));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
-                XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
-                XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
-                XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
+                XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+                XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+                XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
        drvinfo->n_stats = XGBE_STATS_COUNT;
  }
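
Together with the hw_feat->version line added in xgbe-drv.c above, this hunk switches get_drvinfo from three MMIO reads per ethtool query to parsing a copy of MAC_VR cached at feature-discovery time. The pattern, reduced to a sketch (the bit positions of USERVER/DEVID/SNPSVER here are assumptions for the demo):

#include <stdio.h>

struct demo_hw_features {
        unsigned int version;           /* cached MAC_VR contents */
};

static unsigned int demo_ioread_mac_vr(void)
{
        return 0x00150221;              /* fabricated register value */
}

int main(void)
{
        struct demo_hw_features hw_feat;

        hw_feat.version = demo_ioread_mac_vr(); /* done once, at probe */

        printf("%u.%u.%u\n",
               (hw_feat.version >> 16) & 0xff,  /* userver (assumed bits) */
               (hw_feat.version >> 8) & 0xff,   /* devid (assumed bits) */
               hw_feat.version & 0xff);         /* snpsver (assumed bits) */
        return 0;                               /* prints 21.2.33 */
}
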
  
diff --combined drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 984161d4ffc9475a549b7ad7862bddcb41526e29,bdf9cfa70e88c4dac3c7f3c439fa2b3eef29aaa6..f5a8fa03921aafdff9dd8c8274b4bd055ad05e8c
  #include "xgbe.h"
  #include "xgbe-common.h"
  
 -
  MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
  MODULE_LICENSE("Dual BSD/GPL");
  MODULE_VERSION(XGBE_DRV_VERSION);
@@@ -171,7 -172,7 +171,7 @@@ static struct xgbe_channel *xgbe_alloc_
                }
  
                if (i < pdata->rx_ring_count) {
-                       spin_lock_init(&tx_ring->lock);
+                       spin_lock_init(&rx_ring->lock);
                        channel->rx_ring = rx_ring++;
                }
  
diff --combined drivers/net/ethernet/amd/xgbe/xgbe.h
index 84fe34ce5571772de3e205e7c4c77d706b8fac5d,e9fe6e6ddcc34acd7469aef6b523855e535f5a99..789957d43a1379939e2aa9b8d15f267473a271a6
  #include <linux/net_tstamp.h>
  #include <net/dcbnl.h>
  
 -
  #define XGBE_DRV_NAME         "amd-xgbe"
  #define XGBE_DRV_VERSION      "1.0.0-a"
  #define XGBE_DRV_DESC         "AMD 10 Gigabit Ethernet Driver"
  #define XGMAC_DRIVER_CONTEXT  1
  #define XGMAC_IOCTL_CONTEXT   2
  
+ #define XGBE_FIFO_MAX         81920
  #define XGBE_FIFO_SIZE_B(x)   (x)
  #define XGBE_FIFO_SIZE_KB(x)  (x * 1024)
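
One side observation on the context lines above: XGBE_FIFO_SIZE_KB(x) expands x unparenthesized, which is harmless for the literal arguments the driver passes but is the classic macro-argument pitfall for compound expressions:

#include <stdio.h>

#define SIZE_KB_UNPAREN(x)      (x * 1024)      /* as in the header above */
#define SIZE_KB_PAREN(x)        ((x) * 1024)    /* defensive variant */

int main(void)
{
        printf("%d\n", SIZE_KB_UNPAREN(2 + 2)); /* 2 + 2*1024 = 2050 */
        printf("%d\n", SIZE_KB_PAREN(2 + 2));   /* (2 + 2)*1024 = 4096 */
        return 0;
}
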
  
        ((_ring)->rdata +                                       \
         ((_idx) & ((_ring)->rdesc_count - 1)))
  
 -
  /* Default coalescing parameters */
  #define XGMAC_INIT_DMA_TX_USECS               50
  #define XGMAC_INIT_DMA_TX_FRAMES      25
@@@ -524,6 -527,9 +525,9 @@@ struct xgbe_desc_if 
   * or configurations are present in the device.
   */
  struct xgbe_hw_features {
+       /* HW Version */
+       unsigned int version;
+
        /* HW Feature Register0 */
        unsigned int gmii;              /* 1000 Mbps support */
        unsigned int vlhash;            /* VLAN Hash Filter */
diff --combined drivers/net/ethernet/broadcom/Kconfig
index a5ea416133a8a5b54516d916d72d471e8c94dcf1,d8d07a818b89bc694a77b2bdc3172f5bbb6d7010..c3e260c21734c37ca1641ffbf9441c867d18deb6
@@@ -84,7 -84,7 +84,7 @@@ config BNX
  
  config CNIC
        tristate "QLogic CNIC support"
-       depends on PCI
+       depends on PCI && (IPV6 || IPV6=n)
        select BNX2
        select UIO
        ---help---
@@@ -122,7 -122,6 +122,7 @@@ config TIGON
  config BNX2X
        tristate "Broadcom NetXtremeII 10Gb support"
        depends on PCI
 +      select PTP_1588_CLOCK
        select FW_LOADER
        select ZLIB_INFLATE
        select LIBCRC32C
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5579d4bdbaac1a417da0072a862e614306d838ff,c4daa068f1db5e37bdea6f160dde26b7c13d1659..3e0621acdf05570db85127f0da200ac80160876e
@@@ -2233,7 -2233,12 +2233,12 @@@ struct shmem2_region 
        u32 reserved3;                          /* Offset 0x14C */
        u32 reserved4;                          /* Offset 0x150 */
        u32 link_attr_sync[PORT_MAX];           /* Offset 0x154 */
-       #define LINK_ATTR_SYNC_KR2_ENABLE       (1<<0)
+       #define LINK_ATTR_SYNC_KR2_ENABLE       0x00000001
+       #define LINK_SFP_EEPROM_COMP_CODE_MASK  0x0000ff00
+       #define LINK_SFP_EEPROM_COMP_CODE_SHIFT          8
+       #define LINK_SFP_EEPROM_COMP_CODE_SR    0x00001000
+       #define LINK_SFP_EEPROM_COMP_CODE_LR    0x00002000
+       #define LINK_SFP_EEPROM_COMP_CODE_LRM   0x00004000
  
        u32 reserved5[2];
        u32 reserved6[PORT_MAX];
@@@ -2876,8 -2881,8 +2881,8 @@@ struct afex_stats 
  };
  
  #define BCM_5710_FW_MAJOR_VERSION                     7
 -#define BCM_5710_FW_MINOR_VERSION                     8
 -#define BCM_5710_FW_REVISION_VERSION          19
 +#define BCM_5710_FW_MINOR_VERSION                     10
 +#define BCM_5710_FW_REVISION_VERSION          51
  #define BCM_5710_FW_ENGINEERING_VERSION               0
  #define BCM_5710_FW_COMPILE_FLAGS                     1
  
@@@ -3446,7 -3451,6 +3451,7 @@@ enum classify_rule 
        CLASSIFY_RULE_OPCODE_MAC,
        CLASSIFY_RULE_OPCODE_VLAN,
        CLASSIFY_RULE_OPCODE_PAIR,
 +      CLASSIFY_RULE_OPCODE_VXLAN,
        MAX_CLASSIFY_RULE
  };
  
@@@ -3476,8 -3480,7 +3481,8 @@@ struct client_init_general_data 
        u8 func_id;
        u8 cos;
        u8 traffic_type;
 -      u32 reserved0;
 +      u8 fp_hsi_ver;
 +      u8 reserved0[3];
  };
  
  
@@@ -3547,9 -3550,7 +3552,9 @@@ struct client_init_rx_data 
        __le16 rx_cos_mask;
        __le16 silent_vlan_value;
        __le16 silent_vlan_mask;
 -      __le32 reserved6[2];
 +      u8 handle_ptp_pkts_flg;
 +      u8 reserved6[3];
 +      __le32 reserved7;
  };
  
  /*
@@@ -3580,7 -3581,7 +3585,7 @@@ struct client_init_tx_data 
        u8 tunnel_lso_inc_ip_id;
        u8 refuse_outband_vlan_flg;
        u8 tunnel_non_lso_pcsum_location;
 -      u8 reserved1;
 +      u8 tunnel_non_lso_outer_ip_csum_location;
  };
  
  /*
@@@ -3618,9 -3619,7 +3623,9 @@@ struct client_update_ramrod_data 
        u8 refuse_outband_vlan_change_flg;
        u8 tx_switching_flg;
        u8 tx_switching_change_flg;
 -      __le32 reserved1;
 +      u8 handle_ptp_pkts_flg;
 +      u8 handle_ptp_pkts_change_flg;
 +      __le16 reserved1;
        __le32 echo;
  };
  
@@@ -3640,11 -3639,6 +3645,11 @@@ struct double_regpair 
        u32 regpair1_hi;
  };
  
 +/* 2nd parse bd type used in ethernet tx BDs */
 +enum eth_2nd_parse_bd_type {
 +      ETH_2ND_PARSE_BD_TYPE_LSO_TUNNEL,
 +      MAX_ETH_2ND_PARSE_BD_TYPE
 +};
  
  /*
   * Ethernet address typesm used in ethernet tx BDs
@@@ -3729,18 -3723,6 +3734,18 @@@ struct eth_classify_vlan_cmd 
        __le16 vlan;
  };
  
 +/*
 + * Command for adding/removing a VXLAN classification rule
 + */
 +struct eth_classify_vxlan_cmd {
 +      struct eth_classify_cmd_header header;
 +      __le32 vni;
 +      __le16 inner_mac_lsb;
 +      __le16 inner_mac_mid;
 +      __le16 inner_mac_msb;
 +      __le16 reserved1;
 +};
 +
  /*
   * union for eth classification rule
   */
@@@ -3748,7 -3730,6 +3753,7 @@@ union eth_classify_rule_cmd 
        struct eth_classify_mac_cmd mac;
        struct eth_classify_vlan_cmd vlan;
        struct eth_classify_pair_cmd pair;
 +      struct eth_classify_vxlan_cmd vxlan;
  };
  
  /*
@@@ -3854,10 -3835,8 +3859,10 @@@ struct eth_fast_path_rx_cqe 
  #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
  #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5)
  #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
 -#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
 -#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
 +#define ETH_FAST_PATH_RX_CQE_PTP_PKT (0x1<<6)
 +#define ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT 6
 +#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x1<<7)
 +#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 7
        u8 status_flags;
  #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
  #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
@@@ -3928,13 -3907,6 +3933,13 @@@ struct eth_filter_rules_ramrod_data 
        struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
  };
  
 +/* Hsi version */
 +enum eth_fp_hsi_ver {
 +      ETH_FP_HSI_VER_0,
 +      ETH_FP_HSI_VER_1,
 +      ETH_FP_HSI_VER_2,
 +      MAX_ETH_FP_HSI_VER
 +};
  
  /*
   * parameters for eth classification configuration ramrod
@@@ -3983,17 -3955,29 +3988,17 @@@ struct eth_mac_addresses 
  
  /* tunneling related data */
  struct eth_tunnel_data {
 -#if defined(__BIG_ENDIAN)
 -      __le16 dst_mid;
 -      __le16 dst_lo;
 -#elif defined(__LITTLE_ENDIAN)
        __le16 dst_lo;
        __le16 dst_mid;
 -#endif
 -#if defined(__BIG_ENDIAN)
 -      __le16 reserved0;
 -      __le16 dst_hi;
 -#elif defined(__LITTLE_ENDIAN)
        __le16 dst_hi;
 -      __le16 reserved0;
 -#endif
 -#if defined(__BIG_ENDIAN)
 -      u8 reserved1;
 -      u8 ip_hdr_start_inner_w;
 -      __le16 pseudo_csum;
 -#elif defined(__LITTLE_ENDIAN)
 +      __le16 fw_ip_hdr_csum;
        __le16 pseudo_csum;
        u8 ip_hdr_start_inner_w;
 -      u8 reserved1;
 -#endif
 +      u8 flags;
 +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
 +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
 +#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
 +#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
  };
  
  /* union for mac addresses and for tunneling data.
@@@ -4080,41 -4064,31 +4085,41 @@@ enum eth_rss_mode 
   */
  struct eth_rss_update_ramrod_data {
        u8 rss_engine_id;
 -      u8 capabilities;
 +      u8 rss_mode;
 +      __le16 capabilities;
  #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0)
  #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
  #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1)
  #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
  #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2)
  #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3)
 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3
 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4)
 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
 -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6)
 -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
 -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
 -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY (0x1<<3)
 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY_SHIFT 3
 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<4)
 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 4
 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<5)
 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 5
 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<6)
 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
 +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8)
 +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8
 +#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9)
 +#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9
 +#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10)
 +#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10
 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11)
 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11
 +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12)
 +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12
        u8 rss_result_mask;
 -      u8 rss_mode;
 -      __le16 udp_4tuple_dst_port_mask;
 -      __le16 udp_4tuple_dst_port_value;
 +      u8 reserved3;
 +      __le16 reserved4;
        u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
        __le32 rss_key[T_ETH_RSS_KEY];
        __le32 echo;
 -      __le32 reserved3;
 +      __le32 reserved5;
  };
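
The capabilities field above widens from u8 to __le16 because the reshuffled bit layout now reaches bit 11 (UPDATE_RSS_KEY) plus four reserved bits. Quick check using the shift values straight from the defines in this hunk:

#include <stdio.h>

#define IPV4_VXLAN_CAPABILITY   (0x1 << 3)      /* from the defines above */
#define UPDATE_RSS_KEY          (0x1 << 11)     /* from the defines above */

int main(void)
{
        unsigned int caps = IPV4_VXLAN_CAPABILITY | UPDATE_RSS_KEY;

        /* 0x0808: bit 11 no longer fits in the old 8-bit field */
        printf("0x%04x\n", caps);
        return 0;
}
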
  
  
@@@ -4286,10 -4260,10 +4291,10 @@@ enum eth_tunnel_lso_inc_ip_id 
  /* In case tunnel exist and L4 checksum offload,
   * the pseudo checksum location, on packet or on BD.
   */
 -enum eth_tunnel_non_lso_pcsum_location {
 -      PCSUM_ON_PKT,
 -      PCSUM_ON_BD,
 -      MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION
 +enum eth_tunnel_non_lso_csum_location {
 +      CSUM_ON_PKT,
 +      CSUM_ON_BD,
 +      MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
  };
  
  /*
@@@ -4336,10 -4310,8 +4341,10 @@@ struct eth_tx_start_bd 
        __le16 vlan_or_ethertype;
        struct eth_tx_bd_flags bd_flags;
        u8 general_data;
 -#define ETH_TX_START_BD_HDR_NBDS (0xF<<0)
 +#define ETH_TX_START_BD_HDR_NBDS (0x7<<0)
  #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
 +#define ETH_TX_START_BD_NO_ADDED_TAGS (0x1<<3)
 +#define ETH_TX_START_BD_NO_ADDED_TAGS_SHIFT 3
  #define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
  #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
  #define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
@@@ -4415,8 -4387,8 +4420,8 @@@ struct eth_tx_parse_2nd_bd 
        __le16 global_data;
  #define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
  #define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
 -#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4)
 -#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4
 +#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4)
 +#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4
  #define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
  #define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
  #define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
  #define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
  #define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
  #define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
 -#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13)
 -#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13
 -      __le16 reserved1;
 +#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13)
 +#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13
 +      u8 bd_type;
 +#define ETH_TX_PARSE_2ND_BD_TYPE (0xF<<0)
 +#define ETH_TX_PARSE_2ND_BD_TYPE_SHIFT 0
 +#define ETH_TX_PARSE_2ND_BD_RESERVED2 (0xF<<4)
 +#define ETH_TX_PARSE_2ND_BD_RESERVED2_SHIFT 4
 +      u8 reserved3;
        u8 tcp_flags;
  #define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
  #define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
  #define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
  #define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
  #define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
 -      u8 reserved2;
 +      u8 reserved4;
        u8 tunnel_udp_hdr_start_w;
        u8 fw_ip_hdr_to_payload_w;
        __le16 fw_ip_csum_wo_len_flags_frag;
@@@ -5238,18 -5205,10 +5243,18 @@@ struct function_start_data 
        u8 path_id;
        u8 network_cos_mode;
        u8 dmae_cmd_id;
 -      u8 gre_tunnel_mode;
 -      u8 gre_tunnel_rss;
 -      u8 nvgre_clss_en;
 -      __le16 reserved1[2];
 +      u8 tunnel_mode;
 +      u8 gre_tunnel_type;
 +      u8 tunn_clss_en;
 +      u8 inner_gre_rss_en;
 +      u8 sd_accept_mf_clss_fail;
 +      __le16 vxlan_dst_port;
 +      __le16 sd_accept_mf_clss_fail_ethtype;
 +      __le16 sd_vlan_eth_type;
 +      u8 sd_vlan_force_pri_flg;
 +      u8 sd_vlan_force_pri_val;
 +      u8 sd_accept_mf_clss_fail_match_ethtype;
 +      u8 no_added_tags;
  };
  
  struct function_update_data {
        u8 tx_switch_suspend_change_flg;
        u8 tx_switch_suspend;
        u8 echo;
 +      u8 update_tunn_cfg_flg;
 +      u8 tunnel_mode;
 +      u8 gre_tunnel_type;
 +      u8 tunn_clss_en;
 +      u8 inner_gre_rss_en;
 +      __le16 vxlan_dst_port;
 +      u8 sd_vlan_force_pri_change_flg;
 +      u8 sd_vlan_force_pri_flg;
 +      u8 sd_vlan_force_pri_val;
 +      u8 sd_vlan_tag_change_flg;
 +      u8 sd_vlan_eth_type_change_flg;
        u8 reserved1;
 -      u8 update_gre_cfg_flg;
 -      u8 gre_tunnel_mode;
 -      u8 gre_tunnel_rss;
 -      u8 nvgre_clss_en;
 -      u32 reserved3;
 +      __le16 sd_vlan_tag;
 +      __le16 sd_vlan_eth_type;
  };
  
  /*
@@@ -5308,9 -5259,17 +5313,9 @@@ struct fw_version 
  #define __FW_VERSION_RESERVED_SHIFT 4
  };
  
 -/* GRE RSS Mode */
 -enum gre_rss_mode {
 -      GRE_OUTER_HEADERS_RSS,
 -      GRE_INNER_HEADERS_RSS,
 -      NVGRE_KEY_ENTROPY_RSS,
 -      MAX_GRE_RSS_MODE
 -};
  
  /* GRE Tunnel Mode */
  enum gre_tunnel_type {
 -      NO_GRE_TUNNEL,
        NVGRE_TUNNEL,
        L2GRE_TUNNEL,
        IPGRE_TUNNEL,
@@@ -5483,7 -5442,6 +5488,7 @@@ enum ip_ver 
   * Malicious VF error ID
   */
  enum malicious_vf_error_id {
 +      MALICIOUS_VF_NO_ERROR,
        VF_PF_CHANNEL_NOT_READY,
        ETH_ILLEGAL_BD_LENGTHS,
        ETH_PACKET_TOO_SHORT,
@@@ -5644,16 -5602,6 +5649,16 @@@ struct protocol_common_spe 
        union protocol_common_specific_data data;
  };
  
 +/* The data for the Set Timesync Ramrod */
 +struct set_timesync_ramrod_data {
 +      u8 drift_adjust_cmd;
 +      u8 offset_cmd;
 +      u8 add_sub_drift_adjust_value;
 +      u8 drift_adjust_value;
 +      u32 drift_adjust_period;
 +      struct regpair offset_delta;
 +};
 +
  /*
   * The send queue element
   */
@@@ -5776,38 -5724,10 +5781,38 @@@ struct tstorm_vf_zone_data 
        struct regpair reserved;
  };
  
 +/* Add or Subtract Value for Set Timesync Ramrod */
 +enum ts_add_sub_value {
 +      TS_SUB_VALUE,
 +      TS_ADD_VALUE,
 +      MAX_TS_ADD_SUB_VALUE
 +};
  
 -/*
 - * zone A per-queue data
 - */
 +/* Drift-Adjust Commands for Set Timesync Ramrod */
 +enum ts_drift_adjust_cmd {
 +      TS_DRIFT_ADJUST_KEEP,
 +      TS_DRIFT_ADJUST_SET,
 +      TS_DRIFT_ADJUST_RESET,
 +      MAX_TS_DRIFT_ADJUST_CMD
 +};
 +
 +/* Offset Commands for Set Timesync Ramrod */
 +enum ts_offset_cmd {
 +      TS_OFFSET_KEEP,
 +      TS_OFFSET_INC,
 +      TS_OFFSET_DEC,
 +      MAX_TS_OFFSET_CMD
 +};
 +
 +/* Tunnel Mode */
 +enum tunnel_mode {
 +      TUNN_MODE_NONE,
 +      TUNN_MODE_VXLAN,
 +      TUNN_MODE_GRE,
 +      MAX_TUNNEL_MODE
 +};
 +
 + /* zone A per-queue data */
  struct ustorm_queue_zone_data {
        struct ustorm_eth_rx_producers eth_rx_producers;
        struct regpair reserved[3];
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 93132d8fec671b9e6a0858c19c2aa33519e73210,d1c093dcb054aebb71b1ee701cf175e8ab804515..32e2444ab5e1c0621e2616fdb258d0d88ebe0e26
@@@ -41,7 -41,6 +41,7 @@@
  #include <linux/ethtool.h>
  #include <linux/mii.h>
  #include <linux/if_vlan.h>
 +#include <linux/crash_dump.h>
  #include <net/ip.h>
  #include <net/ipv6.h>
  #include <net/tcp.h>
@@@ -64,6 -63,7 +64,6 @@@
  #include "bnx2x_vfpf.h"
  #include "bnx2x_dcb.h"
  #include "bnx2x_sp.h"
 -
  #include <linux/firmware.h>
  #include "bnx2x_fw_file_hdr.h"
  /* FW files */
@@@ -290,8 -290,6 +290,8 @@@ static int bnx2x_set_storm_rx_mode(stru
  * General service functions
  ****************************************************************************/
  
 +static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
 +
  static void __storm_memset_dma_mapping(struct bnx2x *bp,
                                       u32 addr, dma_addr_t mapping)
  {
@@@ -525,7 -523,6 +525,7 @@@ int bnx2x_issue_dmae_with_comp(struct b
         * as long as this code is called both from syscall context and
         * from ndo_set_rx_mode() flow that may be called from BH.
         */
 +
        spin_lock_bh(&bp->dmae_lock);
  
        /* reset completion */
        }
  
  unlock:
 +
        spin_unlock_bh(&bp->dmae_lock);
 +
        return rc;
  }
  
@@@ -651,98 -646,119 +651,98 @@@ static void bnx2x_write_dmae_phys_len(s
        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
  }
  
 +enum storms {
 +         XSTORM,
 +         TSTORM,
 +         CSTORM,
 +         USTORM,
 +         MAX_STORMS
 +};
 +
 +#define STORMS_NUM 4
 +#define REGS_IN_ENTRY 4
 +
 +static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
 +                                            enum storms storm,
 +                                            int entry)
 +{
 +      switch (storm) {
 +      case XSTORM:
 +              return XSTORM_ASSERT_LIST_OFFSET(entry);
 +      case TSTORM:
 +              return TSTORM_ASSERT_LIST_OFFSET(entry);
 +      case CSTORM:
 +              return CSTORM_ASSERT_LIST_OFFSET(entry);
 +      case USTORM:
 +              return USTORM_ASSERT_LIST_OFFSET(entry);
 +      case MAX_STORMS:
 +      default:
 +              BNX2X_ERR("unknown storm\n");
 +      }
 +      return -EINVAL;
 +}
 +
  static int bnx2x_mc_assert(struct bnx2x *bp)
  {
        char last_idx;
 -      int i, rc = 0;
 -      u32 row0, row1, row2, row3;
 -
 -      /* XSTORM */
 -      last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
 -                         XSTORM_ASSERT_LIST_INDEX_OFFSET);
 -      if (last_idx)
 -              BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 -
 -      /* print the asserts */
 -      for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 -
 -              row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 -                            XSTORM_ASSERT_LIST_OFFSET(i));
 -              row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 -                            XSTORM_ASSERT_LIST_OFFSET(i) + 4);
 -              row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 -                            XSTORM_ASSERT_LIST_OFFSET(i) + 8);
 -              row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
 -                            XSTORM_ASSERT_LIST_OFFSET(i) + 12);
 -
 -              if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 -                      BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
 -                                i, row3, row2, row1, row0);
 -                      rc++;
 -              } else {
 -                      break;
 -              }
 -      }
 -
 -      /* TSTORM */
 -      last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
 -                         TSTORM_ASSERT_LIST_INDEX_OFFSET);
 -      if (last_idx)
 -              BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 -
 -      /* print the asserts */
 -      for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 -
 -              row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 -                            TSTORM_ASSERT_LIST_OFFSET(i));
 -              row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 -                            TSTORM_ASSERT_LIST_OFFSET(i) + 4);
 -              row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 -                            TSTORM_ASSERT_LIST_OFFSET(i) + 8);
 -              row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
 -                            TSTORM_ASSERT_LIST_OFFSET(i) + 12);
 -
 -              if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 -                      BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
 -                                i, row3, row2, row1, row0);
 -                      rc++;
 -              } else {
 -                      break;
 -              }
 -      }
 +      int i, j, rc = 0;
 +      enum storms storm;
 +      u32 regs[REGS_IN_ENTRY];
 +      u32 bar_storm_intmem[STORMS_NUM] = {
 +              BAR_XSTRORM_INTMEM,
 +              BAR_TSTRORM_INTMEM,
 +              BAR_CSTRORM_INTMEM,
 +              BAR_USTRORM_INTMEM
 +      };
 +      u32 storm_assert_list_index[STORMS_NUM] = {
 +              XSTORM_ASSERT_LIST_INDEX_OFFSET,
 +              TSTORM_ASSERT_LIST_INDEX_OFFSET,
 +              CSTORM_ASSERT_LIST_INDEX_OFFSET,
 +              USTORM_ASSERT_LIST_INDEX_OFFSET
 +      };
 +      char *storms_string[STORMS_NUM] = {
 +              "XSTORM",
 +              "TSTORM",
 +              "CSTORM",
 +              "USTORM"
 +      };
  
 -      /* CSTORM */
 -      last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
 -                         CSTORM_ASSERT_LIST_INDEX_OFFSET);
 -      if (last_idx)
 -              BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 -
 -      /* print the asserts */
 -      for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 -
 -              row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 -                            CSTORM_ASSERT_LIST_OFFSET(i));
 -              row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 -                            CSTORM_ASSERT_LIST_OFFSET(i) + 4);
 -              row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 -                            CSTORM_ASSERT_LIST_OFFSET(i) + 8);
 -              row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
 -                            CSTORM_ASSERT_LIST_OFFSET(i) + 12);
 -
 -              if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 -                      BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
 -                                i, row3, row2, row1, row0);
 -                      rc++;
 -              } else {
 -                      break;
 +      for (storm = XSTORM; storm < MAX_STORMS; storm++) {
 +              last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
 +                                 storm_assert_list_index[storm]);
 +              if (last_idx)
 +                      BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
 +                                storms_string[storm], last_idx);
 +
 +              /* print the asserts */
 +              for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 +                      /* read a single assert entry */
 +                      for (j = 0; j < REGS_IN_ENTRY; j++)
 +                              regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
 +                                        bnx2x_get_assert_list_entry(bp,
 +                                                                    storm,
 +                                                                    i) +
 +                                        sizeof(u32) * j);
 +
 +                      /* log entry if it contains a valid assert */
 +                      if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 +                              BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
 +                                        storms_string[storm], i, regs[3],
 +                                        regs[2], regs[1], regs[0]);
 +                              rc++;
 +                      } else {
 +                              break;
 +                      }
                }
        }
  
 -      /* USTORM */
 -      last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
 -                         USTORM_ASSERT_LIST_INDEX_OFFSET);
 -      if (last_idx)
 -              BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
 -
 -      /* print the asserts */
 -      for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
 -
 -              row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
 -                            USTORM_ASSERT_LIST_OFFSET(i));
 -              row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
 -                            USTORM_ASSERT_LIST_OFFSET(i) + 4);
 -              row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
 -                            USTORM_ASSERT_LIST_OFFSET(i) + 8);
 -              row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
 -                            USTORM_ASSERT_LIST_OFFSET(i) + 12);
 -
 -              if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
 -                      BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
 -                                i, row3, row2, row1, row0);
 -                      rc++;
 -              } else {
 -                      break;
 -              }
 -      }
 +      BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
 +                CHIP_IS_E1(bp) ? "everest1" :
 +                CHIP_IS_E1H(bp) ? "everest1h" :
 +                CHIP_IS_E2(bp) ? "everest2" : "everest3",
 +                BCM_5710_FW_MAJOR_VERSION,
 +                BCM_5710_FW_MINOR_VERSION,
 +                BCM_5710_FW_REVISION_VERSION);
  
        return rc;
  }
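
The rewrite above collapses four near-identical per-storm dump loops into one loop driven by parallel tables of INTMEM base addresses, list-index offsets and storm names, with bnx2x_get_assert_list_entry() mapping (storm, entry) to an offset. A minimal standalone sketch of that table-driven shape (the addresses and the register read below are stubs, and the return value mimics the function's count of logged asserts):

#include <stdio.h>

#define DEMO_NUM_STORMS 4

/* stub register read; the real code uses REG_RD()/REG_RD8() */
static unsigned int demo_reg_rd(unsigned int addr)
{
        return addr == 0x2000;  /* pretend only TSTORM latched an assert */
}

int main(void)
{
        unsigned int base[DEMO_NUM_STORMS] = { 0x1000, 0x2000, 0x3000, 0x4000 };
        const char *name[DEMO_NUM_STORMS] = {
                "XSTORM", "TSTORM", "CSTORM", "USTORM"
        };
        int storm, rc = 0;

        for (storm = 0; storm < DEMO_NUM_STORMS; storm++) {
                if (demo_reg_rd(base[storm])) {
                        printf("%s assert list index set\n", name[storm]);
                        rc++;
                }
        }
        return rc;
}
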
@@@ -967,12 -983,6 +967,12 @@@ void bnx2x_panic_dump(struct bnx2x *bp
                u32 *sb_data_p;
                struct bnx2x_fp_txdata txdata;
  
 +              if (!bp->fp)
 +                      break;
 +
 +              if (!fp->rx_cons_sb)
 +                      continue;
 +
                /* Rx */
                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                /* Tx */
                for_each_cos_in_tx_queue(fp, cos)
                {
 +                      if (!fp->txdata_ptr[cos])
 +                              break;
 +
                        txdata = *fp->txdata_ptr[cos];
 +
 +                      if (!txdata.tx_cons_sb)
 +                              continue;
 +
                        BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
                                  i, txdata.tx_pkt_prod,
                                  txdata.tx_pkt_cons, txdata.tx_bd_prod,
        for_each_valid_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
  
 +              if (!bp->fp)
 +                      break;
 +
 +              if (!fp->rx_cons_sb)
 +                      continue;
 +
                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
        /* Tx */
        for_each_valid_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
 +
 +              if (!bp->fp)
 +                      break;
 +
                for_each_cos_in_tx_queue(fp, cos) {
                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
  
 +                      if (!fp->txdata_ptr[cos])
 +                              break;
 +
 +                      if (!txdata->tx_cons_sb)
 +                              continue;
 +
                        start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
                        end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
                        for (j = start; j != end; j = TX_BD(j + 1)) {
@@@ -2084,6 -2071,8 +2084,6 @@@ int bnx2x_get_gpio(struct bnx2x *bp, in
        else
                value = 0;
  
 -      DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
 -
        return value;
  }
  
@@@ -4689,7 -4678,7 +4689,7 @@@ static bool bnx2x_check_blocks_with_par
        for (i = 0; sig; i++) {
                cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
 -                      res |= true; /* Each bit is real error! */
 +                      res = true; /* Each bit is real error! */
                        if (print) {
                                switch (cur_bit) {
                                case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
@@@ -4768,21 -4757,21 +4768,21 @@@ static bool bnx2x_check_blocks_with_par
                                        _print_next_block((*par_num)++,
                                                          "MCP ROM");
                                *global = true;
 -                              res |= true;
 +                              res = true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
                                if (print)
                                        _print_next_block((*par_num)++,
                                                          "MCP UMP RX");
                                *global = true;
 -                              res |= true;
 +                              res = true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
                                if (print)
                                        _print_next_block((*par_num)++,
                                                          "MCP UMP TX");
                                *global = true;
 -                              res |= true;
 +                              res = true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
                                if (print)
@@@ -4814,7 -4803,7 +4814,7 @@@ static bool bnx2x_check_blocks_with_par
        for (i = 0; sig; i++) {
                cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
 -                      res |= true; /* Each bit is real error! */
 +                      res = true; /* Each bit is real error! */
                        if (print) {
                                switch (cur_bit) {
                                case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
@@@ -5463,14 -5452,6 +5463,14 @@@ static void bnx2x_eq_int(struct bnx2x *
                                break;
  
                        goto next_spqe;
 +
 +              case EVENT_RING_OPCODE_SET_TIMESYNC:
 +                      DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
 +                         "got set_timesync ramrod completion\n");
 +                      if (f_obj->complete_cmd(bp, f_obj,
 +                                              BNX2X_F_CMD_SET_TIMESYNC))
 +                              break;
 +                      goto next_spqe;
                }
  
                switch (opcode | bp->state) {
@@@ -6121,7 -6102,7 +6121,7 @@@ static int bnx2x_fill_accept_flags(stru
        }
  
        /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
 -      if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
 +      if (rx_mode != BNX2X_RX_MODE_NONE) {
                __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
                __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
        }
@@@ -6868,6 -6849,37 +6868,37 @@@ static void bnx2x__common_init_phy(stru
        bnx2x_release_phy_lock(bp);
  }
  
+ static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
+ {
+       REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
+       REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
+       /* make sure this value is 0 */
+       REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+       REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
+       REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
+       REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
+       REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
+ }
+ static void bnx2x_set_endianity(struct bnx2x *bp)
+ {
+ #ifdef __BIG_ENDIAN
+       bnx2x_config_endianity(bp, 1);
+ #else
+       bnx2x_config_endianity(bp, 0);
+ #endif
+ }
+ static void bnx2x_reset_endianity(struct bnx2x *bp)
+ {
+       bnx2x_config_endianity(bp, 0);
+ }
  /**
   * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
   *
@@@ -6934,23 -6946,7 +6965,7 @@@ static int bnx2x_init_hw_common(struct 
  
        bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
        bnx2x_init_pxp(bp);
- #ifdef __BIG_ENDIAN
-       REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
-       /* make sure this value is 0 */
-       REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
- /*    REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
-       REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
- #endif
+       bnx2x_set_endianity(bp);
        bnx2x_ilt_init_page_size(bp, INITOP_SET);
  
        if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
@@@ -7666,11 -7662,7 +7681,11 @@@ static inline int bnx2x_func_switch_upd
        func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
  
        /* Function parameters */
 -      switch_update_params->suspend = suspend;
 +      __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
 +                &switch_update_params->changes);
 +      if (suspend)
 +              __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
 +                        &switch_update_params->changes);
  
        rc = bnx2x_func_state_change(bp, &func_params);
  
@@@ -9033,7 -9025,7 +9048,7 @@@ static int bnx2x_func_wait_started(stru
                struct bnx2x_func_state_params func_params = {NULL};
  
                DP(NETIF_MSG_IFDOWN,
 -                 "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
 +                 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
  
                func_params.f_obj = &bp->func_obj;
                __set_bit(RAMROD_DRV_CLR_ONLY,
        return 0;
  }
  
 +static void bnx2x_disable_ptp(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +
 +      /* Disable sending PTP packets to host */
 +      REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
 +             NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
 +
 +      /* Reset PTP event detection rules */
 +      REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
 +             NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
 +      REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
 +             NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
 +      REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
 +             NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
 +      REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
 +             NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
 +
 +      /* Disable the PTP feature */
 +      REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
 +             NIG_REG_P0_PTP_EN, 0x0);
 +}
 +
 +/* Called during unload, to stop PTP-related stuff */
 +void bnx2x_stop_ptp(struct bnx2x *bp)
 +{
 +      /* Cancel PTP work queue. Should be done after the Tx queues are
 +       * drained to prevent additional scheduling.
 +       */
 +      cancel_work_sync(&bp->ptp_task);
 +
 +      if (bp->ptp_tx_skb) {
 +              dev_kfree_skb_any(bp->ptp_tx_skb);
 +              bp->ptp_tx_skb = NULL;
 +      }
 +
 +      /* Disable PTP in HW */
 +      bnx2x_disable_ptp(bp);
 +
 +      DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
 +}
 +
  void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
  {
        int port = BP_PORT(bp);
@@@ -9212,13 -9162,6 +9227,13 @@@ unload_error
  #endif
        }
  
 +      /* stop_ptp should be after the Tx queues are drained to prevent
 +       * scheduling to the cancelled PTP work queue. It should also be after
 +       * function stop ramrod is sent, since as part of this ramrod the FW
 +       * accesses PTP registers.
 +       */
 +      bnx2x_stop_ptp(bp);
 +
        /* Disable HW interrupts, NAPI */
        bnx2x_netif_stop(bp, 1);
        /* Delete all NAPI objects */
@@@ -11957,7 -11900,7 +11972,7 @@@ static int bnx2x_init_bp(struct bnx2x *
        bp->disable_tpa = disable_tpa;
        bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
        /* Reduce memory usage in kdump environment by disabling TPA */
 -      bp->disable_tpa |= reset_devices;
 +      bp->disable_tpa |= is_kdump_kernel();
  
        /* Set TPA flags */
        if (bp->disable_tpa) {
  
        bp->dump_preset_idx = 1;
  
 +      if (CHIP_IS_E3B0(bp))
 +              bp->flags |= PTP_SUPPORTED;
 +
        return rc;
  }
  
@@@ -12368,17 -12308,13 +12383,17 @@@ static int bnx2x_ioctl(struct net_devic
        struct bnx2x *bp = netdev_priv(dev);
        struct mii_ioctl_data *mdio = if_mii(ifr);
  
 -      DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
 -         mdio->phy_id, mdio->reg_num, mdio->val_in);
 -
        if (!netif_running(dev))
                return -EAGAIN;
  
 -      return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
 +      switch (cmd) {
 +      case SIOCSHWTSTAMP:
 +              return bnx2x_hwtstamp_ioctl(bp, ifr);
 +      default:
 +              DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
 +                 mdio->phy_id, mdio->reg_num, mdio->val_in);
 +              return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
 +      }
  }
  
  #ifdef CONFIG_NET_POLL_CONTROLLER
@@@ -13022,191 -12958,6 +13037,191 @@@ static int set_is_vf(int chip_id
        }
  }
  
 +/* nig_tsgen registers relative address */
 +#define tsgen_ctrl 0x0
 +#define tsgen_freecount 0x10
 +#define tsgen_synctime_t0 0x20
 +#define tsgen_offset_t0 0x28
 +#define tsgen_drift_t0 0x30
 +#define tsgen_synctime_t1 0x58
 +#define tsgen_offset_t1 0x60
 +#define tsgen_drift_t1 0x68
 +
 +/* FW workaround for setting drift */
 +static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
 +                                        int best_val, int best_period)
 +{
 +      struct bnx2x_func_state_params func_params = {NULL};
 +      struct bnx2x_func_set_timesync_params *set_timesync_params =
 +              &func_params.params.set_timesync;
 +
 +      /* Prepare parameters for function state transitions */
 +      __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 +      __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
 +
 +      func_params.f_obj = &bp->func_obj;
 +      func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
 +
 +      /* Function parameters */
 +      set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
 +      set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
 +      set_timesync_params->add_sub_drift_adjust_value =
 +              drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
 +      set_timesync_params->drift_adjust_value = best_val;
 +      set_timesync_params->drift_adjust_period = best_period;
 +
 +      return bnx2x_func_state_change(bp, &func_params);
 +}
 +
 +static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 +{
 +      struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 +      int rc;
 +      int drift_dir = 1;
 +      int val, period, period1, period2, dif, dif1, dif2;
 +      int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
 +
 +      DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
 +
 +      if (!netif_running(bp->dev)) {
 +              DP(BNX2X_MSG_PTP,
 +                 "PTP adjfreq called while the interface is down\n");
 +              return -EFAULT;
 +      }
 +
 +      if (ppb < 0) {
 +              ppb = -ppb;
 +              drift_dir = 0;
 +      }
 +
 +      if (ppb == 0) {
 +              best_val = 1;
 +              best_period = 0x1FFFFFF;
 +      } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
 +              best_val = 31;
 +              best_period = 1;
 +      } else {
 +              /* Changed not to allow val = 8, 16, 24 as these values
 +               * are not supported by the workaround.
 +               */
 +              for (val = 0; val <= 31; val++) {
 +                      if ((val & 0x7) == 0)
 +                              continue;
 +                      period1 = val * 1000000 / ppb;
 +                      period2 = period1 + 1;
 +                      if (period1 != 0)
 +                              dif1 = ppb - (val * 1000000 / period1);
 +                      else
 +                              dif1 = BNX2X_MAX_PHC_DRIFT;
 +                      if (dif1 < 0)
 +                              dif1 = -dif1;
 +                      dif2 = ppb - (val * 1000000 / period2);
 +                      if (dif2 < 0)
 +                              dif2 = -dif2;
 +                      dif = (dif1 < dif2) ? dif1 : dif2;
 +                      period = (dif1 < dif2) ? period1 : period2;
 +                      if (dif < best_dif) {
 +                              best_dif = dif;
 +                              best_val = val;
 +                              best_period = period;
 +                      }
 +              }
 +      }
 +
 +      rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
 +                                          best_period);
 +      if (rc) {
 +              BNX2X_ERR("Failed to set drift\n");
 +              return -EFAULT;
 +      }
 +
 +      DP(BNX2X_MSG_PTP, "Configrued val = %d, period = %d\n", best_val,
 +         best_period);
 +
 +      return 0;
 +}
 +
 +static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 +{
 +      struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 +      u64 now;
 +
 +      DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
 +
 +      now = timecounter_read(&bp->timecounter);
 +      now += delta;
 +      /* Re-init the timecounter */
 +      timecounter_init(&bp->timecounter, &bp->cyclecounter, now);
 +
 +      return 0;
 +}
 +
 +static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 +{
 +      struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 +      u64 ns;
 +      u32 remainder;
 +
 +      ns = timecounter_read(&bp->timecounter);
 +
 +      DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
 +
 +      ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
 +      ts->tv_nsec = remainder;
 +
 +      return 0;
 +}
 +
 +static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
 +                           const struct timespec *ts)
 +{
 +      struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 +      u64 ns;
 +
 +      ns = ts->tv_sec * 1000000000ULL;
 +      ns += ts->tv_nsec;
 +
 +      DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
 +
 +      /* Re-init the timecounter */
 +      timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
 +
 +      return 0;
 +}
 +
 +/* Enable (or disable) ancillary features of the phc subsystem */
 +static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
 +                          struct ptp_clock_request *rq, int on)
 +{
 +      struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 +
 +      BNX2X_ERR("PHC ancillary features are not supported\n");
 +      return -ENOTSUPP;
 +}
 +
 +void bnx2x_register_phc(struct bnx2x *bp)
 +{
 +      /* Fill the ptp_clock_info struct and register PTP clock */
 +      bp->ptp_clock_info.owner = THIS_MODULE;
 +      snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
 +      bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
 +      bp->ptp_clock_info.n_alarm = 0;
 +      bp->ptp_clock_info.n_ext_ts = 0;
 +      bp->ptp_clock_info.n_per_out = 0;
 +      bp->ptp_clock_info.pps = 0;
 +      bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
 +      bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
 +      bp->ptp_clock_info.gettime = bnx2x_ptp_gettime;
 +      bp->ptp_clock_info.settime = bnx2x_ptp_settime;
 +      bp->ptp_clock_info.enable = bnx2x_ptp_enable;
 +
 +      bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
 +      if (IS_ERR(bp->ptp_clock)) {
 +              bp->ptp_clock = NULL;
 +              BNX2X_ERR("PTP clock registeration failed\n");
 +      }
 +}
 +
  static int bnx2x_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
  {
                       "Unknown",
                       dev->base_addr, bp->pdev->irq, dev->dev_addr);
  
 +      bnx2x_register_phc(bp);
 +
        return 0;
  
  init_one_exit:
@@@ -13406,11 -13155,6 +13421,11 @@@ static void __bnx2x_remove(struct pci_d
                           struct bnx2x *bp,
                           bool remove_netdev)
  {
 +      if (bp->ptp_clock) {
 +              ptp_clock_unregister(bp->ptp_clock);
 +              bp->ptp_clock = NULL;
 +      }
 +
        /* Delete storage MAC address */
        if (!NO_FCOE(bp)) {
                rtnl_lock();
        bnx2x_iov_remove_one(bp);
  
        /* Power on: we can't let PCI layer write to us while we are in D3 */
-       if (IS_PF(bp))
+       if (IS_PF(bp)) {
                bnx2x_set_power_state(bp, PCI_D0);
  
+               /* Set endianity registers to reset values in case the next
+                * driver boots in a different endianity environment.
+                */
+               bnx2x_reset_endianity(bp);
+       }
        /* Disable MSI/MSI-X */
        bnx2x_disable_msi(bp);
  
@@@ -14386,332 -14136,3 +14407,332 @@@ int bnx2x_pretend_func(struct bnx2x *bp
        REG_RD(bp, pretend_reg);
        return 0;
  }
 +
 +static void bnx2x_ptp_task(struct work_struct *work)
 +{
 +      struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
 +      int port = BP_PORT(bp);
 +      u32 val_seq;
 +      u64 timestamp, ns;
 +      struct skb_shared_hwtstamps shhwtstamps;
 +
 +      /* Read Tx timestamp registers */
 +      val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
 +                       NIG_REG_P0_TLLH_PTP_BUF_SEQID);
 +      if (val_seq & 0x10000) {
 +              /* There is a valid timestamp value */
 +              timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
 +                                 NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
 +              timestamp <<= 32;
 +              timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
 +                                  NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
 +              /* Reset timestamp register to allow new timestamp */
 +              REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
 +                     NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
 +              ns = timecounter_cyc2time(&bp->timecounter, timestamp);
 +
 +              memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 +              shhwtstamps.hwtstamp = ns_to_ktime(ns);
 +              skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
 +              dev_kfree_skb_any(bp->ptp_tx_skb);
 +              bp->ptp_tx_skb = NULL;
 +
 +              DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
 +                 timestamp, ns);
 +      } else {
 +              DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
 +              /* Reschedule to keep checking for a valid timestamp value */
 +              schedule_work(&bp->ptp_task);
 +      }
 +}
 +
 +void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
 +{
 +      int port = BP_PORT(bp);
 +      u64 timestamp, ns;
 +
 +      timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
 +                          NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
 +      timestamp <<= 32;
 +      timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
 +                          NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
 +
 +      /* Reset timestamp register to allow new timestamp */
 +      REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
 +             NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
 +
 +      ns = timecounter_cyc2time(&bp->timecounter, timestamp);
 +
 +      skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
 +
 +      DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
 +         timestamp, ns);
 +}
 +
 +/* Read the PHC */
 +static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
 +{
 +      struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
 +      int port = BP_PORT(bp);
 +      u32 wb_data[2];
 +      u64 phc_cycles;
 +
 +      REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
 +                  NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
 +      phc_cycles = wb_data[1];
 +      phc_cycles = (phc_cycles << 32) + wb_data[0];
 +
 +      DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
 +
 +      return phc_cycles;
 +}
 +
 +static void bnx2x_init_cyclecounter(struct bnx2x *bp)
 +{
 +      memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
 +      bp->cyclecounter.read = bnx2x_cyclecounter_read;
 +      bp->cyclecounter.mask = CLOCKSOURCE_MASK(64);
 +      bp->cyclecounter.shift = 1;
 +      bp->cyclecounter.mult = 1;
 +}
 +
 +static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
 +{
 +      struct bnx2x_func_state_params func_params = {NULL};
 +      struct bnx2x_func_set_timesync_params *set_timesync_params =
 +              &func_params.params.set_timesync;
 +
 +      /* Prepare parameters for function state transitions */
 +      __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 +      __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
 +
 +      func_params.f_obj = &bp->func_obj;
 +      func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
 +
 +      /* Function parameters */
 +      set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
 +      set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
 +
 +      return bnx2x_func_state_change(bp, &func_params);
 +}
 +
 +int bnx2x_enable_ptp_packets(struct bnx2x *bp)
 +{
 +      struct bnx2x_queue_state_params q_params;
 +      int rc, i;
 +
 +      /* send queue update ramrod to enable PTP packets */
 +      memset(&q_params, 0, sizeof(q_params));
 +      __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 +      q_params.cmd = BNX2X_Q_CMD_UPDATE;
 +      __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
 +                &q_params.params.update.update_flags);
 +      __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
 +                &q_params.params.update.update_flags);
 +
 +      /* send the ramrod on all the queues of the PF */
 +      for_each_eth_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +
 +              /* Set the appropriate Queue object */
 +              q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 +
 +              /* Update the Queue state */
 +              rc = bnx2x_queue_state_change(bp, &q_params);
 +              if (rc) {
 +                      BNX2X_ERR("Failed to enable PTP packets\n");
 +                      return rc;
 +              }
 +      }
 +
 +      return 0;
 +}
 +
 +int bnx2x_configure_ptp_filters(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      int rc;
 +
 +      if (!bp->hwtstamp_ioctl_called)
 +              return 0;
 +
 +      switch (bp->tx_type) {
 +      case HWTSTAMP_TX_ON:
 +              bp->flags |= TX_TIMESTAMPING_EN;
 +              REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
 +                     NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
 +              REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
 +                     NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
 +              break;
 +      case HWTSTAMP_TX_ONESTEP_SYNC:
 +              BNX2X_ERR("One-step timestamping is not supported\n");
 +              return -ERANGE;
 +      }
 +
 +      switch (bp->rx_filter) {
 +      case HWTSTAMP_FILTER_NONE:
 +              break;
 +      case HWTSTAMP_FILTER_ALL:
 +      case HWTSTAMP_FILTER_SOME:
 +              bp->rx_filter = HWTSTAMP_FILTER_NONE;
 +              break;
 +      case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 +      case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 +      case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 +              bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 +              /* Initialize PTP detection for UDP/IPv4 events */
 +              REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
 +                     NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
 +              REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
 +                     NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
 +              break;
 +      case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 +      case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 +      case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 +              bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 +              /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
 +              REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
 +                     NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
 +              REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
 +                     NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
 +              break;
 +      case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
 +      case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
 +      case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
 +              bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
 +              /* Initialize PTP detection L2 events */
 +              REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
 +                     NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
 +              REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
 +                     NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
 +
 +              break;
 +      case HWTSTAMP_FILTER_PTP_V2_EVENT:
 +      case HWTSTAMP_FILTER_PTP_V2_SYNC:
 +      case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 +              bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 +              /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
 +              REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
 +                     NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
 +              REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
 +                     NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
 +              break;
 +      }
 +
 +      /* Indicate to FW that this PF expects recorded PTP packets */
 +      rc = bnx2x_enable_ptp_packets(bp);
 +      if (rc)
 +              return rc;
 +
 +      /* Enable sending PTP packets to host */
 +      REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
 +             NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
 +
 +      return 0;
 +}
 +
 +static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
 +{
 +      struct hwtstamp_config config;
 +      int rc;
 +
 +      DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
 +
 +      if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
 +              return -EFAULT;
 +
 +      DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
 +         config.tx_type, config.rx_filter);
 +
 +      if (config.flags) {
 +              BNX2X_ERR("config.flags is reserved for future use\n");
 +              return -EINVAL;
 +      }
 +
 +      bp->hwtstamp_ioctl_called = 1;
 +      bp->tx_type = config.tx_type;
 +      bp->rx_filter = config.rx_filter;
 +
 +      rc = bnx2x_configure_ptp_filters(bp);
 +      if (rc)
 +              return rc;
 +
 +      config.rx_filter = bp->rx_filter;
 +
 +      return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 +              -EFAULT : 0;
 +}
 +
 +/* Configures HW for PTP */
 +static int bnx2x_configure_ptp(struct bnx2x *bp)
 +{
 +      int rc, port = BP_PORT(bp);
 +      u32 wb_data[2];
 +
 +      /* Reset PTP event detection rules - will be configured in the IOCTL */
 +      REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
 +             NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
 +      REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
 +             NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
 +      REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
 +             NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
 +      REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
 +             NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
 +
 +      /* Disable PTP packets to host - will be configured in the IOCTL */
 +      REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
 +             NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
 +
 +      /* Enable the PTP feature */
 +      REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
 +             NIG_REG_P0_PTP_EN, 0x3F);
 +
 +      /* Enable the free-running counter */
 +      wb_data[0] = 0;
 +      wb_data[1] = 0;
 +      REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
 +
 +      /* Reset drift register (offset register is not reset) */
 +      rc = bnx2x_send_reset_timesync_ramrod(bp);
 +      if (rc) {
 +              BNX2X_ERR("Failed to reset PHC drift register\n");
 +              return -EFAULT;
 +      }
 +
 +      /* Reset possibly old timestamps */
 +      REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
 +             NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
 +      REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
 +             NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
 +
 +      return 0;
 +}
 +
 +/* Called during load, to initialize PTP-related stuff */
 +void bnx2x_init_ptp(struct bnx2x *bp)
 +{
 +      int rc;
 +
 +      /* Configure PTP in HW */
 +      rc = bnx2x_configure_ptp(bp);
 +      if (rc) {
 +              BNX2X_ERR("Stopping PTP initialization\n");
 +              return;
 +      }
 +
 +      /* Init work queue for Tx timestamping */
 +      INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
 +
 +      /* Init cyclecounter and timecounter. This is done only in the first
 +       * load. If done in every load, PTP application will fail when doing
 +       * unload / load (e.g. MTU change) while it is running.
 +       */
 +      if (!bp->timecounter_init_done) {
 +              bnx2x_init_cyclecounter(bp);
 +              timecounter_init(&bp->timecounter, &bp->cyclecounter,
 +                               ktime_to_ns(ktime_get_real()));
 +              bp->timecounter_init_done = 1;
 +      }
 +
 +      DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
 +}
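
The PTP support added above is an instance of the kernel's generic cyclecounter/timecounter pattern: a cyclecounter describes how the NIC's free-running counter converts to nanoseconds (mult/shift), and a timecounter layered on top keeps a continuous 64-bit nanosecond clock across counter wraps. Below is a minimal sketch of that pattern; my_ptp_dev and my_read_free_running_counter are hypothetical names, and the header assumption matches 3.17-era kernels, where these helpers lived in <linux/clocksource.h>.

#include <linux/clocksource.h>	/* cyclecounter + timecounter on 3.17-era kernels */
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/string.h>

struct my_ptp_dev {
	struct cyclecounter cc;		/* raw cycles -> ns via mult/shift */
	struct timecounter tc;		/* accumulated 64-bit ns clock */
};

/* Stand-in for a real read of the device's free-running counter. */
static u64 my_read_free_running_counter(struct my_ptp_dev *dev)
{
	return 0;
}

static cycle_t my_cc_read(const struct cyclecounter *cc)
{
	struct my_ptp_dev *dev = container_of(cc, struct my_ptp_dev, cc);

	return my_read_free_running_counter(dev);
}

static void my_init_timekeeping(struct my_ptp_dev *dev)
{
	memset(&dev->cc, 0, sizeof(dev->cc));
	dev->cc.read  = my_cc_read;
	dev->cc.mask  = CLOCKSOURCE_MASK(64);
	dev->cc.mult  = 1;	/* ns = (cycles * mult) >> shift; the values */
	dev->cc.shift = 1;	/* mirror the bnx2x choice above */

	/* Seed the software clock with current wall time, as init_ptp does. */
	timecounter_init(&dev->tc, &dev->cc, ktime_to_ns(ktime_get_real()));
}

A captured Tx/Rx hardware stamp is then converted with timecounter_cyc2time(&dev->tc, raw_stamp), which is exactly how bnx2x_ptp_task() and bnx2x_set_rx_ts() turn register values into skb timestamps.
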
index eef63a8508e67e0083cf063636c5df10f9189283,a6a9f284c8dd762579e062a9d9d62f03e1ce6f96..23f23c97c2ad7b9fa52b8d589b1627a560635f92
@@@ -31,7 -31,7 +31,7 @@@
  #include <linux/if_vlan.h>
  #include <linux/prefetch.h>
  #include <linux/random.h>
- #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ #if IS_ENABLED(CONFIG_VLAN_8021Q)
  #define BCM_VLAN 1
  #endif
  #include <net/ip.h>
@@@ -383,7 -383,7 +383,7 @@@ static int cnic_iscsi_nl_msg_recv(struc
                        break;
  
                rcu_read_lock();
 -              if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
 +              if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
                        rc = -ENODEV;
                        rcu_read_unlock();
                        break;
@@@ -527,7 -527,7 +527,7 @@@ int cnic_unregister_driver(int ulp_type
        list_for_each_entry(dev, &cnic_dev_list, list) {
                struct cnic_local *cp = dev->cnic_priv;
  
 -              if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 +              if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
                        pr_err("%s: Type %d still has devices registered\n",
                               __func__, ulp_type);
                        read_unlock(&cnic_dev_lock);
@@@ -575,7 -575,7 +575,7 @@@ static int cnic_register_device(struct 
                mutex_unlock(&cnic_lock);
                return -EAGAIN;
        }
 -      if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 +      if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
                pr_err("%s: Type %d has already been registered to this device\n",
                       __func__, ulp_type);
                mutex_unlock(&cnic_lock);
@@@ -3685,7 -3685,7 +3685,7 @@@ static int cnic_get_v4_route(struct soc
  static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
                             struct dst_entry **dst)
  {
- #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
+ #if IS_ENABLED(CONFIG_IPV6)
        struct flowi6 fl6;
  
        memset(&fl6, 0, sizeof(fl6));
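
Two idioms recur in this cnic hunk. IS_ENABLED() from <linux/kconfig.h> folds the old defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) test into one macro that is true for both built-in and modular options, and rcu_access_pointer() replaces rcu_dereference() where a pointer is only compared against NULL and never dereferenced. A small sketch, with my_ulp_ops/my_ulp as hypothetical names:

#include <linux/kconfig.h>
#include <linux/rcupdate.h>

#if IS_ENABLED(CONFIG_VLAN_8021Q)	/* true for =y and for =m */
#define MY_HAVE_VLAN 1
#endif

struct my_ulp_ops;			/* hypothetical ops table */
static struct my_ulp_ops __rcu *my_ulp;

static bool my_ulp_registered(void)
{
	/* The value is only tested against NULL, never dereferenced,
	 * so rcu_access_pointer() is sufficient and avoids the
	 * read-side lockdep requirements of rcu_dereference().
	 */
	return rcu_access_pointer(my_ulp) != NULL;
}
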
index b60f16381229cd9db36d7ba80cfa5fc1f2ad0913,369848e107f8ed37952b4849ed632655f598b6ba..be039dd6114d5378b5896e6e58d1d10eb0578ca4
@@@ -224,19 -224,15 +224,19 @@@ static int i40e_add_del_fdir_udpv4(stru
        ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
        if (ret) {
                dev_info(&pf->pdev->dev,
 -                       "Filter command send failed for PCTYPE %d (ret = %d)\n",
 -                       fd_data->pctype, ret);
 +                       "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
 +                       fd_data->pctype, fd_data->fd_id, ret);
                err = true;
        } else {
 -              dev_info(&pf->pdev->dev,
 -                       "Filter OK for PCTYPE %d (ret = %d)\n",
 -                       fd_data->pctype, ret);
 +              if (add)
 +                      dev_info(&pf->pdev->dev,
 +                               "Filter OK for PCTYPE %d loc = %d\n",
 +                               fd_data->pctype, fd_data->fd_id);
 +              else
 +                      dev_info(&pf->pdev->dev,
 +                               "Filter deleted for PCTYPE %d loc = %d\n",
 +                               fd_data->pctype, fd_data->fd_id);
        }
 -
        return err ? -EOPNOTSUPP : 0;
  }
  
@@@ -280,18 -276,10 +280,18 @@@ static int i40e_add_del_fdir_tcpv4(stru
        tcp->source = fd_data->src_port;
  
        if (add) {
 +              pf->fd_tcp_rule++;
                if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
                        dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
                        pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                }
 +      } else {
 +              pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
 +                                (pf->fd_tcp_rule - 1) : 0;
 +              if (pf->fd_tcp_rule == 0) {
 +                      pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 +                      dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
 +              }
        }
  
        fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
  
        if (ret) {
                dev_info(&pf->pdev->dev,
 -                       "Filter command send failed for PCTYPE %d (ret = %d)\n",
 -                       fd_data->pctype, ret);
 +                       "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
 +                       fd_data->pctype, fd_data->fd_id, ret);
                err = true;
        } else {
 -              dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
 -                       fd_data->pctype, ret);
 +              if (add)
 +                      dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
 +                               fd_data->pctype, fd_data->fd_id);
 +              else
 +                      dev_info(&pf->pdev->dev,
 +                               "Filter deleted for PCTYPE %d loc = %d\n",
 +                               fd_data->pctype, fd_data->fd_id);
        }
  
        return err ? -EOPNOTSUPP : 0;
@@@ -372,18 -355,13 +372,18 @@@ static int i40e_add_del_fdir_ipv4(struc
  
                if (ret) {
                        dev_info(&pf->pdev->dev,
 -                               "Filter command send failed for PCTYPE %d (ret = %d)\n",
 -                               fd_data->pctype, ret);
 +                               "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
 +                               fd_data->pctype, fd_data->fd_id, ret);
                        err = true;
                } else {
 -                      dev_info(&pf->pdev->dev,
 -                               "Filter OK for PCTYPE %d (ret = %d)\n",
 -                               fd_data->pctype, ret);
 +                      if (add)
 +                              dev_info(&pf->pdev->dev,
 +                                       "Filter OK for PCTYPE %d loc = %d\n",
 +                                       fd_data->pctype, fd_data->fd_id);
 +                      else
 +                              dev_info(&pf->pdev->dev,
 +                                       "Filter deleted for PCTYPE %d loc = %d\n",
 +                                       fd_data->pctype, fd_data->fd_id);
                }
        }
  
@@@ -465,14 -443,8 +465,14 @@@ static void i40e_fd_handle_status(struc
                I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
  
        if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
 -              dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
 -                       rx_desc->wb.qword0.hi_dword.fd_id);
 +              if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
 +                  (I40E_DEBUG_FD & pf->hw.debug_mask))
 +                      dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
 +                               rx_desc->wb.qword0.hi_dword.fd_id);
 +
 +              pf->fd_add_err++;
 +              /* store the current atr filter count */
 +              pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
  
                /* filter programming failed most likely due to table full */
                fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
                 * FD ATR/SB and then re-enable it when there is room.
                 */
                if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
 -                      /* Turn off ATR first */
 -                      if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
 +                      if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                            !(pf->auto_disable_flags &
 -                            I40E_FLAG_FD_ATR_ENABLED)) {
 -                              dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
 -                              pf->auto_disable_flags |=
 -                                                     I40E_FLAG_FD_ATR_ENABLED;
 -                              pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
 -                      } else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
 -                                 !(pf->auto_disable_flags &
                                     I40E_FLAG_FD_SB_ENABLED)) {
                                dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
                                pf->auto_disable_flags |=
                                                        I40E_FLAG_FD_SB_ENABLED;
 -                              pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
                        }
                } else {
 -                      dev_info(&pdev->dev, "FD filter programming error\n");
 +                      dev_info(&pdev->dev,
 +                              "FD filter programming failed due to incorrect filter parameters\n");
                }
        } else if (error ==
                          (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
                if (I40E_DEBUG_FD & pf->hw.debug_mask)
 -                      dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
 +                      dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
                                 rx_desc->wb.qword0.hi_dword.fd_id);
        }
  }
@@@ -607,7 -587,6 +607,7 @@@ static u32 i40e_get_tx_pending(struct i
  static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
  {
        u32 tx_pending = i40e_get_tx_pending(tx_ring);
 +      struct i40e_pf *pf = tx_ring->vsi->back;
        bool ret = false;
  
        clear_check_for_tx_hang(tx_ring);
         * pending but without time to complete it yet.
         */
        if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
 -          tx_pending) {
 +          (tx_pending >= I40E_MIN_DESC_PENDING)) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
 +      } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
 +                 (tx_pending < I40E_MIN_DESC_PENDING) &&
 +                 (tx_pending > 0)) {
 +              if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
 +                      dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d\n",
 +                               tx_pending, tx_ring->queue_index);
 +              pf->tx_sluggish_count++;
        } else {
                /* update completed stats and disarm the hang check */
                tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
@@@ -1241,6 -1213,7 +1241,6 @@@ static inline void i40e_rx_checksum(str
        ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
                      (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
  
 -      skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
        skb->ip_summed = CHECKSUM_NONE;
  
        /* Rx csum enabled and ip headers found? */
        }
  
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 +      skb->csum_level = ipv4_tunnel || ipv6_tunnel;
  
        return;
  
@@@ -2323,7 -2295,7 +2323,7 @@@ static netdev_tx_t i40e_xmit_frame_ring
                goto out_drop;
  
        /* obtain protocol of skb */
-       protocol = skb->protocol;
+       protocol = vlan_get_protocol(skb);
  
        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];
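
The xmit-path change from skb->protocol to vlan_get_protocol() (repeated in the i40evf file below) makes checksum/TSO setup look at the encapsulated protocol of a VLAN-tagged frame instead of ETH_P_8021Q. Roughly, and leaving out the offset handling the real <linux/if_vlan.h> helper performs, the effect is:

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Simplified sketch of what vlan_get_protocol() achieves; the real
 * helper also copes with header offsets and is what drivers call.
 */
static __be16 example_inner_protocol(const struct sk_buff *skb)
{
	__be16 proto = skb->protocol;

	if (proto == htons(ETH_P_8021Q) || proto == htons(ETH_P_8021AD)) {
		const struct vlan_hdr *vh =
			(const struct vlan_hdr *)skb->data;

		proto = vh->h_vlan_encapsulated_proto;
	}
	return proto;
}
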
index 50cf5b8d0e15306d1a306e8b935d6e05f4b9ddf9,95a3ec236b4951ab7166637159393df569da97fb..04c7c1557a0c770ab0f256d3012cd3b9be70240e
@@@ -163,13 -163,11 +163,13 @@@ static bool i40e_check_tx_hang(struct i
         * pending but without time to complete it yet.
         */
        if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
 -          tx_pending) {
 +          (tx_pending >= I40E_MIN_DESC_PENDING)) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
 -      } else {
 +      } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
 +                 !(tx_pending < I40E_MIN_DESC_PENDING) ||
 +                 !(tx_pending > 0)) {
                /* update completed stats and disarm the hang check */
                tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
                clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
@@@ -746,6 -744,7 +746,6 @@@ static inline void i40e_rx_checksum(str
        ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
                      (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
  
 -      skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
        skb->ip_summed = CHECKSUM_NONE;
  
        /* Rx csum enabled and ip headers found? */
        }
  
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 +      skb->csum_level = ipv4_tunnel || ipv6_tunnel;
  
        return;
  
@@@ -1599,7 -1597,7 +1599,7 @@@ static netdev_tx_t i40e_xmit_frame_ring
                goto out_drop;
  
        /* obtain protocol of skb */
-       protocol = skb->protocol;
+       protocol = vlan_get_protocol(skb);
  
        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];
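
The i40evf hang check above keeps its disarm branch in negated form; by De Morgan's laws it is equivalent to the positive-logic sketch below (MY_MIN_DESC_PENDING is an assumed stand-in for I40E_MIN_DESC_PENDING): disarm unless the ring is "sluggish", meaning completions stalled while 0 < tx_pending < the minimum batch size.

#include <linux/types.h>

#define MY_MIN_DESC_PENDING 4	/* assumed value for the sketch */

/* Positive-logic equivalent of the negated condition in the diff. */
static bool should_disarm_hang_check(u32 tx_done_old, u32 packets,
				     u32 tx_pending)
{
	bool sluggish = tx_done_old == packets &&
			tx_pending > 0 &&
			tx_pending < MY_MIN_DESC_PENDING;

	return !sluggish;	/* == !(A) || !(B) || !(C) above */
}
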
index 493fe693e675b3e33a675147edba03324bb6d819,cf4f38db1c0a60338c1d7479b0cfe28f50b6b558..3a08a1f78c732b49992cc711907131d74b5e18e8
@@@ -175,7 -175,7 +175,7 @@@ static const struct stmmac_stats stmmac
        STMMAC_MMC_STAT(mmc_rx_octetcount_g),
        STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
        STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
-       STMMAC_MMC_STAT(mmc_rx_crc_errror),
+       STMMAC_MMC_STAT(mmc_rx_crc_error),
        STMMAC_MMC_STAT(mmc_rx_align_error),
        STMMAC_MMC_STAT(mmc_rx_run_error),
        STMMAC_MMC_STAT(mmc_rx_jabber_error),
@@@ -261,11 -261,11 +261,11 @@@ static int stmmac_ethtool_getsettings(s
                ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
  
                /* Get and convert ADV/LP_ADV from the HW AN registers */
 -              if (priv->hw->mac->get_adv)
 -                      priv->hw->mac->get_adv(priv->hw, &adv);
 -              else
 +              if (!priv->hw->mac->get_adv)
                        return -EOPNOTSUPP;     /* should never happen indeed */
  
 +              priv->hw->mac->get_adv(priv->hw, &adv);
 +
                /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
  
                if (adv.pause & STMMAC_PCS_PAUSE)
@@@ -340,17 -340,19 +340,17 @@@ static int stmmac_ethtool_setsettings(s
                if (cmd->autoneg != AUTONEG_ENABLE)
                        return -EINVAL;
  
 -              if (cmd->autoneg == AUTONEG_ENABLE) {
 -                      mask &= (ADVERTISED_1000baseT_Half |
 +              mask &= (ADVERTISED_1000baseT_Half |
                        ADVERTISED_1000baseT_Full |
                        ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full |
                        ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full);
  
 -                      spin_lock(&priv->lock);
 -                      if (priv->hw->mac->ctrl_ane)
 -                              priv->hw->mac->ctrl_ane(priv->hw, 1);
 -                      spin_unlock(&priv->lock);
 -              }
 +              spin_lock(&priv->lock);
 +              if (priv->hw->mac->ctrl_ane)
 +                      priv->hw->mac->ctrl_ane(priv->hw, 1);
 +              spin_unlock(&priv->lock);
  
                return 0;
        }
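
Both stmmac_ethtool hunks are control-flow cleanups: the get_adv lookup becomes a guard clause, and the inner if (cmd->autoneg == AUTONEG_ENABLE) goes away because the early return -EINVAL already guarantees it. In miniature (example_set_autoneg is a hypothetical reduction, not driver code):

#include <linux/errno.h>
#include <linux/ethtool.h>

static int example_set_autoneg(u8 autoneg)
{
	if (autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	/* Previously wrapped in "if (autoneg == AUTONEG_ENABLE)",
	 * which is always true once the guard above has run.
	 */
	return 0;
}
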
index 8fcc64c37ff1b28d57a6e19807b2beb073b74541,6e6ee226de04f60dc8511334db8ca48310fd5cff..9dbb02d9d9c255c9c2515cac80340affe266617a
@@@ -275,6 -275,7 +275,7 @@@ static void stmmac_eee_ctrl_timer(unsig
   */
  bool stmmac_eee_init(struct stmmac_priv *priv)
  {
+       char *phy_bus_name = priv->plat->phy_bus_name;
        bool ret = false;
  
        /* Using PCS we cannot dial with the phy registers at this stage
            (priv->pcs == STMMAC_PCS_RTBI))
                goto out;
  
+       /* Never init EEE in case a switch is attached */
+       if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
+               goto out;
        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;
                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
                                                     tx_lpi_timer);
-               } else
-                       /* Set HW EEE according to the speed */
-                       priv->hw->mac->set_eee_pls(priv->hw,
-                                                  priv->phydev->link);
+               }
+               /* Set HW EEE according to the speed */
+               priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
  
                pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
  
@@@ -603,16 -607,16 +607,16 @@@ static int stmmac_hwtstamp_ioctl(struc
                /* calculate default added value:
                 * formula is :
                 * addend = (2^32)/freq_div_ratio;
-                * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
-                * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
-                * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
+                * where, freq_div_ratio = clk_ptp_ref_i/50MHz
+                * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
+                * NOTE: clk_ptp_ref_i should be >= 50MHz to
                 *       achieve 20ns accuracy.
                 *
                 * 2^x * y == (y << x), hence
                 * 2^32 * 50000000 ==> (50000000 << 32)
                 */
                temp = (u64) (50000000ULL << 32);
-               priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
+               priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
                priv->hw->ptp->config_addend(priv->ioaddr,
                                             priv->default_addend);
  
@@@ -638,6 -642,16 +642,16 @@@ static int stmmac_init_ptp(struct stmma
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;
  
+       /* Fall back to the main clock when no PTP ref clock is passed */
+       priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
+       if (IS_ERR(priv->clk_ptp_ref)) {
+               priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
+               priv->clk_ptp_ref = NULL;
+       } else {
+               clk_prepare_enable(priv->clk_ptp_ref);
+               priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
+       }
        priv->adv_ts = 0;
        if (priv->dma_cap.atime_stamp && priv->extend_desc)
                priv->adv_ts = 1;
  
  static void stmmac_release_ptp(struct stmmac_priv *priv)
  {
+       if (priv->clk_ptp_ref)
+               clk_disable_unprepare(priv->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
  }
  
@@@ -818,7 -834,7 +834,7 @@@ static int stmmac_init_phy(struct net_d
        /* Stop Advertising 1000BASE Capability if interface is not GMII */
        if ((interface == PHY_INTERFACE_MODE_MII) ||
            (interface == PHY_INTERFACE_MODE_RMII) ||
 -              (max_speed < 1000 &&  max_speed > 0))
 +              (max_speed < 1000 && max_speed > 0))
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);
  
@@@ -1061,7 -1077,8 +1077,8 @@@ static int init_dma_desc_rings(struct n
                else
                        p = priv->dma_tx + i;
                p->des2 = 0;
-               priv->tx_skbuff_dma[i] = 0;
+               priv->tx_skbuff_dma[i].buf = 0;
+               priv->tx_skbuff_dma[i].map_as_page = false;
                priv->tx_skbuff[i] = NULL;
        }
  
@@@ -1100,17 -1117,24 +1117,24 @@@ static void dma_free_tx_skbufs(struct s
                else
                        p = priv->dma_tx + i;
  
-               if (priv->tx_skbuff_dma[i]) {
-                       dma_unmap_single(priv->device,
-                                        priv->tx_skbuff_dma[i],
-                                        priv->hw->desc->get_tx_len(p),
-                                        DMA_TO_DEVICE);
-                       priv->tx_skbuff_dma[i] = 0;
+               if (priv->tx_skbuff_dma[i].buf) {
+                       if (priv->tx_skbuff_dma[i].map_as_page)
+                               dma_unmap_page(priv->device,
+                                              priv->tx_skbuff_dma[i].buf,
+                                              priv->hw->desc->get_tx_len(p),
+                                              DMA_TO_DEVICE);
+                       else
+                               dma_unmap_single(priv->device,
+                                                priv->tx_skbuff_dma[i].buf,
+                                                priv->hw->desc->get_tx_len(p),
+                                                DMA_TO_DEVICE);
                }
  
                if (priv->tx_skbuff[i] != NULL) {
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
+                       priv->tx_skbuff_dma[i].buf = 0;
+                       priv->tx_skbuff_dma[i].map_as_page = false;
                }
        }
  }
@@@ -1131,7 -1155,8 +1155,8 @@@ static int alloc_dma_desc_resources(str
        if (!priv->rx_skbuff)
                goto err_rx_skbuff;
  
-       priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+       priv->tx_skbuff_dma = kmalloc_array(txsize,
+                                           sizeof(*priv->tx_skbuff_dma),
                                            GFP_KERNEL);
        if (!priv->tx_skbuff_dma)
                goto err_tx_skbuff_dma;
@@@ -1293,12 -1318,19 +1318,19 @@@ static void stmmac_tx_clean(struct stmm
                        pr_debug("%s: curr %d, dirty %d\n", __func__,
                                 priv->cur_tx, priv->dirty_tx);
  
-               if (likely(priv->tx_skbuff_dma[entry])) {
-                       dma_unmap_single(priv->device,
-                                        priv->tx_skbuff_dma[entry],
-                                        priv->hw->desc->get_tx_len(p),
-                                        DMA_TO_DEVICE);
-                       priv->tx_skbuff_dma[entry] = 0;
+               if (likely(priv->tx_skbuff_dma[entry].buf)) {
+                       if (priv->tx_skbuff_dma[entry].map_as_page)
+                               dma_unmap_page(priv->device,
+                                              priv->tx_skbuff_dma[entry].buf,
+                                              priv->hw->desc->get_tx_len(p),
+                                              DMA_TO_DEVICE);
+                       else
+                               dma_unmap_single(priv->device,
+                                                priv->tx_skbuff_dma[entry].buf,
+                                                priv->hw->desc->get_tx_len(p),
+                                                DMA_TO_DEVICE);
+                       priv->tx_skbuff_dma[entry].buf = 0;
+                       priv->tx_skbuff_dma[entry].map_as_page = false;
                }
                priv->hw->mode->clean_desc3(priv, p);
  
@@@ -1637,6 -1669,13 +1669,13 @@@ static int stmmac_hw_setup(struct net_d
        /* Initialize the MAC Core */
        priv->hw->mac->core_init(priv->hw, dev->mtu);
  
+       ret = priv->hw->mac->rx_ipc(priv->hw);
+       if (!ret) {
+               pr_warn(" RX IPC Checksum Offload disabled\n");
+               priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+               priv->hw->rx_csum = 0;
+       }
        /* Enable the MAC Rx/Tx */
        stmmac_set_mac(priv->ioaddr, true);
  
@@@ -1887,12 -1926,16 +1926,16 @@@ static netdev_tx_t stmmac_xmit(struct s
        if (likely(!is_jumbo)) {
                desc->des2 = dma_map_single(priv->device, skb->data,
                                            nopaged_len, DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       goto dma_map_err;
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
                priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
                                                csum_insertion, priv->mode);
        } else {
                desc = first;
                entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+               if (unlikely(entry < 0))
+                       goto dma_map_err;
        }
  
        for (i = 0; i < nfrags; i++) {
  
                desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
                                              DMA_TO_DEVICE);
-               priv->tx_skbuff_dma[entry] = desc->des2;
+               if (dma_mapping_error(priv->device, desc->des2))
+                       goto dma_map_err; /* should reuse desc w/o issues */
+               priv->tx_skbuff_dma[entry].buf = desc->des2;
+               priv->tx_skbuff_dma[entry].map_as_page = true;
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
                                                priv->mode);
                wmb();
        priv->hw->dma->enable_dma_transmission(priv->ioaddr);
  
        spin_unlock(&priv->tx_lock);
+       return NETDEV_TX_OK;
  
+ dma_map_err:
+       dev_err(priv->device, "Tx dma map failed\n");
+       dev_kfree_skb(skb);
+       priv->dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
  }
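
A brief aside on the pattern in the stmmac hunks above: once a driver maps fragments with skb_frag_dma_map() and the linear part with dma_map_single(), teardown must call the matching unmap variant, and every mapping must be checked with dma_mapping_error(). A minimal sketch of the bookkeeping this implies, with hypothetical xxx_* names (this is not the driver's code):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* One entry per TX descriptor: the DMA address plus a flag recording
 * whether it came from dma_map_single() or skb_frag_dma_map(). */
struct xxx_tx_info {
	dma_addr_t buf;
	bool map_as_page;
};

static int xxx_map_frag(struct device *dev, const skb_frag_t *frag,
			unsigned int len, struct xxx_tx_info *info)
{
	info->buf = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, info->buf))
		return -ENOMEM;		/* caller drops the skb, bumps tx_dropped */
	info->map_as_page = true;	/* unmap later with dma_unmap_page() */
	return 0;
}
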
  
@@@ -2028,7 -2080,12 +2080,12 @@@ static inline void stmmac_rx_refill(str
                        priv->rx_skbuff_dma[entry] =
                            dma_map_single(priv->device, skb->data, bfsize,
                                           DMA_FROM_DEVICE);
+                       if (dma_mapping_error(priv->device,
+                                             priv->rx_skbuff_dma[entry])) {
+                               dev_err(priv->device, "Rx dma map failed\n");
+                               dev_kfree_skb(skb);
+                               break;
+                       }
                        p->des2 = priv->rx_skbuff_dma[entry];
  
                        priv->hw->mode->refill_desc3(priv, p);
@@@ -2055,7 -2112,7 +2112,7 @@@ static int stmmac_rx(struct stmmac_pri
        unsigned int entry = priv->cur_rx % rxsize;
        unsigned int next_entry;
        unsigned int count = 0;
-       int coe = priv->plat->rx_coe;
+       int coe = priv->hw->rx_csum;
  
        if (netif_msg_rx_status(priv)) {
                pr_debug("%s: descriptor ring:\n", __func__);
@@@ -2276,8 -2333,7 +2333,7 @@@ static netdev_features_t stmmac_fix_fea
  
        if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
                features &= ~NETIF_F_RXCSUM;
-       else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
-               features &= ~NETIF_F_IPV6_CSUM;
        if (!priv->plat->tx_coe)
                features &= ~NETIF_F_ALL_CSUM;
  
        return features;
  }
  
+ static int stmmac_set_features(struct net_device *netdev,
+                              netdev_features_t features)
+ {
+       struct stmmac_priv *priv = netdev_priv(netdev);
+       /* Keep the COE type when checksum offload is requested */
+       if (features & NETIF_F_RXCSUM)
+               priv->hw->rx_csum = priv->plat->rx_coe;
+       else
+               priv->hw->rx_csum = 0;
+       /* No need to check the return value: rx_coe was validated during
+        * HW setup and has already been fixed up if there was an issue.
+        */
+       priv->hw->mac->rx_ipc(priv->hw);
+       return 0;
+ }
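
For context, .ndo_set_features is invoked from netdev_update_features() after .ndo_fix_features has massaged the requested bits, e.g. when the user runs "ethtool -K ethX rx on|off". A hedged sketch of the minimal shape of such a hook (xxx_* names are hypothetical, not this driver's):

#include <linux/netdevice.h>

struct xxx_priv {
	bool hw_rx_csum;
};

static void xxx_program_mac(struct xxx_priv *priv)
{
	/* reprogram the MAC's RX checksum engine here */
}

static int xxx_set_features(struct net_device *ndev,
			    netdev_features_t features)
{
	struct xxx_priv *priv = netdev_priv(ndev);

	/* mirror the requested RX checksum state into hardware state */
	priv->hw_rx_csum = !!(features & NETIF_F_RXCSUM);
	xxx_program_mac(priv);
	return 0;
}
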
  /**
   *  stmmac_interrupt - main ISR
   *  @irq: interrupt number.
@@@ -2572,6 -2646,7 +2646,7 @@@ static const struct net_device_ops stmm
        .ndo_stop = stmmac_release,
        .ndo_change_mtu = stmmac_change_mtu,
        .ndo_fix_features = stmmac_fix_features,
+       .ndo_set_features = stmmac_set_features,
        .ndo_set_rx_mode = stmmac_set_rx_mode,
        .ndo_tx_timeout = stmmac_tx_timeout,
        .ndo_do_ioctl = stmmac_ioctl,
   */
  static int stmmac_hw_init(struct stmmac_priv *priv)
  {
-       int ret;
        struct mac_device_info *mac;
  
        /* Identify the MAC HW device */
        /* To use alternate (extended) or normal descriptor structures */
        stmmac_selec_desc_mode(priv);
  
-       ret = priv->hw->mac->rx_ipc(priv->hw);
-       if (!ret) {
-               pr_warn(" RX IPC Checksum Offload not configured.\n");
-               priv->plat->rx_coe = STMMAC_RX_COE_NONE;
-       }
-       if (priv->plat->rx_coe)
+       if (priv->plat->rx_coe) {
+               priv->hw->rx_csum = priv->plat->rx_coe;
                pr_info(" RX Checksum Offload Engine supported (type %d)\n",
                        priv->plat->rx_coe);
+       }
        if (priv->plat->tx_coe)
                pr_info(" TX Checksum insertion supported\n");
  
diff --combined drivers/net/phy/phy.c
index 932190e04d0854aeb59341e9c3d7b9b22f82b520,a854d38c231dfebc983a3bcd4840fdd5a2257bd2..1dfffdc9dfc3577bd6893688da191c38539e243f
@@@ -955,7 -955,7 +955,7 @@@ static inline void mmd_phy_indirect(str
   * 3) Write reg 13 // MMD Data Command for MMD DEVAD
   * 4) Read  reg 14 // Read MMD data
   */
 -static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
 +int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
                                 int devad, int addr)
  {
        struct phy_driver *phydrv = phydev->drv;
        }
        return value;
  }
 +EXPORT_SYMBOL(phy_read_mmd_indirect);
  
  /**
   * phy_write_mmd_indirect - writes data to the MMD registers
   * 3) Write reg 13 // MMD Data Command for MMD DEVAD
   * 4) Write reg 14 // Write MMD data
   */
 -static void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
 +void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
                                   int devad, int addr, u32 data)
  {
        struct phy_driver *phydrv = phydev->drv;
                phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
        }
  }
 +EXPORT_SYMBOL(phy_write_mmd_indirect);
  
  /**
   * phy_init_eee - init and check the EEE feature
@@@ -1019,14 -1017,12 +1019,14 @@@ int phy_init_eee(struct phy_device *phy
  {
        /* According to 802.3az, the EEE is supported only in full-duplex mode.
         * Also EEE feature is active when core is operating with MII, GMII
 -       * or RGMII.
 +       * or RGMII. Internal PHYs are also allowed to proceed and should
 +       * return an error if they do not support EEE.
         */
        if ((phydev->duplex == DUPLEX_FULL) &&
            ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
            (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
 -          (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
 +          (phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
 +           phy_is_internal(phydev))) {
                int eee_lp, eee_cap, eee_adv;
                u32 lp, cap, adv;
                int status;
                /* First check if the EEE ability is supported */
                eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
                                                MDIO_MMD_PCS, phydev->addr);
-               if (eee_cap < 0)
-                       return eee_cap;
+               if (eee_cap <= 0)
+                       goto eee_exit_err;
  
                cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
                if (!cap)
-                       return -EPROTONOSUPPORT;
+                       goto eee_exit_err;
  
                /* Check which link settings negotiated and verify it in
                 * the EEE advertising registers.
                 */
                eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
                                               MDIO_MMD_AN, phydev->addr);
-               if (eee_lp < 0)
-                       return eee_lp;
+               if (eee_lp <= 0)
+                       goto eee_exit_err;
  
                eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
                                                MDIO_MMD_AN, phydev->addr);
-               if (eee_adv < 0)
-                       return eee_adv;
+               if (eee_adv <= 0)
+                       goto eee_exit_err;
  
                adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
                lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
                idx = phy_find_setting(phydev->speed, phydev->duplex);
                if (!(lp & adv & settings[idx].setting))
-                       return -EPROTONOSUPPORT;
+                       goto eee_exit_err;
  
                if (clk_stop_enable) {
                        /* Configure the PHY to stop receiving xMII
  
                return 0; /* EEE supported */
        }
+ eee_exit_err:
        return -EPROTONOSUPPORT;
  }
  EXPORT_SYMBOL(phy_init_eee);
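
With phy_read_mmd_indirect()/phy_write_mmd_indirect() exported above, an Ethernet MAC driver can query Clause 45 registers through a Clause 22 PHY itself. A hedged usage sketch (the xxx_* helper is hypothetical; the constants are the standard ones from linux/mdio.h):

#include <linux/phy.h>
#include <linux/mdio.h>

static bool xxx_phy_has_eee(struct phy_device *phydev)
{
	int val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
					MDIO_MMD_PCS, phydev->addr);

	/* mirroring phy_init_eee() above: <= 0 means either a read error
	 * or an EEE ability register with no modes set */
	return val > 0;
}
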
diff --combined include/linux/mlx4/device.h
index 783dd099abd1e55f45771202f766cc72a32abf8c,511c6e0d21a9ab0045c9dd8b2dc7f9c488730279..1befd8df9cfc98d9dc190ed0db98f16f15810cfd
@@@ -38,7 -38,6 +38,7 @@@
  #include <linux/completion.h>
  #include <linux/radix-tree.h>
  #include <linux/cpu_rmap.h>
 +#include <linux/crash_dump.h>
  
  #include <linux/atomic.h>
  
@@@ -1197,6 -1196,9 +1197,9 @@@ int mlx4_map_sw_to_hw_steering_id(struc
                                  enum mlx4_net_trans_rule_id id);
  int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
  
+ int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+                         int port, int qpn, u16 prio, u64 *reg_id);
  void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
                          int i, int val);
  
@@@ -1276,7 -1278,7 +1279,7 @@@ int mlx4_mr_rereg_mem_write(struct mlx4
  /* Returns true if running in low memory profile (kdump kernel) */
  static inline bool mlx4_low_memory_profile(void)
  {
 -      return reset_devices;
 +      return is_kdump_kernel();
  }
  
  #endif /* MLX4_DEVICE_H */
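
The mlx4 change above swaps the reset_devices boot flag for is_kdump_kernel() (hence the new linux/crash_dump.h include), which is true only when running a kexec crash kernel. A hedged illustration of a consumer (xxx_* is hypothetical):

#include <linux/crash_dump.h>

static unsigned int xxx_default_rx_rings(void)
{
	/* keep the memory footprint tiny inside a crash kernel */
	return is_kdump_kernel() ? 1 : 8;
}
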
diff --combined include/linux/netdevice.h
index 5be20a7bbb0da01b779c4faa9904691df6734f1f,c8e388e5fcccfa604d087ce33ccee5ace528e598..ba72f6baae1a9030318f6d769bc513e9682d9bf2
@@@ -1747,12 -1747,6 +1747,12 @@@ struct netdev_queue *netdev_get_tx_queu
        return &dev->_tx[index];
  }
  
 +static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
 +                                                  const struct sk_buff *skb)
 +{
 +      return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 +}
 +
  static inline void netdev_for_each_tx_queue(struct net_device *dev,
                                            void (*f)(struct net_device *,
                                                      struct netdev_queue *,
@@@ -1787,13 -1781,24 +1787,13 @@@ void dev_net_set(struct net_device *dev
  #endif
  }
  
 -static inline bool netdev_uses_dsa_tags(struct net_device *dev)
 -{
 -#ifdef CONFIG_NET_DSA_TAG_DSA
 -      if (dev->dsa_ptr != NULL)
 -              return dsa_uses_dsa_tags(dev->dsa_ptr);
 -#endif
 -
 -      return 0;
 -}
 -
 -static inline bool netdev_uses_trailer_tags(struct net_device *dev)
 +static inline bool netdev_uses_dsa(struct net_device *dev)
  {
 -#ifdef CONFIG_NET_DSA_TAG_TRAILER
 +#ifdef CONFIG_NET_DSA
        if (dev->dsa_ptr != NULL)
 -              return dsa_uses_trailer_tags(dev->dsa_ptr);
 +              return dsa_uses_tagged_protocol(dev->dsa_ptr);
  #endif
 -
 -      return 0;
 +      return false;
  }
  
  /**
@@@ -1878,13 -1883,7 +1878,13 @@@ struct napi_gro_cb 
        u16     proto;
  
        /* Used in udp_gro_receive */
 -      u16     udp_mark;
 +      u8      udp_mark:1;
 +
 +      /* GRO checksum is valid */
 +      u8      csum_valid:1;
 +
 +      /* Number of checksums via CHECKSUM_UNNECESSARY */
 +      u8      csum_cnt:3;
  
        /* used to support CHECKSUM_COMPLETE for tunneling protocols */
        __wsum  csum;
@@@ -1928,13 -1927,6 +1928,13 @@@ struct udp_offload 
        struct offload_callbacks callbacks;
  };
  
 +struct dsa_device_ops {
 +      netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
 +      int (*rcv)(struct sk_buff *skb, struct net_device *dev,
 +                 struct packet_type *pt, struct net_device *orig_dev);
 +};
 +
  /* often modified stats are per cpu, other are shared (netdev->stats) */
  struct pcpu_sw_netstats {
        u64     rx_packets;
  #define NETDEV_CHANGEUPPER    0x0015
  #define NETDEV_RESEND_IGMP    0x0016
  #define NETDEV_PRECHANGEMTU   0x0017 /* notify before mtu change happened */
 +#define NETDEV_CHANGEINFODATA 0x0018
  
  int register_netdevice_notifier(struct notifier_block *nb);
  int unregister_netdevice_notifier(struct notifier_block *nb);
@@@ -2162,97 -2153,11 +2162,97 @@@ static inline void *skb_gro_network_hea
  static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
                                        const void *start, unsigned int len)
  {
 -      if (skb->ip_summed == CHECKSUM_COMPLETE)
 +      if (NAPI_GRO_CB(skb)->csum_valid)
                NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
                                                  csum_partial(start, len, 0));
  }
  
 +/* GRO checksum functions. These are logical equivalents of the normal
 + * checksum functions (in skbuff.h) except that they operate on the GRO
 + * offsets and fields in sk_buff.
 + */
 +
 +__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
 +
 +static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
 +                                                    bool zero_okay,
 +                                                    __sum16 check)
 +{
 +      return (skb->ip_summed != CHECKSUM_PARTIAL &&
 +              NAPI_GRO_CB(skb)->csum_cnt == 0 &&
 +              (!zero_okay || check));
 +}
 +
 +static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
 +                                                         __wsum psum)
 +{
 +      if (NAPI_GRO_CB(skb)->csum_valid &&
 +          !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
 +              return 0;
 +
 +      NAPI_GRO_CB(skb)->csum = psum;
 +
 +      return __skb_gro_checksum_complete(skb);
 +}
 +
 +static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
 +{
 +      if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
 +              /* Consume a checksum from CHECKSUM_UNNECESSARY */
 +              NAPI_GRO_CB(skb)->csum_cnt--;
 +      } else {
 +              /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
 +               * verified a new top level checksum or an encapsulated one
 +               * during GRO. This saves work if we fall back to the normal path.
 +               */
 +              __skb_incr_checksum_unnecessary(skb);
 +      }
 +}
 +
 +#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,     \
 +                                  compute_pseudo)                     \
 +({                                                                    \
 +      __sum16 __ret = 0;                                              \
 +      if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))  \
 +              __ret = __skb_gro_checksum_validate_complete(skb,       \
 +                              compute_pseudo(skb, proto));            \
 +      if (__ret)                                                      \
 +              __skb_mark_checksum_bad(skb);                           \
 +      else                                                            \
 +              skb_gro_incr_csum_unnecessary(skb);                     \
 +      __ret;                                                          \
 +})
 +
 +#define skb_gro_checksum_validate(skb, proto, compute_pseudo)         \
 +      __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
 +
 +#define skb_gro_checksum_validate_zero_check(skb, proto, check,               \
 +                                           compute_pseudo)            \
 +      __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
 +
 +#define skb_gro_checksum_simple_validate(skb)                         \
 +      __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
 +
 +static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
 +{
 +      return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
 +              !NAPI_GRO_CB(skb)->csum_valid);
 +}
 +
 +static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
 +                                            __sum16 check, __wsum pseudo)
 +{
 +      NAPI_GRO_CB(skb)->csum = ~pseudo;
 +      NAPI_GRO_CB(skb)->csum_valid = 1;
 +}
 +
 +#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo)       \
 +do {                                                                  \
 +      if (__skb_gro_checksum_convert_check(skb))                      \
 +              __skb_gro_checksum_convert(skb, check,                  \
 +                                         compute_pseudo(skb, proto)); \
 +} while (0)
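
The helpers above are meant to be called from a protocol's gro_receive handler before aggregation. A hedged sketch of the expected call pattern; inet_gro_compute_pseudo() is assumed to be the IPv4 pseudo-header helper from the same series, and the xxx_* handler is hypothetical:

#include <linux/netdevice.h>
#include <net/ip.h>

static struct sk_buff **xxx_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	/* Either consumes one CHECKSUM_UNNECESSARY "credit" (csum_cnt) or
	 * checksums the GRO region; a nonzero result marks the csum bad. */
	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return NULL;	/* matching/merging logic omitted from this sketch */
}
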
 +
  static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
@@@ -2849,9 -2754,8 +2849,9 @@@ int dev_set_mac_address(struct net_devi
  int dev_change_carrier(struct net_device *, bool new_carrier);
  int dev_get_phys_port_id(struct net_device *dev,
                         struct netdev_phys_port_id *ppid);
 -int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 -                      struct netdev_queue *txq);
 +struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev);
 +struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 +                                  struct netdev_queue *txq, int *ret);
  int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
  int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
  bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
@@@ -3272,7 -3176,7 +3272,7 @@@ static inline int __dev_uc_sync(struct 
  }
  
  /**
-  *  __dev_uc_unsync - Remove synchonized addresses from device
+  *  __dev_uc_unsync - Remove synchronized addresses from device
   *  @dev:  device to sync
   *  @unsync: function to call if address should be removed
   *
@@@ -3316,7 -3220,7 +3316,7 @@@ static inline int __dev_mc_sync(struct 
  }
  
  /**
-  *  __dev_mc_unsync - Remove synchonized addresses from device
+  *  __dev_mc_unsync - Remove synchronized addresses from device
   *  @dev:  device to sync
   *  @unsync: function to call if address should be removed
   *
@@@ -3453,27 -3357,6 +3453,27 @@@ int __init dev_proc_init(void)
  #define dev_proc_init() 0
  #endif
  
 +static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
 +                                            struct sk_buff *skb, struct net_device *dev,
 +                                            bool more)
 +{
 +      skb->xmit_more = more ? 1 : 0;
 +      return ops->ndo_start_xmit(skb, dev);
 +}
 +
 +static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
 +                                          struct netdev_queue *txq, bool more)
 +{
 +      const struct net_device_ops *ops = dev->netdev_ops;
 +      int rc;
 +
 +      rc = __netdev_start_xmit(ops, skb, dev, more);
 +      if (rc == NETDEV_TX_OK)
 +              txq_trans_update(txq);
 +
 +      return rc;
 +}
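
skb->xmit_more, set by __netdev_start_xmit() above, tells a driver that more packets are queued right behind this one, so expensive doorbell/tail-pointer writes can be batched. A hedged driver-side sketch with hypothetical xxx_* names:

#include <linux/netdevice.h>

struct xxx_ring;
struct xxx_ring *xxx_select_ring(struct net_device *dev, struct sk_buff *skb);
void xxx_post_descriptor(struct xxx_ring *ring, struct sk_buff *skb);
void xxx_write_doorbell(struct xxx_ring *ring);

static netdev_tx_t xxx_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xxx_ring *ring = xxx_select_ring(dev, skb);

	xxx_post_descriptor(ring, skb);
	if (!skb->xmit_more)
		xxx_write_doorbell(ring);	/* flush only on the last skb */
	return NETDEV_TX_OK;
}
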
 +
  int netdev_class_create_file_ns(struct class_attribute *class_attr,
                                const void *ns);
  void netdev_class_remove_file_ns(struct class_attribute *class_attr,
diff --combined include/net/sock.h
index ad23e80cb8d339a2b17e6a5ba84813264b402c35,b9a5bd0ed9f3a6b7bc0d087b2b2ae5563b4a0304..049ab1b732a65e7059b8dd8826af042426101024
@@@ -1574,12 -1574,7 +1574,12 @@@ struct sk_buff *sock_wmalloc(struct soc
  void sock_wfree(struct sk_buff *skb);
  void skb_orphan_partial(struct sk_buff *skb);
  void sock_rfree(struct sk_buff *skb);
 +void sock_efree(struct sk_buff *skb);
 +#ifdef CONFIG_INET
  void sock_edemux(struct sk_buff *skb);
 +#else
 +#define sock_edemux(skb) sock_efree(skb)
 +#endif
  
  int sock_setsockopt(struct socket *sock, int level, int op,
                    char __user *optval, unsigned int optlen);
@@@ -2046,7 -2041,6 +2046,7 @@@ void sk_stop_timer(struct sock *sk, str
  int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
  
  int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
 +struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
  
  /*
   *    Recover an error report and clear atomically
@@@ -2171,9 -2165,7 +2171,7 @@@ sock_recv_timestamp(struct msghdr *msg
         */
        if (sock_flag(sk, SOCK_RCVTSTAMP) ||
            (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
-           (kt.tv64 &&
-            (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
-             skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP)) ||
+           (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
            (hwtstamps->hwtstamp.tv64 &&
             (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
                __sock_recv_timestamp(msg, sk, skb);
diff --combined net/core/dev.c
index 2f3dbd6575701dc6b65d6813a08360b9b2d4db2d,ab9a16530c3684b2e1ff39f6f0587fb6e4aa7f24..3c6a967e583070f35f3b8c173c7314ac5030a0a3
@@@ -2485,6 -2485,52 +2485,6 @@@ static int illegal_highdma(struct net_d
        return 0;
  }
  
 -struct dev_gso_cb {
 -      void (*destructor)(struct sk_buff *skb);
 -};
 -
 -#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
 -
 -static void dev_gso_skb_destructor(struct sk_buff *skb)
 -{
 -      struct dev_gso_cb *cb;
 -
 -      kfree_skb_list(skb->next);
 -      skb->next = NULL;
 -
 -      cb = DEV_GSO_CB(skb);
 -      if (cb->destructor)
 -              cb->destructor(skb);
 -}
 -
 -/**
 - *    dev_gso_segment - Perform emulated hardware segmentation on skb.
 - *    @skb: buffer to segment
 - *    @features: device features as applicable to this skb
 - *
 - *    This function segments the given skb and stores the list of segments
 - *    in skb->next.
 - */
 -static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 -{
 -      struct sk_buff *segs;
 -
 -      segs = skb_gso_segment(skb, features);
 -
 -      /* Verifying header integrity only. */
 -      if (!segs)
 -              return 0;
 -
 -      if (IS_ERR(segs))
 -              return PTR_ERR(segs);
 -
 -      skb->next = segs;
 -      DEV_GSO_CB(skb)->destructor = skb->destructor;
 -      skb->destructor = dev_gso_skb_destructor;
 -
 -      return 0;
 -}
 -
  /* If MPLS offload request, verify we are testing hardware MPLS features
   * instead of standard features for the netdev.
   */
@@@ -2541,137 -2587,137 +2541,143 @@@ netdev_features_t netif_skb_features(st
                return harmonize_features(skb, features);
        }
  
-       features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
-                                              NETIF_F_HW_VLAN_STAG_TX);
+       features = netdev_intersect_features(features,
+                                            skb->dev->vlan_features |
+                                            NETIF_F_HW_VLAN_CTAG_TX |
+                                            NETIF_F_HW_VLAN_STAG_TX);
  
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
-               features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
-                               NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
-                               NETIF_F_HW_VLAN_STAG_TX;
+               features = netdev_intersect_features(features,
+                                                    NETIF_F_SG |
+                                                    NETIF_F_HIGHDMA |
+                                                    NETIF_F_FRAGLIST |
+                                                    NETIF_F_GEN_CSUM |
+                                                    NETIF_F_HW_VLAN_CTAG_TX |
+                                                    NETIF_F_HW_VLAN_STAG_TX);
  
        return harmonize_features(skb, features);
  }
  EXPORT_SYMBOL(netif_skb_features);
  
 -int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 -                      struct netdev_queue *txq)
 +static int xmit_one(struct sk_buff *skb, struct net_device *dev,
 +                  struct netdev_queue *txq, bool more)
  {
 -      const struct net_device_ops *ops = dev->netdev_ops;
 +      unsigned int len;
 +      int rc;
 +
 +      if (!list_empty(&ptype_all))
 +              dev_queue_xmit_nit(skb, dev);
 +
 +      len = skb->len;
 +      trace_net_dev_start_xmit(skb, dev);
 +      rc = netdev_start_xmit(skb, dev, txq, more);
 +      trace_net_dev_xmit(skb, rc, dev, len);
 +
 +      return rc;
 +}
 +
 +struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
 +                                  struct netdev_queue *txq, int *ret)
 +{
 +      struct sk_buff *skb = first;
        int rc = NETDEV_TX_OK;
 -      unsigned int skb_len;
  
 -      if (likely(!skb->next)) {
 -              netdev_features_t features;
 +      while (skb) {
 +              struct sk_buff *next = skb->next;
  
 -              /*
 -               * If device doesn't need skb->dst, release it right now while
 -               * its hot in this cpu cache
 -               */
 -              if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
 -                      skb_dst_drop(skb);
 +              skb->next = NULL;
 +              rc = xmit_one(skb, dev, txq, next != NULL);
 +              if (unlikely(!dev_xmit_complete(rc))) {
 +                      skb->next = next;
 +                      goto out;
 +              }
  
 -              features = netif_skb_features(skb);
 +              skb = next;
 +              if (netif_xmit_stopped(txq) && skb) {
 +                      rc = NETDEV_TX_BUSY;
 +                      break;
 +              }
 +      }
  
 -              if (vlan_tx_tag_present(skb) &&
 -                  !vlan_hw_offload_capable(features, skb->vlan_proto)) {
 -                      skb = __vlan_put_tag(skb, skb->vlan_proto,
 -                                           vlan_tx_tag_get(skb));
 -                      if (unlikely(!skb))
 -                              goto out;
 +out:
 +      *ret = rc;
 +      return skb;
 +}
  
 +struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features)
 +{
 +      if (vlan_tx_tag_present(skb) &&
 +          !vlan_hw_offload_capable(features, skb->vlan_proto)) {
 +              skb = __vlan_put_tag(skb, skb->vlan_proto,
 +                                   vlan_tx_tag_get(skb));
 +              if (skb)
                        skb->vlan_tci = 0;
 -              }
 +      }
 +      return skb;
 +}
  
 -              /* If encapsulation offload request, verify we are testing
 -               * hardware encapsulation features instead of standard
 -               * features for the netdev
 -               */
 -              if (skb->encapsulation)
 -                      features &= dev->hw_enc_features;
 +struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
 +{
 +      netdev_features_t features;
  
 -              if (netif_needs_gso(skb, features)) {
 -                      if (unlikely(dev_gso_segment(skb, features)))
 -                              goto out_kfree_skb;
 -                      if (skb->next)
 -                              goto gso;
 -              } else {
 -                      if (skb_needs_linearize(skb, features) &&
 -                          __skb_linearize(skb))
 -                              goto out_kfree_skb;
 +      if (skb->next)
 +              return skb;
  
 -                      /* If packet is not checksummed and device does not
 -                       * support checksumming for this protocol, complete
 -                       * checksumming here.
 -                       */
 -                      if (skb->ip_summed == CHECKSUM_PARTIAL) {
 -                              if (skb->encapsulation)
 -                                      skb_set_inner_transport_header(skb,
 -                                              skb_checksum_start_offset(skb));
 -                              else
 -                                      skb_set_transport_header(skb,
 -                                              skb_checksum_start_offset(skb));
 -                              if (!(features & NETIF_F_ALL_CSUM) &&
 -                                   skb_checksum_help(skb))
 -                                      goto out_kfree_skb;
 -                      }
 -              }
 +      /* If device doesn't need skb->dst, release it right now while
 +       * it's hot in this cpu cache
 +       */
 +      if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
 +              skb_dst_drop(skb);
  
 -              if (!list_empty(&ptype_all))
 -                      dev_queue_xmit_nit(skb, dev);
 +      features = netif_skb_features(skb);
 +      skb = validate_xmit_vlan(skb, features);
 +      if (unlikely(!skb))
 +              goto out_null;
  
 -              skb_len = skb->len;
 -              trace_net_dev_start_xmit(skb, dev);
 -              rc = ops->ndo_start_xmit(skb, dev);
 -              trace_net_dev_xmit(skb, rc, dev, skb_len);
 -              if (rc == NETDEV_TX_OK)
 -                      txq_trans_update(txq);
 -              return rc;
 -      }
 +      /* If encapsulation offload request, verify we are testing
 +       * hardware encapsulation features instead of standard
 +       * features for the netdev
 +       */
 +      if (skb->encapsulation)
 +              features &= dev->hw_enc_features;
  
 -gso:
 -      do {
 -              struct sk_buff *nskb = skb->next;
 +      if (netif_needs_gso(skb, features)) {
 +              struct sk_buff *segs;
  
 -              skb->next = nskb->next;
 -              nskb->next = NULL;
 +              segs = skb_gso_segment(skb, features);
 +              kfree_skb(skb);
 +              if (IS_ERR(segs))
 +                      segs = NULL;
 +              skb = segs;
 +      } else {
 +              if (skb_needs_linearize(skb, features) &&
 +                  __skb_linearize(skb))
 +                      goto out_kfree_skb;
  
 -              if (!list_empty(&ptype_all))
 -                      dev_queue_xmit_nit(nskb, dev);
 -
 -              skb_len = nskb->len;
 -              trace_net_dev_start_xmit(nskb, dev);
 -              rc = ops->ndo_start_xmit(nskb, dev);
 -              trace_net_dev_xmit(nskb, rc, dev, skb_len);
 -              if (unlikely(rc != NETDEV_TX_OK)) {
 -                      if (rc & ~NETDEV_TX_MASK)
 -                              goto out_kfree_gso_skb;
 -                      nskb->next = skb->next;
 -                      skb->next = nskb;
 -                      return rc;
 +              /* If packet is not checksummed and device does not
 +               * support checksumming for this protocol, complete
 +               * checksumming here.
 +               */
 +              if (skb->ip_summed == CHECKSUM_PARTIAL) {
 +                      if (skb->encapsulation)
 +                              skb_set_inner_transport_header(skb,
 +                                                             skb_checksum_start_offset(skb));
 +                      else
 +                              skb_set_transport_header(skb,
 +                                                       skb_checksum_start_offset(skb));
 +                      if (!(features & NETIF_F_ALL_CSUM) &&
 +                          skb_checksum_help(skb))
 +                              goto out_kfree_skb;
                }
 -              txq_trans_update(txq);
 -              if (unlikely(netif_xmit_stopped(txq) && skb->next))
 -                      return NETDEV_TX_BUSY;
 -      } while (skb->next);
 -
 -out_kfree_gso_skb:
 -      if (likely(skb->next == NULL)) {
 -              skb->destructor = DEV_GSO_CB(skb)->destructor;
 -              consume_skb(skb);
 -              return rc;
        }
 +
 +      return skb;
 +
  out_kfree_skb:
        kfree_skb(skb);
 -out:
 -      return rc;
 +out_null:
 +      return NULL;
  }
 -EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
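
A hedged caller-side summary of the contract established above: validation (VLAN insertion, GSO segmentation, checksum help) happens once up front and may consume the skb, after which dev_hard_start_xmit() walks the resulting list and returns any unsent remainder, with the last return code in *ret:

#include <linux/netdevice.h>

static int xxx_try_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	int rc;

	skb = validate_xmit_skb(skb, dev);	/* NULL: consumed on error */
	if (!skb)
		return -ENOMEM;

	skb = dev_hard_start_xmit(skb, dev, txq, &rc);
	if (!dev_xmit_complete(rc))
		kfree_skb_list(skb);	/* real callers requeue the remainder */
	return rc;
}
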
  
  static void qdisc_pkt_len_init(struct sk_buff *skb)
  {
@@@ -2739,8 -2785,7 +2745,8 @@@ static inline int __dev_xmit_skb(struc
  
                qdisc_bstats_update(q, skb);
  
 -              if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
 +              skb = validate_xmit_skb(skb, dev);
 +              if (skb && sch_direct_xmit(skb, q, dev, txq, root_lock)) {
                        if (unlikely(contended)) {
                                spin_unlock(&q->busylock);
                                contended = false;
@@@ -2880,15 -2925,11 +2886,15 @@@ static int __dev_queue_xmit(struct sk_b
                        if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
                                goto recursion_alert;
  
 +                      skb = validate_xmit_skb(skb, dev);
 +                      if (!skb)
 +                              goto drop;
 +
                        HARD_TX_LOCK(dev, txq, cpu);
  
                        if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
 -                              rc = dev_hard_start_xmit(skb, dev, txq);
 +                              skb = dev_hard_start_xmit(skb, dev, txq, &rc);
                                __this_cpu_dec(xmit_recursion);
                                if (dev_xmit_complete(rc)) {
                                        HARD_TX_UNLOCK(dev, txq);
@@@ -2909,11 -2950,10 +2915,11 @@@ recursion_alert
        }
  
        rc = -ENETDOWN;
 +drop:
        rcu_read_unlock_bh();
  
        atomic_long_inc(&dev->tx_dropped);
 -      kfree_skb(skb);
 +      kfree_skb_list(skb);
        return rc;
  out:
        rcu_read_unlock_bh();
@@@ -3090,7 -3130,8 +3096,7 @@@ static int get_rps_cpu(struct net_devic
        }
  
        if (map) {
 -              tcpu = map->cpus[((u64) hash * map->len) >> 32];
 -
 +              tcpu = map->cpus[reciprocal_scale(hash, map->len)];
                if (cpu_online(tcpu)) {
                        cpu = tcpu;
                        goto done;
@@@ -3924,10 -3965,11 +3930,10 @@@ static enum gro_result dev_gro_receive(
        if (!(skb->dev->features & NETIF_F_GRO))
                goto normal;
  
 -      if (skb_is_gso(skb) || skb_has_frag_list(skb))
 +      if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
                goto normal;
  
        gro_list_prepare(napi, skb);
 -      NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
  
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                NAPI_GRO_CB(skb)->free = 0;
                NAPI_GRO_CB(skb)->udp_mark = 0;
  
 +              /* Setup for GRO checksum validation */
 +              switch (skb->ip_summed) {
 +              case CHECKSUM_COMPLETE:
 +                      NAPI_GRO_CB(skb)->csum = skb->csum;
 +                      NAPI_GRO_CB(skb)->csum_valid = 1;
 +                      NAPI_GRO_CB(skb)->csum_cnt = 0;
 +                      break;
 +              case CHECKSUM_UNNECESSARY:
 +                      NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
 +                      NAPI_GRO_CB(skb)->csum_valid = 0;
 +                      break;
 +              default:
 +                      NAPI_GRO_CB(skb)->csum_cnt = 0;
 +                      NAPI_GRO_CB(skb)->csum_valid = 0;
 +              }
 +
                pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
                break;
        }
@@@ -4186,31 -4212,6 +4192,31 @@@ gro_result_t napi_gro_frags(struct napi
  }
  EXPORT_SYMBOL(napi_gro_frags);
  
 +/* Compute the checksum from gro_offset and return the folded value
 + * after adding in any pseudo checksum.
 + */
 +__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
 +{
 +      __wsum wsum;
 +      __sum16 sum;
 +
 +      wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
 +
 +      /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
 +      sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
 +      if (likely(!sum)) {
 +              if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
 +                  !skb->csum_complete_sw)
 +                      netdev_rx_csum_fault(skb->dev);
 +      }
 +
 +      NAPI_GRO_CB(skb)->csum = wsum;
 +      NAPI_GRO_CB(skb)->csum_valid = 1;
 +
 +      return sum;
 +}
 +EXPORT_SYMBOL(__skb_gro_checksum_complete);
 +
  /*
   * net_rps_action_and_irq_enable sends any pending IPI's for rps.
   * Note: called with local irq disabled, but exits with local irq enabled.
@@@ -4894,7 -4895,8 +4900,8 @@@ static void __netdev_adjacent_dev_remov
        if (adj->master)
                sysfs_remove_link(&(dev->dev.kobj), "master");
  
-       if (netdev_adjacent_is_neigh_list(dev, dev_list))
+       if (netdev_adjacent_is_neigh_list(dev, dev_list) &&
+           net_eq(dev_net(dev), dev_net(adj_dev)))
                netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
  
        list_del_rcu(&adj->list);
@@@ -5164,11 -5166,65 +5171,65 @@@ void netdev_upper_dev_unlink(struct net
  }
  EXPORT_SYMBOL(netdev_upper_dev_unlink);
  
+ void netdev_adjacent_add_links(struct net_device *dev)
+ {
+       struct netdev_adjacent *iter;
+       struct net *net = dev_net(dev);
+       list_for_each_entry(iter, &dev->adj_list.upper, list) {
+               if (!net_eq(net, dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_add(iter->dev, dev,
+                                         &iter->dev->adj_list.lower);
+               netdev_adjacent_sysfs_add(dev, iter->dev,
+                                         &dev->adj_list.upper);
+       }
+       list_for_each_entry(iter, &dev->adj_list.lower, list) {
+               if (!net_eq(net, dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_add(iter->dev, dev,
+                                         &iter->dev->adj_list.upper);
+               netdev_adjacent_sysfs_add(dev, iter->dev,
+                                         &dev->adj_list.lower);
+       }
+ }
+ 
+ void netdev_adjacent_del_links(struct net_device *dev)
+ {
+       struct netdev_adjacent *iter;
+       struct net *net = dev_net(dev);
+       list_for_each_entry(iter, &dev->adj_list.upper, list) {
+               if (!net_eq(net, dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_del(iter->dev, dev->name,
+                                         &iter->dev->adj_list.lower);
+               netdev_adjacent_sysfs_del(dev, iter->dev->name,
+                                         &dev->adj_list.upper);
+       }
+       list_for_each_entry(iter, &dev->adj_list.lower, list) {
+               if (!net_eq(net, dev_net(iter->dev)))
+                       continue;
+               netdev_adjacent_sysfs_del(iter->dev, dev->name,
+                                         &iter->dev->adj_list.upper);
+               netdev_adjacent_sysfs_del(dev, iter->dev->name,
+                                         &dev->adj_list.lower);
+       }
+ }
  void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
  {
        struct netdev_adjacent *iter;
  
+       struct net *net = dev_net(dev);
        list_for_each_entry(iter, &dev->adj_list.upper, list) {
+               if (!net_eq(net, dev_net(iter->dev)))
+                       continue;
                netdev_adjacent_sysfs_del(iter->dev, oldname,
                                          &iter->dev->adj_list.lower);
                netdev_adjacent_sysfs_add(iter->dev, dev,
        }
  
        list_for_each_entry(iter, &dev->adj_list.lower, list) {
+               if (!net_eq(net, dev_net(iter->dev)))
+                       continue;
                netdev_adjacent_sysfs_del(iter->dev, oldname,
                                          &iter->dev->adj_list.upper);
                netdev_adjacent_sysfs_add(iter->dev, dev,
@@@ -6778,6 -6836,7 +6841,7 @@@ int dev_change_net_namespace(struct net
  
        /* Send a netdev-removed uevent to the old namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
+       netdev_adjacent_del_links(dev);
  
        /* Actually switch the network namespace */
        dev_net_set(dev, net);
  
        /* Send a netdev-add uevent to the new namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
+       netdev_adjacent_add_links(dev);
  
        /* Fixup kobjects */
        err = device_rename(&dev->dev, dev->name);
diff --combined net/core/skbuff.c
index a936a401564ebb9c55b624512c63641752bcce09,da1378a3e2c72e0f23e9e21581dfd2ece5491d09..a18dfb02d94492647a50a88797e47d0f698ad62b
@@@ -2647,7 -2647,7 +2647,7 @@@ EXPORT_SYMBOL(skb_prepare_seq_read)
   * skb_seq_read() will return the remaining part of the block.
   *
   * Note 1: The size of each block of data returned can be arbitrary,
-  *       this limitation is the cost for zerocopy seqeuental
+  *       this limitation is the cost for zerocopy sequential
   *       reads of potentially non linear data.
   *
   * Note 2: Fragment lists within fragments are not implemented
@@@ -2781,7 -2781,7 +2781,7 @@@ EXPORT_SYMBOL(skb_find_text)
  /**
   * skb_append_datato_frags - append the user data to a skb
   * @sk: sock  structure
-  * @skb: skb structure to be appened with user data.
+  * @skb: skb structure to be appended with user data.
   * @getfrag: call back function to be used for getting the user data
   * @from: pointer to user message iov
   * @length: length of the iov message
@@@ -3491,53 -3491,32 +3491,53 @@@ int sock_queue_err_skb(struct sock *sk
  }
  EXPORT_SYMBOL(sock_queue_err_skb);
  
 -void __skb_tstamp_tx(struct sk_buff *orig_skb,
 -                   struct skb_shared_hwtstamps *hwtstamps,
 -                   struct sock *sk, int tstype)
 +struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
  {
 -      struct sock_exterr_skb *serr;
 -      struct sk_buff *skb;
 -      int err;
 +      struct sk_buff_head *q = &sk->sk_error_queue;
 +      struct sk_buff *skb, *skb_next;
 +      int err = 0;
  
 -      if (!sk)
 -              return;
 +      spin_lock_bh(&q->lock);
 +      skb = __skb_dequeue(q);
 +      if (skb && (skb_next = skb_peek(q)))
 +              err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
 +      spin_unlock_bh(&q->lock);
  
 -      if (hwtstamps) {
 -              *skb_hwtstamps(orig_skb) =
 -                      *hwtstamps;
 -      } else {
 -              /*
 -               * no hardware time stamps available,
 -               * so keep the shared tx_flags and only
 -               * store software time stamp
 -               */
 -              orig_skb->tstamp = ktime_get_real();
 +      sk->sk_err = err;
 +      if (err)
 +              sk->sk_error_report(sk);
 +
 +      return skb;
 +}
 +EXPORT_SYMBOL(sock_dequeue_err_skb);
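
sock_dequeue_err_skb() above centralizes the "pop one error skb, re-arm sk_err from the next one, signal the socket" dance that sock_recv_errqueue() used to open-code (see the sock.c hunk later in this diff). A hedged consumer sketch with a hypothetical xxx_* name:

#include <net/sock.h>

static void xxx_drain_error_queue(struct sock *sk)
{
	struct sk_buff *skb;

	/* each call atomically regenerates sk->sk_err and reports it */
	while ((skb = sock_dequeue_err_skb(sk)) != NULL)
		kfree_skb(skb);
}
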
 +
 +struct sk_buff *skb_clone_sk(struct sk_buff *skb)
 +{
 +      struct sock *sk = skb->sk;
 +      struct sk_buff *clone;
 +
 +      if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
 +              return NULL;
 +
 +      clone = skb_clone(skb, GFP_ATOMIC);
 +      if (!clone) {
 +              sock_put(sk);
 +              return NULL;
        }
  
 -      skb = skb_clone(orig_skb, GFP_ATOMIC);
 -      if (!skb)
 -              return;
 +      clone->sk = sk;
 +      clone->destructor = sock_efree;
 +
 +      return clone;
 +}
 +EXPORT_SYMBOL(skb_clone_sk);
 +
 +static void __skb_complete_tx_timestamp(struct sk_buff *skb,
 +                                      struct sock *sk,
 +                                      int tstype)
 +{
 +      struct sock_exterr_skb *serr;
 +      int err;
  
        serr = SKB_EXT_ERR(skb);
        memset(serr, 0, sizeof(*serr));
        if (err)
                kfree_skb(skb);
  }
 +
 +void skb_complete_tx_timestamp(struct sk_buff *skb,
 +                             struct skb_shared_hwtstamps *hwtstamps)
 +{
 +      struct sock *sk = skb->sk;
 +
 +      /* take a reference to prevent skb_orphan() from freeing the socket */
 +      sock_hold(sk);
 +
 +      *skb_hwtstamps(skb) = *hwtstamps;
 +      __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
 +
 +      sock_put(sk);
 +}
 +EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
 +
 +void __skb_tstamp_tx(struct sk_buff *orig_skb,
 +                   struct skb_shared_hwtstamps *hwtstamps,
 +                   struct sock *sk, int tstype)
 +{
 +      struct sk_buff *skb;
 +
 +      if (!sk)
 +              return;
 +
 +      if (hwtstamps)
 +              *skb_hwtstamps(orig_skb) = *hwtstamps;
 +      else
 +              orig_skb->tstamp = ktime_get_real();
 +
 +      skb = skb_clone(orig_skb, GFP_ATOMIC);
 +      if (!skb)
 +              return;
 +
 +      __skb_complete_tx_timestamp(skb, sk, tstype);
 +}
  EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
  
  void skb_tstamp_tx(struct sk_buff *orig_skb,
diff --combined net/core/sock.c
index 69592cb66e3b6132a666ee21cf17d1da78b10f8a,d372b4bd3f996dd3248e86748b7d16c912c9b5d7..6f436b5e49611aee6d0e21fc3cdcccdbd8a82a08
@@@ -166,7 -166,7 +166,7 @@@ EXPORT_SYMBOL(sk_ns_capable)
  /**
   * sk_capable - Socket global capability test
   * @sk: Socket to use a capability on or through
-  * @cap: The global capbility to use
+  * @cap: The global capability to use
   *
   * Test to see if the opener of the socket had the capability @cap when
   * the socket was created and the current process has it in all user
@@@ -183,7 -183,7 +183,7 @@@ EXPORT_SYMBOL(sk_capable)
   * @sk: Socket to use a capability on or through
   * @cap: The capability to use
   *
-  * Test to see if the opener of the socket had when the socke was created
+  * Test to see if the opener of the socket had @cap when the socket was created
   * and the current process has the capability @cap over the network namespace
   * the socket is a member of.
   */
@@@ -437,6 -437,7 +437,6 @@@ static void sock_disable_timestamp(stru
  int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
  {
        int err;
 -      int skb_len;
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
  
        skb->dev = NULL;
        skb_set_owner_r(skb, sk);
  
 -      /* Cache the SKB length before we tack it onto the receive
 -       * queue.  Once it is added it no longer belongs to us and
 -       * may be freed by other threads of control pulling packets
 -       * from the queue.
 -       */
 -      skb_len = skb->len;
 -
        /* we escape from rcu protected region, make sure we dont leak
         * a norefcounted dst
         */
@@@ -1637,24 -1645,18 +1637,24 @@@ void sock_rfree(struct sk_buff *skb
  }
  EXPORT_SYMBOL(sock_rfree);
  
 +void sock_efree(struct sk_buff *skb)
 +{
 +      sock_put(skb->sk);
 +}
 +EXPORT_SYMBOL(sock_efree);
 +
 +#ifdef CONFIG_INET
  void sock_edemux(struct sk_buff *skb)
  {
        struct sock *sk = skb->sk;
  
 -#ifdef CONFIG_INET
        if (sk->sk_state == TCP_TIME_WAIT)
                inet_twsk_put(inet_twsk(sk));
        else
 -#endif
                sock_put(sk);
  }
  EXPORT_SYMBOL(sock_edemux);
 +#endif
  
  kuid_t sock_i_uid(struct sock *sk)
  {
@@@ -1820,6 -1822,9 +1820,9 @@@ struct sk_buff *sock_alloc_send_pskb(st
                                                           order);
                                        if (page)
                                                goto fill_page;
+                                       /* Do not retry other high order allocations */
+                                       order = 1;
+                                       max_page_order = 0;
                                }
                                order--;
                        }
@@@ -1867,10 -1872,8 +1870,8 @@@ EXPORT_SYMBOL(sock_alloc_send_skb)
   * no guarantee that allocations succeed. Therefore, @sz MUST be
   * less or equal than PAGE_SIZE.
   */
- bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
+ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
  {
-       int order;
        if (pfrag->page) {
                if (atomic_read(&pfrag->page->_count) == 1) {
                        pfrag->offset = 0;
                put_page(pfrag->page);
        }
  
-       order = SKB_FRAG_PAGE_ORDER;
-       do {
-               gfp_t gfp = prio;
-               if (order)
-                       gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
-               pfrag->page = alloc_pages(gfp, order);
+       pfrag->offset = 0;
+       if (SKB_FRAG_PAGE_ORDER) {
+               pfrag->page = alloc_pages(gfp | __GFP_COMP |
+                                         __GFP_NOWARN | __GFP_NORETRY,
+                                         SKB_FRAG_PAGE_ORDER);
                if (likely(pfrag->page)) {
-                       pfrag->offset = 0;
-                       pfrag->size = PAGE_SIZE << order;
+                       pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
                        return true;
                }
-       } while (--order >= 0);
+       }
+       pfrag->page = alloc_page(gfp);
+       if (likely(pfrag->page)) {
+               pfrag->size = PAGE_SIZE;
+               return true;
+       }
        return false;
  }
  EXPORT_SYMBOL(skb_page_frag_refill);
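
After this rewrite, skb_page_frag_refill() makes a single high-order attempt (with __GFP_NOWARN | __GFP_NORETRY, so no OOM-killer pressure) and otherwise falls back to an order-0 page. A hedged usage sketch of the caller side (xxx_* is hypothetical):

#include <net/sock.h>

static int xxx_reserve_frag(struct sock *sk, unsigned int sz)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!skb_page_frag_refill(sz, pfrag, sk->sk_allocation))
		return -ENOMEM;
	/* on success, pfrag->page has >= sz bytes free at pfrag->offset */
	return 0;
}
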
@@@ -2494,11 -2498,11 +2496,11 @@@ int sock_recv_errqueue(struct sock *sk
                       int level, int type)
  {
        struct sock_exterr_skb *serr;
 -      struct sk_buff *skb, *skb2;
 +      struct sk_buff *skb;
        int copied, err;
  
        err = -EAGAIN;
 -      skb = skb_dequeue(&sk->sk_error_queue);
 +      skb = sock_dequeue_err_skb(sk);
        if (skb == NULL)
                goto out;
  
        msg->msg_flags |= MSG_ERRQUEUE;
        err = copied;
  
 -      /* Reset and regenerate socket error */
 -      spin_lock_bh(&sk->sk_error_queue.lock);
 -      sk->sk_err = 0;
 -      if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
 -              sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
 -              spin_unlock_bh(&sk->sk_error_queue.lock);
 -              sk->sk_error_report(sk);
 -      } else
 -              spin_unlock_bh(&sk->sk_error_queue.lock);
 -
  out_free_skb:
        kfree_skb(skb);
  out:
diff --combined net/ipv6/addrconf.c
index 267ce3caee2403b65eafefb54e19029168acf8d0,fc1fac2a052856e5c1b2476dc469d261a6558e5e..ad4598fcc416cde41211fd466b2b259d6cb98dc9
@@@ -180,7 -180,7 +180,7 @@@ static struct ipv6_devconf ipv6_devcon
        .rtr_solicits           = MAX_RTR_SOLICITATIONS,
        .rtr_solicit_interval   = RTR_SOLICITATION_INTERVAL,
        .rtr_solicit_delay      = MAX_RTR_SOLICITATION_DELAY,
 -      .use_tempaddr           = 0,
 +      .use_tempaddr           = 0,
        .temp_valid_lft         = TEMP_VALID_LIFETIME,
        .temp_prefered_lft      = TEMP_PREFERRED_LIFETIME,
        .regen_max_retry        = REGEN_MAX_RETRY,
@@@ -1105,8 -1105,8 +1105,8 @@@ retry
        spin_unlock_bh(&ifp->lock);
  
        regen_advance = idev->cnf.regen_max_retry *
 -                      idev->cnf.dad_transmits *
 -                      NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
 +                      idev->cnf.dad_transmits *
 +                      NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
        write_unlock_bh(&idev->lock);
  
        /* A temporary address is created only if this calculated Preferred
@@@ -1690,14 -1690,12 +1690,12 @@@ void addrconf_dad_failure(struct inet6_
        addrconf_mod_dad_work(ifp, 0);
  }
  
- /* Join to solicited addr multicast group. */
+ /* Join to solicited addr multicast group.
+  * caller must hold RTNL */
  void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
  {
        struct in6_addr maddr;
  
-       ASSERT_RTNL();
        if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
                return;
  
        ipv6_dev_mc_inc(dev, &maddr);
  }
  
+ /* caller must hold RTNL */
  void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
  {
        struct in6_addr maddr;
  
-       ASSERT_RTNL();
        if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
                return;
  
        __ipv6_dev_mc_dec(idev, &maddr);
  }
  
+ /* caller must hold RTNL */
  static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
  {
        struct in6_addr addr;
  
-       ASSERT_RTNL();
        if (ifp->prefix_len >= 127) /* RFC 6164 */
                return;
        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
        ipv6_dev_ac_inc(ifp->idev->dev, &addr);
  }
  
+ /* caller must hold RTNL */
  static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
  {
        struct in6_addr addr;
  
-       ASSERT_RTNL();
        if (ifp->prefix_len >= 127) /* RFC 6164 */
                return;
        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@@ -3035,7 -3030,7 +3030,7 @@@ static int addrconf_ifdown(struct net_d
                struct hlist_head *h = &inet6_addr_lst[i];
  
                spin_lock_bh(&addrconf_hash_lock);
 -      restart:
 +restart:
                hlist_for_each_entry_rcu(ifa, h, addr_lst) {
                        if (ifa->idev == idev) {
                                hlist_del_init_rcu(&ifa->addr_lst);
@@@ -3547,8 -3542,8 +3542,8 @@@ static void __net_exit if6_proc_net_exi
  }
  
  static struct pernet_operations if6_proc_net_ops = {
 -       .init = if6_proc_net_init,
 -       .exit = if6_proc_net_exit,
 +      .init = if6_proc_net_init,
 +      .exit = if6_proc_net_exit,
  };
  
  int __init if6_proc_init(void)
@@@ -4773,15 -4768,11 +4768,11 @@@ static void __ipv6_ifa_notify(int event
                addrconf_leave_solict(ifp->idev, &ifp->addr);
                if (!ipv6_addr_any(&ifp->peer_addr)) {
                        struct rt6_info *rt;
-                       struct net_device *dev = ifp->idev->dev;
-                       rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL,
-                                       dev->ifindex, 1);
-                       if (rt) {
-                               dst_hold(&rt->dst);
-                               if (ip6_del_rt(rt))
-                                       dst_free(&rt->dst);
-                       }
+                       rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
+                                                      ifp->idev->dev, 0, 0);
+                       if (rt && ip6_del_rt(rt))
+                               dst_free(&rt->dst);
                }
                dst_hold(&ifp->rt->dst);
  
diff --combined net/ipv6/mcast.c
index 64919425f1abf3a17df5be25e1ceb52143aeb044,a23b655a7627a69046d956139946c00dda8825f4..6833dd07b2c22f2f118feb09297f1f0d799634ee
@@@ -121,7 -121,6 +121,7 @@@ static int ip6_mc_leave_src(struct soc
  #define IPV6_MLD_MAX_MSF      64
  
  int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
 +int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
  
  /*
   *    socket join on multicast group
@@@ -173,6 -172,7 +173,7 @@@ int ipv6_sock_mc_join(struct sock *sk, 
        mc_lst->next = NULL;
        mc_lst->addr = *addr;
  
+       rtnl_lock();
        rcu_read_lock();
        if (ifindex == 0) {
                struct rt6_info *rt;
  
        if (dev == NULL) {
                rcu_read_unlock();
+               rtnl_unlock();
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                return -ENODEV;
        }
  
        if (err) {
                rcu_read_unlock();
+               rtnl_unlock();
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                return err;
        }
        spin_unlock(&ipv6_sk_mc_lock);
  
        rcu_read_unlock();
+       rtnl_unlock();
  
        return 0;
  }
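
The hunks above make the IPv6 socket-level multicast join/leave paths take the RTNL themselves, since the underlying ipv6_dev_mc_inc()/dec() paths now expect it. A hedged sketch of the resulting calling convention for an in-kernel user (xxx_* is hypothetical):

#include <net/addrconf.h>

static int xxx_join_group(struct sock *sk, int ifindex,
			  const struct in6_addr *grp)
{
	/* caller must NOT already hold the RTNL; the helper acquires it */
	return ipv6_sock_mc_join(sk, ifindex, grp);
}
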
@@@ -230,10 -233,11 +234,11 @@@ int ipv6_sock_mc_drop(struct sock *sk, 
        if (!ipv6_addr_is_multicast(addr))
                return -EINVAL;
  
+       rtnl_lock();
        spin_lock(&ipv6_sk_mc_lock);
        for (lnk = &np->ipv6_mc_list;
             (mc_lst = rcu_dereference_protected(*lnk,
 -                      lockdep_is_held(&ipv6_sk_mc_lock))) !=NULL ;
 +                      lockdep_is_held(&ipv6_sk_mc_lock))) != NULL;
              lnk = &mc_lst->next) {
                if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
                    ipv6_addr_equal(&mc_lst->addr, addr)) {
                        } else
                                (void) ip6_mc_leave_src(sk, mc_lst, NULL);
                        rcu_read_unlock();
+                       rtnl_unlock();
                        atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
                        kfree_rcu(mc_lst, rcu);
                        return 0;
                }
        }
        spin_unlock(&ipv6_sk_mc_lock);
+       rtnl_unlock();
  
        return -EADDRNOTAVAIL;
  }
@@@ -303,6 -310,7 +311,7 @@@ void ipv6_sock_mc_close(struct sock *sk
        if (!rcu_access_pointer(np->ipv6_mc_list))
                return;
  
+       rtnl_lock();
        spin_lock(&ipv6_sk_mc_lock);
        while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
                                lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
                spin_lock(&ipv6_sk_mc_lock);
        }
        spin_unlock(&ipv6_sk_mc_lock);
+       rtnl_unlock();
  }
  
  int ip6_mc_source(int add, int omode, struct sock *sk,
                if (!psl)
                        goto done;      /* err = -EADDRNOTAVAIL */
                rv = !0;
 -              for (i=0; i<psl->sl_count; i++) {
 +              for (i = 0; i < psl->sl_count; i++) {
                        rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
                        if (rv == 0)
                                break;
                /* update the interface filter */
                ip6_mc_del_src(idev, group, omode, 1, source, 1);
  
 -              for (j=i+1; j<psl->sl_count; j++)
 +              for (j = i+1; j < psl->sl_count; j++)
                        psl->sl_addr[j-1] = psl->sl_addr[j];
                psl->sl_count--;
                err = 0;
                newpsl->sl_max = count;
                newpsl->sl_count = count - IP6_SFBLOCK;
                if (psl) {
 -                      for (i=0; i<psl->sl_count; i++)
 +                      for (i = 0; i < psl->sl_count; i++)
                                newpsl->sl_addr[i] = psl->sl_addr[i];
                        sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
                }
                pmc->sflist = psl = newpsl;
        }
        rv = 1; /* > 0 for insert logic below if sl_count is 0 */
 -      for (i=0; i<psl->sl_count; i++) {
 +      for (i = 0; i < psl->sl_count; i++) {
                rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
                if (rv == 0) /* There is an error in the address. */
                        goto done;
        }
 -      for (j=psl->sl_count-1; j>=i; j--)
 +      for (j = psl->sl_count-1; j >= i; j--)
                psl->sl_addr[j+1] = psl->sl_addr[j];
        psl->sl_addr[i] = *source;
        psl->sl_count++;
@@@ -515,7 -524,7 +525,7 @@@ int ip6_mc_msfilter(struct sock *sk, st
                        goto done;
                }
                newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
 -              for (i=0; i<newpsl->sl_count; ++i) {
 +              for (i = 0; i < newpsl->sl_count; ++i) {
                        struct sockaddr_in6 *psin6;
  
                        psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
@@@ -607,7 -616,7 +617,7 @@@ int ip6_mc_msfget(struct sock *sk, stru
         * on ipv6_sk_mc_lock and a write lock on pmc->sflock. We
         * have the socket lock, so reading here is safe.
         */
 -      for (i=0; i<copycount; i++) {
 +      for (i = 0; i < copycount; i++) {
                struct sockaddr_in6 *psin6;
                struct sockaddr_storage ss;
  
@@@ -649,7 -658,7 +659,7 @@@ bool inet6_mc_check(struct sock *sk, co
        } else {
                int i;
  
 -              for (i=0; i<psl->sl_count; i++) {
 +              for (i = 0; i < psl->sl_count; i++) {
                        if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
                                break;
                }
@@@ -763,7 -772,7 +773,7 @@@ static void mld_add_delrec(struct inet6
                pmc->mca_tomb = im->mca_tomb;
                pmc->mca_sources = im->mca_sources;
                im->mca_tomb = im->mca_sources = NULL;
 -              for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
 +              for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = pmc->mca_crcount;
        }
        spin_unlock_bh(&im->mca_lock);
@@@ -781,7 -790,7 +791,7 @@@ static void mld_del_delrec(struct inet6
  
        spin_lock_bh(&idev->mc_lock);
        pmc_prev = NULL;
 -      for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
 +      for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
                if (ipv6_addr_equal(&pmc->mca_addr, pmca))
                        break;
                pmc_prev = pmc;
        spin_unlock_bh(&idev->mc_lock);
  
        if (pmc) {
 -              for (psf=pmc->mca_tomb; psf; psf=psf_next) {
 +              for (psf = pmc->mca_tomb; psf; psf = psf_next) {
                        psf_next = psf->sf_next;
                        kfree(psf);
                }
@@@ -822,14 -831,14 +832,14 @@@ static void mld_clear_delrec(struct ine
  
        /* clear dead sources, too */
        read_lock_bh(&idev->lock);
 -      for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
 +      for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
                struct ip6_sf_list *psf, *psf_next;
  
                spin_lock_bh(&pmc->mca_lock);
                psf = pmc->mca_tomb;
                pmc->mca_tomb = NULL;
                spin_unlock_bh(&pmc->mca_lock);
 -              for (; psf; psf=psf_next) {
 +              for (; psf; psf = psf_next) {
                        psf_next = psf->sf_next;
                        kfree(psf);
                }
@@@ -846,6 -855,8 +856,8 @@@ int ipv6_dev_mc_inc(struct net_device *
        struct ifmcaddr6 *mc;
        struct inet6_dev *idev;
  
+       ASSERT_RTNL();
+
        /* we need to take a reference on idev */
        idev = in6_dev_get(dev);
  
@@@ -917,8 -928,10 +929,10 @@@ int __ipv6_dev_mc_dec(struct inet6_dev 
  {
        struct ifmcaddr6 *ma, **map;
  
+       ASSERT_RTNL();
+
        write_lock_bh(&idev->lock);
 -      for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
 +      for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) {
                if (ipv6_addr_equal(&ma->mca_addr, addr)) {
                        if (--ma->mca_users == 0) {
                                *map = ma->next;
@@@ -969,7 -982,7 +983,7 @@@ bool ipv6_chk_mcast_addr(struct net_dev
        idev = __in6_dev_get(dev);
        if (idev) {
                read_lock_bh(&idev->lock);
 -              for (mc = idev->mc_list; mc; mc=mc->next) {
 +              for (mc = idev->mc_list; mc; mc = mc->next) {
                        if (ipv6_addr_equal(&mc->mca_addr, group))
                                break;
                }
                                struct ip6_sf_list *psf;
  
                                spin_lock_bh(&mc->mca_lock);
 -                              for (psf=mc->mca_sources;psf;psf=psf->sf_next) {
 +                              for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
                                        if (ipv6_addr_equal(&psf->sf_addr, src_addr))
                                                break;
                                }
                                                psf->sf_count[MCAST_EXCLUDE] !=
                                                mc->mca_sfcount[MCAST_EXCLUDE];
                                else
 -                                      rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;
 +                                      rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
                                spin_unlock_bh(&mc->mca_lock);
                        } else
                                rv = true; /* don't filter unspecified source */
@@@ -1078,10 -1091,10 +1092,10 @@@ static bool mld_xmarksources(struct ifm
        int i, scount;
  
        scount = 0;
 -      for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
 +      for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
                if (scount == nsrcs)
                        break;
 -              for (i=0; i<nsrcs; i++) {
 +              for (i = 0; i < nsrcs; i++) {
                        /* skip inactive filters */
                        if (psf->sf_count[MCAST_INCLUDE] ||
                            pmc->mca_sfcount[MCAST_EXCLUDE] !=
@@@ -1111,10 -1124,10 +1125,10 @@@ static bool mld_marksources(struct ifmc
        /* mark INCLUDE-mode sources */
  
        scount = 0;
 -      for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
 +      for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
                if (scount == nsrcs)
                        break;
 -              for (i=0; i<nsrcs; i++) {
 +              for (i = 0; i < nsrcs; i++) {
                        if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
                                psf->sf_gsresp = 1;
                                scount++;
@@@ -1192,16 -1205,15 +1206,16 @@@ static void mld_update_qrv(struct inet6
         * and SHOULD NOT be one. Catch this here if we ever run
         * into such a case in future.
         */
 +      const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
 +
        WARN_ON(idev->mc_qrv == 0);
  
        if (mlh2->mld2q_qrv > 0)
                idev->mc_qrv = mlh2->mld2q_qrv;
  
 -      if (unlikely(idev->mc_qrv < 2)) {
 +      if (unlikely(idev->mc_qrv < min_qrv)) {
                net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
 -                                   idev->mc_qrv, MLD_QRV_DEFAULT);
 -              idev->mc_qrv = MLD_QRV_DEFAULT;
 +                                   idev->mc_qrv, min_qrv);
 +              idev->mc_qrv = min_qrv;
        }
  }
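
With the tunable in place, the clamp above reduces to: adopt the querier's QRV when it is non-zero, then raise the result to min(MLD_QRV_DEFAULT, sysctl_mld_qrv) if it fell below that floor. A standalone rendering of that arithmetic follows; the function name and plain-int state are illustrative (the kernel updates idev->mc_qrv in place), while MLD_QRV_DEFAULT is 2 as in the kernel.

#include <stdio.h>

#define MLD_QRV_DEFAULT 2                      /* kernel default robustness variable */

static int sysctl_mld_qrv = MLD_QRV_DEFAULT;   /* the new tunable */

/* Illustrative standalone version of the QRV update-and-clamp step. */
static unsigned int mld_qrv_apply(unsigned int cur_qrv, unsigned int queried_qrv)
{
	unsigned int min_qrv = MLD_QRV_DEFAULT < sysctl_mld_qrv ?
			       MLD_QRV_DEFAULT : sysctl_mld_qrv;

	if (queried_qrv > 0)
		cur_qrv = queried_qrv;  /* adopt the querier's QRV */
	if (cur_qrv < min_qrv)
		cur_qrv = min_qrv;      /* QRV MUST NOT be 0, SHOULD NOT be 1 */
	return cur_qrv;
}

int main(void)
{
	printf("%u\n", mld_qrv_apply(2, 0)); /* 0 from querier: keep current 2 */
	printf("%u\n", mld_qrv_apply(2, 1)); /* 1: clamped back up to 2 */
	printf("%u\n", mld_qrv_apply(2, 7)); /* 7: accepted */
	return 0;
}

Taking the min with MLD_QRV_DEFAULT means an administrator can lower the floor via the sysctl but a remote querier can never push the robustness variable below it.
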
  
@@@ -1366,13 -1378,13 +1380,13 @@@ int igmp6_event_query(struct sk_buff *s
  
        read_lock_bh(&idev->lock);
        if (group_type == IPV6_ADDR_ANY) {
 -              for (ma = idev->mc_list; ma; ma=ma->next) {
 +              for (ma = idev->mc_list; ma; ma = ma->next) {
                        spin_lock_bh(&ma->mca_lock);
                        igmp6_group_queried(ma, max_delay);
                        spin_unlock_bh(&ma->mca_lock);
                }
        } else {
 -              for (ma = idev->mc_list; ma; ma=ma->next) {
 +              for (ma = idev->mc_list; ma; ma = ma->next) {
                        if (!ipv6_addr_equal(group, &ma->mca_addr))
                                continue;
                        spin_lock_bh(&ma->mca_lock);
@@@ -1436,7 -1448,7 +1450,7 @@@ int igmp6_event_report(struct sk_buff *
         */
  
        read_lock_bh(&idev->lock);
 -      for (ma = idev->mc_list; ma; ma=ma->next) {
 +      for (ma = idev->mc_list; ma; ma = ma->next) {
                if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
                        spin_lock(&ma->mca_lock);
                        if (del_timer(&ma->mca_timer))
@@@ -1500,7 -1512,7 +1514,7 @@@ mld_scount(struct ifmcaddr6 *pmc, int t
        struct ip6_sf_list *psf;
        int scount = 0;
  
 -      for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
 +      for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
                if (!is_in(pmc, psf, type, gdeleted, sdeleted))
                        continue;
                scount++;
@@@ -1714,7 -1726,7 +1728,7 @@@ static struct sk_buff *add_grec(struct 
        }
        first = 1;
        psf_prev = NULL;
 -      for (psf=*psf_list; psf; psf=psf_next) {
 +      for (psf = *psf_list; psf; psf = psf_next) {
                struct in6_addr *psrc;
  
                psf_next = psf->sf_next;
@@@ -1793,7 -1805,7 +1807,7 @@@ static void mld_send_report(struct inet
  
        read_lock_bh(&idev->lock);
        if (!pmc) {
 -              for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
 +              for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
                        if (pmc->mca_flags & MAF_NOREPORT)
                                continue;
                        spin_lock_bh(&pmc->mca_lock);
@@@ -1826,7 -1838,7 +1840,7 @@@ static void mld_clear_zeros(struct ip6_
        struct ip6_sf_list *psf_prev, *psf_next, *psf;
  
        psf_prev = NULL;
 -      for (psf=*ppsf; psf; psf = psf_next) {
 +      for (psf = *ppsf; psf; psf = psf_next) {
                psf_next = psf->sf_next;
                if (psf->sf_crcount == 0) {
                        if (psf_prev)
@@@ -1850,7 -1862,7 +1864,7 @@@ static void mld_send_cr(struct inet6_de
  
        /* deleted MCA's */
        pmc_prev = NULL;
 -      for (pmc=idev->mc_tomb; pmc; pmc=pmc_next) {
 +      for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
                pmc_next = pmc->next;
                if (pmc->mca_sfmode == MCAST_INCLUDE) {
                        type = MLD2_BLOCK_OLD_SOURCES;
        spin_unlock(&idev->mc_lock);
  
        /* change recs */
 -      for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
 +      for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
                spin_lock_bh(&pmc->mca_lock);
                if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
                        type = MLD2_BLOCK_OLD_SOURCES;
@@@ -2020,7 -2032,7 +2034,7 @@@ static void mld_send_initial_cr(struct 
  
        skb = NULL;
        read_lock_bh(&idev->lock);
 -      for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
 +      for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
                spin_lock_bh(&pmc->mca_lock);
                if (pmc->mca_sfcount[MCAST_EXCLUDE])
                        type = MLD2_CHANGE_TO_EXCLUDE;
@@@ -2065,7 -2077,7 +2079,7 @@@ static int ip6_mc_del1_src(struct ifmca
        int rv = 0;
  
        psf_prev = NULL;
 -      for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
 +      for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
                if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
                        break;
                psf_prev = psf;
@@@ -2106,7 -2118,7 +2120,7 @@@ static int ip6_mc_del_src(struct inet6_
        if (!idev)
                return -ENODEV;
        read_lock_bh(&idev->lock);
 -      for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
 +      for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
                if (ipv6_addr_equal(pmca, &pmc->mca_addr))
                        break;
        }
                pmc->mca_sfcount[sfmode]--;
        }
        err = 0;
 -      for (i=0; i<sfcount; i++) {
 +      for (i = 0; i < sfcount; i++) {
                int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
  
                changerec |= rv > 0;
                pmc->mca_sfmode = MCAST_INCLUDE;
                pmc->mca_crcount = idev->mc_qrv;
                idev->mc_ifc_count = pmc->mca_crcount;
 -              for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
 +              for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
                mld_ifc_event(pmc->idev);
        } else if (sf_setstate(pmc) || changerec)
@@@ -2161,7 -2173,7 +2175,7 @@@ static int ip6_mc_add1_src(struct ifmca
        struct ip6_sf_list *psf, *psf_prev;
  
        psf_prev = NULL;
 -      for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
 +      for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
                if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
                        break;
                psf_prev = psf;
@@@ -2186,7 -2198,7 +2200,7 @@@ static void sf_markstate(struct ifmcadd
        struct ip6_sf_list *psf;
        int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
  
 -      for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
 +      for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
                if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
                        psf->sf_oldin = mca_xcount ==
                                psf->sf_count[MCAST_EXCLUDE] &&
@@@ -2203,7 -2215,7 +2217,7 @@@ static int sf_setstate(struct ifmcaddr
        int new_in, rv;
  
        rv = 0;
 -      for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
 +      for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
                if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
                        new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
                                !psf->sf_count[MCAST_INCLUDE];
                        if (!psf->sf_oldin) {
                                struct ip6_sf_list *prev = NULL;
  
 -                              for (dpsf=pmc->mca_tomb; dpsf;
 -                                   dpsf=dpsf->sf_next) {
 +                              for (dpsf = pmc->mca_tomb; dpsf;
 +                                   dpsf = dpsf->sf_next) {
                                        if (ipv6_addr_equal(&dpsf->sf_addr,
                                            &psf->sf_addr))
                                                break;
                         * add or update "delete" records if an active filter
                         * is now inactive
                         */
 -                      for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
 +                      for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
                                if (ipv6_addr_equal(&dpsf->sf_addr,
                                    &psf->sf_addr))
                                        break;
@@@ -2270,7 -2282,7 +2284,7 @@@ static int ip6_mc_add_src(struct inet6_
        if (!idev)
                return -ENODEV;
        read_lock_bh(&idev->lock);
 -      for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
 +      for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
                if (ipv6_addr_equal(pmca, &pmc->mca_addr))
                        break;
        }
        if (!delta)
                pmc->mca_sfcount[sfmode]++;
        err = 0;
 -      for (i=0; i<sfcount; i++) {
 +      for (i = 0; i < sfcount; i++) {
                err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
                if (err)
                        break;
  
                if (!delta)
                        pmc->mca_sfcount[sfmode]--;
 -              for (j=0; j<i; j++)
 +              for (j = 0; j < i; j++)
                        ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
        } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
                struct ip6_sf_list *psf;
  
                pmc->mca_crcount = idev->mc_qrv;
                idev->mc_ifc_count = pmc->mca_crcount;
 -              for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
 +              for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
                mld_ifc_event(idev);
        } else if (sf_setstate(pmc))
@@@ -2324,12 -2336,12 +2338,12 @@@ static void ip6_mc_clear_src(struct ifm
  {
        struct ip6_sf_list *psf, *nextpsf;
  
 -      for (psf=pmc->mca_tomb; psf; psf=nextpsf) {
 +      for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
                nextpsf = psf->sf_next;
                kfree(psf);
        }
        pmc->mca_tomb = NULL;
 -      for (psf=pmc->mca_sources; psf; psf=nextpsf) {
 +      for (psf = pmc->mca_sources; psf; psf = nextpsf) {
                nextpsf = psf->sf_next;
                kfree(psf);
        }
@@@ -2473,21 -2485,13 +2487,21 @@@ void ipv6_mc_down(struct inet6_dev *ide
        mld_gq_stop_timer(idev);
        mld_dad_stop_timer(idev);
  
 -      for (i = idev->mc_list; i; i=i->next)
 +      for (i = idev->mc_list; i; i = i->next)
                igmp6_group_dropped(i);
        read_unlock_bh(&idev->lock);
  
        mld_clear_delrec(idev);
  }
  
 +static void ipv6_mc_reset(struct inet6_dev *idev)
 +{
 +      idev->mc_qrv = sysctl_mld_qrv;
 +      idev->mc_qi = MLD_QI_DEFAULT;
 +      idev->mc_qri = MLD_QRI_DEFAULT;
 +      idev->mc_v1_seen = 0;
 +      idev->mc_maxdelay = unsolicited_report_interval(idev);
 +}
  
  /* Device going up */
  
@@@ -2498,8 -2502,7 +2512,8 @@@ void ipv6_mc_up(struct inet6_dev *idev
        /* Install multicast list, except for all-nodes (already installed) */
  
        read_lock_bh(&idev->lock);
 -      for (i = idev->mc_list; i; i=i->next)
 +      ipv6_mc_reset(idev);
 +      for (i = idev->mc_list; i; i = i->next)
                igmp6_group_added(i);
        read_unlock_bh(&idev->lock);
  }
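
The hunk above folds the per-device MLD defaults into one helper, ipv6_mc_reset(), called both when the inet6_dev is first initialised and again on every device-up. The practical effect is that a changed sysctl_mld_qrv is picked up the next time an interface comes up, not only by newly created devices. A toy model of that lifecycle, with the struct and names invented for illustration:

#include <stdio.h>

#define MLD_QRV_DEFAULT 2

static int sysctl_mld_qrv = MLD_QRV_DEFAULT;

/* Toy stand-in for the per-interface MLD state. */
struct toy_idev {
	int mc_qrv;
};

/* Mirrors the role of ipv6_mc_reset(): re-read tunables into the device. */
static void toy_mc_reset(struct toy_idev *idev)
{
	idev->mc_qrv = sysctl_mld_qrv;
}

int main(void)
{
	struct toy_idev eth0;

	toy_mc_reset(&eth0);               /* device init */
	printf("qrv at init: %d\n", eth0.mc_qrv);

	sysctl_mld_qrv = 3;                /* admin raises the sysctl */
	toy_mc_reset(&eth0);               /* device goes down and back up */
	printf("qrv after up: %d\n", eth0.mc_qrv);
	return 0;
}
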
@@@ -2519,7 -2522,13 +2533,7 @@@ void ipv6_mc_init_dev(struct inet6_dev 
                        (unsigned long)idev);
        setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
                    (unsigned long)idev);
 -
 -      idev->mc_qrv = MLD_QRV_DEFAULT;
 -      idev->mc_qi = MLD_QI_DEFAULT;
 -      idev->mc_qri = MLD_QRI_DEFAULT;
 -
 -      idev->mc_maxdelay = unsolicited_report_interval(idev);
 -      idev->mc_v1_seen = 0;
 +      ipv6_mc_reset(idev);
        write_unlock_bh(&idev->lock);
  }