Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Tue, 23 Apr 2013 00:32:51 +0000 (20:32 -0400)
committer David S. Miller <davem@davemloft.net>
Tue, 23 Apr 2013 00:32:51 +0000 (20:32 -0400)
Conflicts:
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
include/net/scm.h
net/batman-adv/routing.c
net/ipv4/tcp_input.c

The e{uid,gid} --> {uid,gid} credentials fix conflicted with the
cleanup in 'net-next' that now passes cred structs around.
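
As a rough sketch of the shape of that fix (illustrative only, not the
verbatim patch), the cached SCM ids moved from the sender's effective
credentials to its real ones:

	/* before: effective ids were cached in the scm cookie */
	scm->creds.uid = cred->euid;
	scm->creds.gid = cred->egid;

	/* after: real ids are cached instead */
	scm->creds.uid = cred->uid;
	scm->creds.gid = cred->gid;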

The be2net driver had a bug fix in 'net' that overlapped with the VLAN
interface changes by Patrick McHardy in 'net-next'.
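
The interface change itself, sketched here from the bonding hunks
below (the exact netdev_ops prototype may differ slightly), added an
explicit VLAN protocol argument to the rx add/kill vid hooks:

	/* old hook: the protocol was implicitly 802.1Q */
	int (*ndo_vlan_rx_add_vid)(struct net_device *dev, u16 vid);

	/* new hook: callers pass the VLAN protocol explicitly,
	 * e.g. htons(ETH_P_8021Q) for plain 802.1Q tags
	 */
	int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
				   __be16 proto, u16 vid);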

An IGB conflict existed because in 'net' the build_skb() support was
reverted, and in 'net-next' there was a comment style fix within that
code.

Several batman-adv conflicts were resolved by making sure that all
calls to batadv_is_my_mac() now take a new bat_priv first argument.
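
The shape of that change, as a sketch (signatures paraphrased from the
resolution, not quoted from the batman-adv tree):

	/* old: checked an address against all soft interfaces */
	int batadv_is_my_mac(const uint8_t *addr);

	/* new: checks only the mesh the caller belongs to */
	int batadv_is_my_mac(struct batadv_priv *bat_priv,
			     const uint8_t *addr);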

Eric Dumazet's TCP TS ECR fix in 'net' conflicted with the F-RTO
rewrite in 'net-next'; the changes mostly overlapped.

Thanks to Stephen Rothwell and Antonio Quartulli for help with several
of these merge resolutions.

Signed-off-by: David S. Miller <davem@davemloft.net>
55 files changed:
MAINTAINERS
drivers/net/bonding/bond_main.c
drivers/net/can/mcp251x.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/tun.c
drivers/net/usb/cdc_mbim.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/ssb/driver_chipcommon_pmu.c
include/net/scm.h
kernel/signal.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/network-coding.c
net/batman-adv/routing.c
net/batman-adv/translation-table.c
net/batman-adv/vis.c
net/bridge/br_if.c
net/can/gw.c
net/core/dev.c
net/core/rtnetlink.c
net/ipv4/devinet.c
net/ipv4/ip_fragment.c
net/ipv4/syncookies.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/reassembly.c
net/iucv/af_iucv.c
net/mac80211/iface.c
net/mac80211/mlme.c
net/netfilter/nf_nat_core.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/unix/af_unix.c
security/selinux/hooks.c

diff --combined MAINTAINERS
index c39bdc3fb4250c8c0a5d906663d89af38a960df5,8bdd7a7ef2f4687aa26ee66a70fe8ad428e14890..1ee5d119831ee155ff4a501839f0bbd97d721969
@@@ -1764,7 -1764,7 +1764,7 @@@ F:      arch/arm/configs/bcm2835_defconfi
  F:    drivers/*/*bcm2835*
  
  BROADCOM TG3 GIGABIT ETHERNET DRIVER
 -M:    Matt Carlson <mcarlson@broadcom.com>
 +M:    Nithin Nayak Sujir <nsujir@broadcom.com>
  M:    Michael Chan <mchan@broadcom.com>
  L:    netdev@vger.kernel.org
  S:    Supported
@@@ -4941,6 -4941,12 +4941,12 @@@ W:    logfs.or
  S:    Maintained
  F:    fs/logfs/
  
+ LPC32XX MACHINE SUPPORT
+ M:    Roland Stigge <stigge@antcom.de>
+ L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+ S:    Maintained
+ F:    arch/arm/mach-lpc32xx/
  LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
  M:    Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
  M:    Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
@@@ -6330,7 -6336,6 +6336,7 @@@ F:      drivers/acpi/apei/erst.
  
  PTP HARDWARE CLOCK SUPPORT
  M:    Richard Cochran <richardcochran@gmail.com>
 +L:    netdev@vger.kernel.org
  S:    Maintained
  W:    http://linuxptp.sourceforge.net/
  F:    Documentation/ABI/testing/sysfs-ptp
@@@ -6462,7 -6467,6 +6468,7 @@@ S:      Supporte
  F:    drivers/net/ethernet/qlogic/qlcnic/
  
  QLOGIC QLGE 10Gb ETHERNET DRIVER
 +M:    Shahed Shaikh <shahed.shaikh@qlogic.com>
  M:    Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
  M:    Ron Mercer <ron.mercer@qlogic.com>
  M:    linux-driver@qlogic.com
@@@ -6627,7 -6631,7 +6633,7 @@@ S:      Supporte
  F:    fs/reiserfs/
  
  REGISTER MAP ABSTRACTION
- M:    Mark Brown <broonie@opensource.wolfsonmicro.com>
+ M:    Mark Brown <broonie@kernel.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
  S:    Supported
  F:    drivers/base/regmap/
@@@ -7375,7 -7379,7 +7381,7 @@@ F:      sound
  
  SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
  M:    Liam Girdwood <lgirdwood@gmail.com>
- M:    Mark Brown <broonie@opensource.wolfsonmicro.com>
+ M:    Mark Brown <broonie@kernel.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  W:    http://alsa-project.org/main/index.php/ASoC
@@@ -7464,7 -7468,7 +7470,7 @@@ F:      drivers/clk/spear
  
  SPI SUBSYSTEM
  M:    Grant Likely <grant.likely@secretlab.ca>
- M:    Mark Brown <broonie@opensource.wolfsonmicro.com>
+ M:    Mark Brown <broonie@kernel.org>
  L:    spi-devel-general@lists.sourceforge.net
  Q:    http://patchwork.kernel.org/project/spi-devel-general/list/
  T:    git git://git.secretlab.ca/git/linux-2.6.git
@@@ -8516,7 -8520,7 +8522,7 @@@ F:      drivers/usb/gadget/*uvc*.
  F:    drivers/usb/gadget/webcam.c
  
  USB WIRELESS RNDIS DRIVER (rndis_wlan)
 -M:    Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 +M:    Jussi Kivilinna <jussi.kivilinna@iki.fi>
  L:    linux-wireless@vger.kernel.org
  S:    Maintained
  F:    drivers/net/wireless/rndis_wlan.c
@@@ -8709,7 -8713,7 +8715,7 @@@ F:      drivers/scsi/vmw_pvscsi.
  
  VOLTAGE AND CURRENT REGULATOR FRAMEWORK
  M:    Liam Girdwood <lrg@ti.com>
- M:    Mark Brown <broonie@opensource.wolfsonmicro.com>
+ M:    Mark Brown <broonie@kernel.org>
  W:    http://opensource.wolfsonmicro.com/node/15
  W:    http://www.slimlogic.co.uk/?p=48
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git
diff --combined drivers/net/bonding/bond_main.c
index 5e22126c7a269faa6c886ec2578eafd37855d783,dbbea0eec134f1b2f0225161a33111264450f48e..532153db1f9c7026235b47bd963b8e06391c5dfd
@@@ -428,15 -428,14 +428,15 @@@ int bond_dev_queue_xmit(struct bonding 
   * @bond_dev: bonding net device that got called
   * @vid: vlan id being added
   */
 -static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
 +static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
 +                              __be16 proto, u16 vid)
  {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave, *stop_at;
        int i, res;
  
        bond_for_each_slave(bond, slave, i) {
 -              res = vlan_vid_add(slave->dev, vid);
 +              res = vlan_vid_add(slave->dev, proto, vid);
                if (res)
                        goto unwind;
        }
@@@ -454,7 -453,7 +454,7 @@@ unwind
        /* unwind from head to the slave that failed */
        stop_at = slave;
        bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
 -              vlan_vid_del(slave->dev, vid);
 +              vlan_vid_del(slave->dev, proto, vid);
  
        return res;
  }
   * @bond_dev: bonding net device that got called
   * @vid: vlan id being removed
   */
 -static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
 +static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
 +                               __be16 proto, u16 vid)
  {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave;
        int i, res;
  
        bond_for_each_slave(bond, slave, i)
 -              vlan_vid_del(slave->dev, vid);
 +              vlan_vid_del(slave->dev, proto, vid);
  
        res = bond_del_vlan(bond, vid);
        if (res) {
@@@ -490,8 -488,7 +490,8 @@@ static void bond_add_vlans_on_slave(str
        int res;
  
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 -              res = vlan_vid_add(slave_dev, vlan->vlan_id);
 +              res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q),
 +                                 vlan->vlan_id);
                if (res)
                        pr_warning("%s: Failed to add vlan id %d to device %s\n",
                                   bond->dev->name, vlan->vlan_id,
@@@ -507,7 -504,7 +507,7 @@@ static void bond_del_vlans_from_slave(s
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
                if (!vlan->vlan_id)
                        continue;
 -              vlan_vid_del(slave_dev, vlan->vlan_id);
 +              vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id);
        }
  }
  
@@@ -782,7 -779,7 +782,7 @@@ static void bond_resend_igmp_join_reque
  
        /* rejoin all groups on vlan devices */
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 -              vlan_dev = __vlan_find_dev_deep(bond_dev,
 +              vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
                                                vlan->vlan_id);
                if (vlan_dev)
                        __bond_resend_igmp_join_requests(vlan_dev);
@@@ -799,8 -796,9 +799,8 @@@ static void bond_resend_igmp_join_reque
  {
        struct bonding *bond = container_of(work, struct bonding,
                                            mcast_work.work);
 -      rcu_read_lock();
 +
        bond_resend_igmp_join_requests(bond);
 -      rcu_read_unlock();
  }
  
  /*
@@@ -848,8 -846,10 +848,10 @@@ static void bond_mc_swap(struct bondin
                if (bond->dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(old_active->dev, -1);
  
+               netif_addr_lock_bh(bond->dev);
                netdev_for_each_mc_addr(ha, bond->dev)
                        dev_mc_del(old_active->dev, ha->addr);
+               netif_addr_unlock_bh(bond->dev);
        }
  
        if (new_active) {
                if (bond->dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(new_active->dev, 1);
  
+               netif_addr_lock_bh(bond->dev);
                netdev_for_each_mc_addr(ha, bond->dev)
                        dev_mc_add(new_active->dev, ha->addr);
+               netif_addr_unlock_bh(bond->dev);
        }
  }
  
@@@ -1903,11 -1905,29 +1907,29 @@@ err_dest_symlinks
        bond_destroy_slave_symlinks(bond_dev, slave_dev);
  
  err_detach:
+       if (!USES_PRIMARY(bond->params.mode)) {
+               netif_addr_lock_bh(bond_dev);
+               bond_mc_list_flush(bond_dev, slave_dev);
+               netif_addr_unlock_bh(bond_dev);
+       }
+       bond_del_vlans_from_slave(bond, slave_dev);
        write_lock_bh(&bond->lock);
        bond_detach_slave(bond, new_slave);
+       if (bond->primary_slave == new_slave)
+               bond->primary_slave = NULL;
        write_unlock_bh(&bond->lock);
+       if (bond->curr_active_slave == new_slave) {
+               read_lock(&bond->lock);
+               write_lock_bh(&bond->curr_slave_lock);
+               bond_change_active_slave(bond, NULL);
+               bond_select_active_slave(bond);
+               write_unlock_bh(&bond->curr_slave_lock);
+               read_unlock(&bond->lock);
+       }
+       slave_disable_netpoll(new_slave);
  
  err_close:
+       slave_dev->priv_flags &= ~IFF_BONDING;
        dev_close(slave_dev);
  
  err_unset_master:
@@@ -2512,8 -2532,7 +2534,8 @@@ static int bond_has_this_ip(struct bond
  
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
                rcu_read_lock();
 -              vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id);
 +              vlan_dev = __vlan_find_dev_deep(bond->dev, htons(ETH_P_8021Q),
 +                                              vlan->vlan_id);
                rcu_read_unlock();
                if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip))
                        return 1;
@@@ -2542,7 -2561,7 +2564,7 @@@ static void bond_arp_send(struct net_de
                return;
        }
        if (vlan_id) {
 -              skb = vlan_put_tag(skb, vlan_id);
 +              skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
                if (!skb) {
                        pr_err("failed to insert VLAN tag\n");
                        return;
@@@ -2604,7 -2623,6 +2626,7 @@@ static void bond_arp_send_all(struct bo
                list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
                        rcu_read_lock();
                        vlan_dev = __vlan_find_dev_deep(bond->dev,
 +                                                      htons(ETH_P_8021Q),
                                                        vlan->vlan_id);
                        rcu_read_unlock();
                        if (vlan_dev == rt->dst.dev) {
@@@ -3172,11 -3190,20 +3194,20 @@@ static int bond_slave_netdev_event(unsi
                                   struct net_device *slave_dev)
  {
        struct slave *slave = bond_slave_get_rtnl(slave_dev);
-       struct bonding *bond = slave->bond;
-       struct net_device *bond_dev = slave->bond->dev;
+       struct bonding *bond;
+       struct net_device *bond_dev;
        u32 old_speed;
        u8 old_duplex;
  
+       /* A netdev event can be generated while enslaving a device
+        * before netdev_rx_handler_register is called in which case
+        * slave will be NULL
+        */
+       if (!slave)
+               return NOTIFY_DONE;
+       bond_dev = slave->bond->dev;
+       bond = slave->bond;
        switch (event) {
        case NETDEV_UNREGISTER:
                if (bond->setup_by_slave)
@@@ -3290,20 -3317,22 +3321,22 @@@ static int bond_xmit_hash_policy_l2(str
   */
  static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
  {
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-       struct iphdr *iph;
-       struct ipv6hdr *ipv6h;
+       const struct ethhdr *data;
+       const struct iphdr *iph;
+       const struct ipv6hdr *ipv6h;
        u32 v6hash;
-       __be32 *s, *d;
+       const __be32 *s, *d;
  
        if (skb->protocol == htons(ETH_P_IP) &&
-           skb_network_header_len(skb) >= sizeof(*iph)) {
+           pskb_network_may_pull(skb, sizeof(*iph))) {
                iph = ip_hdr(skb);
+               data = (struct ethhdr *)skb->data;
                return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
                        (data->h_dest[5] ^ data->h_source[5])) % count;
        } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+                  pskb_network_may_pull(skb, sizeof(*ipv6h))) {
                ipv6h = ipv6_hdr(skb);
+               data = (struct ethhdr *)skb->data;
                s = &ipv6h->saddr.s6_addr32[0];
                d = &ipv6h->daddr.s6_addr32[0];
                v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
  static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
  {
        u32 layer4_xor = 0;
-       struct iphdr *iph;
-       struct ipv6hdr *ipv6h;
-       __be32 *s, *d;
-       __be16 *layer4hdr;
+       const struct iphdr *iph;
+       const struct ipv6hdr *ipv6h;
+       const __be32 *s, *d;
+       const __be16 *l4 = NULL;
+       __be16 _l4[2];
+       int noff = skb_network_offset(skb);
+       int poff;
  
        if (skb->protocol == htons(ETH_P_IP) &&
-           skb_network_header_len(skb) >= sizeof(*iph)) {
+           pskb_may_pull(skb, noff + sizeof(*iph))) {
                iph = ip_hdr(skb);
-               if (!ip_is_fragment(iph) &&
-                   (iph->protocol == IPPROTO_TCP ||
-                    iph->protocol == IPPROTO_UDP) &&
-                   (skb_headlen(skb) - skb_network_offset(skb) >=
-                    iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
-                       layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
-                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+               poff = proto_ports_offset(iph->protocol);
+               if (!ip_is_fragment(iph) && poff >= 0) {
+                       l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
+                                               sizeof(_l4), &_l4);
+                       if (l4)
+                               layer4_xor = ntohs(l4[0] ^ l4[1]);
                }
                return (layer4_xor ^
                        ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
        } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+                  pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
                ipv6h = ipv6_hdr(skb);
-               if ((ipv6h->nexthdr == IPPROTO_TCP ||
-                    ipv6h->nexthdr == IPPROTO_UDP) &&
-                   (skb_headlen(skb) - skb_network_offset(skb) >=
-                    sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
-                       layer4hdr = (__be16 *)(ipv6h + 1);
-                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+               poff = proto_ports_offset(ipv6h->nexthdr);
+               if (poff >= 0) {
+                       l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
+                                               sizeof(_l4), &_l4);
+                       if (l4)
+                               layer4_xor = ntohs(l4[0] ^ l4[1]);
                }
                s = &ipv6h->saddr.s6_addr32[0];
                d = &ipv6h->daddr.s6_addr32[0];
@@@ -4226,37 -4258,6 +4262,37 @@@ void bond_set_mode_ops(struct bonding *
        }
  }
  
 +static int bond_ethtool_get_settings(struct net_device *bond_dev,
 +                                   struct ethtool_cmd *ecmd)
 +{
 +      struct bonding *bond = netdev_priv(bond_dev);
 +      struct slave *slave;
 +      int i;
 +      unsigned long speed = 0;
 +
 +      ecmd->duplex = DUPLEX_UNKNOWN;
 +      ecmd->port = PORT_OTHER;
 +
 +      /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we
 +       * do not need to check mode.  Though link speed might not represent
 +       * the true receive or transmit bandwidth (not all modes are symmetric)
 +       * this is an accurate maximum.
 +       */
 +      read_lock(&bond->lock);
 +      bond_for_each_slave(bond, slave, i) {
 +              if (SLAVE_IS_OK(slave)) {
 +                      if (slave->speed != SPEED_UNKNOWN)
 +                              speed += slave->speed;
 +                      if (ecmd->duplex == DUPLEX_UNKNOWN &&
 +                          slave->duplex != DUPLEX_UNKNOWN)
 +                              ecmd->duplex = slave->duplex;
 +              }
 +      }
 +      ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
 +      read_unlock(&bond->lock);
 +      return 0;
 +}
 +
  static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
                                     struct ethtool_drvinfo *drvinfo)
  {
  
  static const struct ethtool_ops bond_ethtool_ops = {
        .get_drvinfo            = bond_ethtool_get_drvinfo,
 +      .get_settings           = bond_ethtool_get_settings,
        .get_link               = ethtool_op_get_link,
  };
  
@@@ -4359,9 -4359,9 +4395,9 @@@ static void bond_setup(struct net_devic
         */
  
        bond_dev->hw_features = BOND_VLAN_FEATURES |
 -                              NETIF_F_HW_VLAN_TX |
 -                              NETIF_F_HW_VLAN_RX |
 -                              NETIF_F_HW_VLAN_FILTER;
 +                              NETIF_F_HW_VLAN_CTAG_TX |
 +                              NETIF_F_HW_VLAN_CTAG_RX |
 +                              NETIF_F_HW_VLAN_CTAG_FILTER;
  
        bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
        bond_dev->features |= bond_dev->hw_features;
@@@ -4882,9 -4882,18 +4918,18 @@@ static int __net_init bond_net_init(str
  static void __net_exit bond_net_exit(struct net *net)
  {
        struct bond_net *bn = net_generic(net, bond_net_id);
+       struct bonding *bond, *tmp_bond;
+       LIST_HEAD(list);
  
        bond_destroy_sysfs(bn);
        bond_destroy_proc_dir(bn);
+       /* Kill off any bonds created after unregistering bond rtnl ops */
+       rtnl_lock();
+       list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+               unregister_netdevice_queue(bond->dev, &list);
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
  }
  
  static struct pernet_operations bond_net_ops = {
@@@ -4938,8 -4947,8 +4983,8 @@@ static void __exit bonding_exit(void
  
        bond_destroy_debugfs();
  
-       unregister_pernet_subsys(&bond_net_ops);
        rtnl_link_unregister(&bond_link_ops);
+       unregister_pernet_subsys(&bond_net_ops);
  
  #ifdef CONFIG_NET_POLL_CONTROLLER
        /*
diff --combined drivers/net/can/mcp251x.c
index 55033dd57afb06bf34723949751af908c3946faf,9aa0c64c33c81c9f296ed5f08b0d3666ee989816..8cda23bf0614a1ebe383660300e3ec0cd34d20e2
@@@ -269,7 -269,7 +269,7 @@@ struct mcp251x_priv 
  #define MCP251X_IS(_model) \
  static inline int mcp251x_is_##_model(struct spi_device *spi) \
  { \
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); \
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi); \
        return priv->model == CAN_MCP251X_MCP##_model; \
  }
  
@@@ -305,7 -305,7 +305,7 @@@ static void mcp251x_clean(struct net_de
   */
  static int mcp251x_spi_trans(struct spi_device *spi, int len)
  {
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
        struct spi_transfer t = {
                .tx_buf = priv->spi_tx_buf,
                .rx_buf = priv->spi_rx_buf,
  
  static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
  {
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
        u8 val = 0;
  
        priv->spi_tx_buf[0] = INSTRUCTION_READ;
  static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
                uint8_t *v1, uint8_t *v2)
  {
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
  
        priv->spi_tx_buf[0] = INSTRUCTION_READ;
        priv->spi_tx_buf[1] = reg;
  
  static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
  {
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
  
        priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
        priv->spi_tx_buf[1] = reg;
  static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
                               u8 mask, uint8_t val)
  {
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
  
        priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
        priv->spi_tx_buf[1] = reg;
  static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
                                int len, int tx_buf_idx)
  {
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
  
        if (mcp251x_is_2510(spi)) {
                int i;
  static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
                          int tx_buf_idx)
  {
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
        u32 sid, eid, exide, rtr;
        u8 buf[SPI_TRANSFER_BUF_LEN];
  
  static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
                                int buf_idx)
  {
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
  
        if (mcp251x_is_2510(spi)) {
                int i, len;
  
  static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
  {
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
        struct sk_buff *skb;
        struct can_frame *frame;
        u8 buf[SPI_TRANSFER_BUF_LEN];
@@@ -550,7 -550,7 +550,7 @@@ static int mcp251x_do_set_mode(struct n
  
  static int mcp251x_set_normal_mode(struct spi_device *spi)
  {
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
        unsigned long timeout;
  
        /* Enable interrupts */
@@@ -620,7 -620,7 +620,7 @@@ static int mcp251x_setup(struct net_dev
  
  static int mcp251x_hw_reset(struct spi_device *spi)
  {
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
        int ret;
        unsigned long timeout;
  
@@@ -929,6 -929,7 +929,7 @@@ static int mcp251x_open(struct net_devi
        struct mcp251x_priv *priv = netdev_priv(net);
        struct spi_device *spi = priv->spi;
        struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+       unsigned long flags;
        int ret;
  
        ret = open_candev(net);
        priv->tx_skb = NULL;
        priv->tx_len = 0;
  
+       flags = IRQF_ONESHOT;
+       if (pdata->irq_flags)
+               flags |= pdata->irq_flags;
+       else
+               flags |= IRQF_TRIGGER_FALLING;
        ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
-                 pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
-                 DEVICE_NAME, priv);
+                                  flags, DEVICE_NAME, priv);
        if (ret) {
                dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
                if (pdata->transceiver_enable)
@@@ -1020,7 -1026,7 +1026,7 @@@ static int mcp251x_can_probe(struct spi
                CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
        priv->model = spi_get_device_id(spi)->driver_data;
        priv->net = net;
 -      dev_set_drvdata(&spi->dev, priv);
 +      spi_set_drvdata(spi, priv);
  
        priv->spi = spi;
        mutex_init(&priv->mcp_lock);
@@@ -1118,7 -1124,7 +1124,7 @@@ error_out
  static int mcp251x_can_remove(struct spi_device *spi)
  {
        struct mcp251x_platform_data *pdata = spi->dev.platform_data;
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
        struct net_device *net = priv->net;
  
        unregister_candev(net);
        return 0;
  }
  
 -#ifdef CONFIG_PM
 -static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
 +#ifdef CONFIG_PM_SLEEP
 +
 +static int mcp251x_can_suspend(struct device *dev)
  {
 +      struct spi_device *spi = to_spi_device(dev);
        struct mcp251x_platform_data *pdata = spi->dev.platform_data;
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
        struct net_device *net = priv->net;
  
        priv->force_quit = 1;
        return 0;
  }
  
 -static int mcp251x_can_resume(struct spi_device *spi)
 +static int mcp251x_can_resume(struct device *dev)
  {
 +      struct spi_device *spi = to_spi_device(dev);
        struct mcp251x_platform_data *pdata = spi->dev.platform_data;
 -      struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 +      struct mcp251x_priv *priv = spi_get_drvdata(spi);
  
        if (priv->after_suspend & AFTER_SUSPEND_POWER) {
                pdata->power_enable(1);
        enable_irq(spi->irq);
        return 0;
  }
 -#else
 -#define mcp251x_can_suspend NULL
 -#define mcp251x_can_resume NULL
  #endif
  
 +static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
 +      mcp251x_can_resume);
 +
  static const struct spi_device_id mcp251x_id_table[] = {
        { "mcp2510",    CAN_MCP251X_MCP2510 },
        { "mcp2515",    CAN_MCP251X_MCP2515 },
@@@ -1210,15 -1213,29 +1216,15 @@@ MODULE_DEVICE_TABLE(spi, mcp251x_id_tab
  static struct spi_driver mcp251x_can_driver = {
        .driver = {
                .name = DEVICE_NAME,
 -              .bus = &spi_bus_type,
                .owner = THIS_MODULE,
 +              .pm = &mcp251x_can_pm_ops,
        },
  
        .id_table = mcp251x_id_table,
        .probe = mcp251x_can_probe,
        .remove = mcp251x_can_remove,
 -      .suspend = mcp251x_can_suspend,
 -      .resume = mcp251x_can_resume,
  };
 -
 -static int __init mcp251x_can_init(void)
 -{
 -      return spi_register_driver(&mcp251x_can_driver);
 -}
 -
 -static void __exit mcp251x_can_exit(void)
 -{
 -      spi_unregister_driver(&mcp251x_can_driver);
 -}
 -
 -module_init(mcp251x_can_init);
 -module_exit(mcp251x_can_exit);
 +module_spi_driver(mcp251x_can_driver);
  
  MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
              "Christian Pellegrin <chripell@evolware.org>");
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 6b50443d3456c12c34f525408267639cd14f875a,57619dd4a92b0ef08ff0b990ae26db355bb732f3..d72bd8c40aa15ec40c8be248cabaea49453ccfb7
@@@ -451,8 -451,7 +451,8 @@@ static void bnx2x_tpa_start(struct bnx2
   * Compute number of aggregated segments, and gso_type.
   */
  static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
 -                               u16 len_on_bd, unsigned int pkt_len)
 +                               u16 len_on_bd, unsigned int pkt_len,
 +                               u16 num_of_coalesced_segs)
  {
        /* TPA aggregation won't have either IP options or TCP options
         * other than timestamp or IPv6 extension headers.
        /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
         * to skb_shinfo(skb)->gso_segs
         */
 -      NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
 -                                             skb_shinfo(skb)->gso_size);
 +      NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
  }
  
  static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@@ -537,8 -537,7 +537,8 @@@ static int bnx2x_fill_frag_skb(struct b
        /* This is needed in order to enable forwarding support */
        if (frag_size)
                bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
 -                                   le16_to_cpu(cqe->pkt_len));
 +                                   le16_to_cpu(cqe->pkt_len),
 +                                   le16_to_cpu(cqe->num_of_coalesced_segs));
  
  #ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
@@@ -719,7 -718,7 +719,7 @@@ static void bnx2x_tpa_stop(struct bnx2
                if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
                                         skb, cqe, cqe_idx)) {
                        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 -                              __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
 +                              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
                        bnx2x_gro_receive(bp, fp, skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS,
@@@ -994,7 -993,7 +994,7 @@@ reuse_rx
  
                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
                    PARSING_FLAGS_VLAN)
 -                      __vlan_hwaccel_put_tag(skb,
 +                      __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               le16_to_cpu(cqe_fp->vlan_tag));
                napi_gro_receive(&fp->napi, skb);
  
@@@ -2010,7 -2009,7 +2010,7 @@@ static int bnx2x_init_hw(struct bnx2x *
   * Cleans the object that have internal lists without sending
   * ramrods. Should be run when interrupts are disabled.
   */
 -static void bnx2x_squeeze_objects(struct bnx2x *bp)
 +void bnx2x_squeeze_objects(struct bnx2x *bp)
  {
        int rc;
        unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
@@@ -2615,6 -2614,9 +2615,9 @@@ int bnx2x_nic_load(struct bnx2x *bp, in
                        }
                }
  
+               /* initialize FW coalescing state machines in RAM */
+               bnx2x_update_coalesce(bp);
                /* setup the leading queue */
                rc = bnx2x_setup_leading(bp);
                if (rc) {
@@@ -2775,7 -2777,7 +2778,7 @@@ load_error0
  #endif /* ! BNX2X_STOP_ON_ERROR */
  }
  
 -static int bnx2x_drain_tx_queues(struct bnx2x *bp)
 +int bnx2x_drain_tx_queues(struct bnx2x *bp)
  {
        u8 rc = 0, cos, i;
  
@@@ -3087,11 -3089,11 +3090,11 @@@ int bnx2x_poll(struct napi_struct *napi
   * to ease the pain of our fellow microcode engineers
   * we use one mapping for both BDs
   */
 -static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
 -                                 struct bnx2x_fp_txdata *txdata,
 -                                 struct sw_tx_bd *tx_buf,
 -                                 struct eth_tx_start_bd **tx_bd, u16 hlen,
 -                                 u16 bd_prod, int nbd)
 +static u16 bnx2x_tx_split(struct bnx2x *bp,
 +                        struct bnx2x_fp_txdata *txdata,
 +                        struct sw_tx_bd *tx_buf,
 +                        struct eth_tx_start_bd **tx_bd, u16 hlen,
 +                        u16 bd_prod)
  {
        struct eth_tx_start_bd *h_tx_bd = *tx_bd;
        struct eth_tx_bd *d_tx_bd;
        int old_len = le16_to_cpu(h_tx_bd->nbytes);
  
        /* first fix first BD */
 -      h_tx_bd->nbd = cpu_to_le16(nbd);
        h_tx_bd->nbytes = cpu_to_le16(hlen);
  
 -      DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
 -         h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
 +      DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
 +         h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
  
        /* now get a new data BD
         * (after the pbd) and fill it */
  
  #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
  #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
 -static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
 +static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
  {
        __sum16 tsum = (__force __sum16) csum;
  
        return bswab16(tsum);
  }
  
 -static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 +static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
  {
        u32 rc;
 +      __u8 prot = 0;
 +      __be16 protocol;
  
        if (skb->ip_summed != CHECKSUM_PARTIAL)
 -              rc = XMIT_PLAIN;
 +              return XMIT_PLAIN;
  
 -      else {
 -              if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
 -                      rc = XMIT_CSUM_V6;
 -                      if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 -                              rc |= XMIT_CSUM_TCP;
 +      protocol = vlan_get_protocol(skb);
 +      if (protocol == htons(ETH_P_IPV6)) {
 +              rc = XMIT_CSUM_V6;
 +              prot = ipv6_hdr(skb)->nexthdr;
 +      } else {
 +              rc = XMIT_CSUM_V4;
 +              prot = ip_hdr(skb)->protocol;
 +      }
  
 +      if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
 +              if (inner_ip_hdr(skb)->version == 6) {
 +                      rc |= XMIT_CSUM_ENC_V6;
 +                      if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 +                              rc |= XMIT_CSUM_TCP;
                } else {
 -                      rc = XMIT_CSUM_V4;
 -                      if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 +                      rc |= XMIT_CSUM_ENC_V4;
 +                      if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
                                rc |= XMIT_CSUM_TCP;
                }
        }
 +      if (prot == IPPROTO_TCP)
 +              rc |= XMIT_CSUM_TCP;
  
 -      if (skb_is_gso_v6(skb))
 -              rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
 -      else if (skb_is_gso(skb))
 -              rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
 +      if (skb_is_gso_v6(skb)) {
 +              rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
 +              if (rc & XMIT_CSUM_ENC)
 +                      rc |= XMIT_GSO_ENC_V6;
 +      } else if (skb_is_gso(skb)) {
 +              rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
 +              if (rc & XMIT_CSUM_ENC)
 +                      rc |= XMIT_GSO_ENC_V4;
 +      }
  
        return rc;
  }
@@@ -3271,23 -3257,14 +3274,23 @@@ exit_lbl
  }
  #endif
  
 -static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
 -                                      u32 xmit_type)
 +static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
 +                               u32 xmit_type)
  {
 +      struct ipv6hdr *ipv6;
 +
        *parsing_data |= (skb_shinfo(skb)->gso_size <<
                              ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
                              ETH_TX_PARSE_BD_E2_LSO_MSS;
 -      if ((xmit_type & XMIT_GSO_V6) &&
 -          (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
 +
 +      if (xmit_type & XMIT_GSO_ENC_V6)
 +              ipv6 = inner_ipv6_hdr(skb);
 +      else if (xmit_type & XMIT_GSO_V6)
 +              ipv6 = ipv6_hdr(skb);
 +      else
 +              ipv6 = NULL;
 +
 +      if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
                *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
  }
  
   * @pbd:      parse BD
   * @xmit_type:        xmit flags
   */
 -static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
 -                                   struct eth_tx_parse_bd_e1x *pbd,
 -                                   u32 xmit_type)
 +static void bnx2x_set_pbd_gso(struct sk_buff *skb,
 +                            struct eth_tx_parse_bd_e1x *pbd,
 +                            u32 xmit_type)
  {
        pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
        pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
 -      pbd->tcp_flags = pbd_tcp_flags(skb);
 +      pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
  
        if (xmit_type & XMIT_GSO_V4) {
                pbd->ip_id = bswab16(ip_hdr(skb)->id);
                cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
  }
  
 +/**
 + * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
 + *
 + * @bp:                       driver handle
 + * @skb:              packet skb
 + * @parsing_data:     data to be updated
 + * @xmit_type:                xmit flags
 + *
 + * 57712/578xx related, when skb has encapsulation
 + */
 +static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
 +                               u32 *parsing_data, u32 xmit_type)
 +{
 +      *parsing_data |=
 +              ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
 +              ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
 +              ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
 +
 +      if (xmit_type & XMIT_CSUM_TCP) {
 +              *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 +
 +              return skb_inner_transport_header(skb) +
 +                      inner_tcp_hdrlen(skb) - skb->data;
 +      }
 +
 +      /* We support checksum offload for TCP and UDP only.
 +       * No need to pass the UDP header length - it's a constant.
 +       */
 +      return skb_inner_transport_header(skb) +
 +              sizeof(struct udphdr) - skb->data;
 +}
 +
  /**
   * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
   *
   * @parsing_data:     data to be updated
   * @xmit_type:                xmit flags
   *
 - * 57712 related
 + * 57712/578xx related
   */
 -static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
 -                                      u32 *parsing_data, u32 xmit_type)
 +static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
 +                              u32 *parsing_data, u32 xmit_type)
  {
        *parsing_data |=
                ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
 -              ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
 -              ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
 +              ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
 +              ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
  
        if (xmit_type & XMIT_CSUM_TCP) {
                *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
        return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
  }
  
 -static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 -      struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
 +/* set FW indication according to inner or outer protocols if tunneled */
 +static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 +                             struct eth_tx_start_bd *tx_start_bd,
 +                             u32 xmit_type)
  {
        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
  
 -      if (xmit_type & XMIT_CSUM_V4)
 -              tx_start_bd->bd_flags.as_bitfield |=
 -                                      ETH_TX_BD_FLAGS_IP_CSUM;
 -      else
 -              tx_start_bd->bd_flags.as_bitfield |=
 -                                      ETH_TX_BD_FLAGS_IPV6;
 +      if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
 +              tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
  
        if (!(xmit_type & XMIT_CSUM_TCP))
                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
   * @pbd:      parse BD to be updated
   * @xmit_type:        xmit flags
   */
 -static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 -      struct eth_tx_parse_bd_e1x *pbd,
 -      u32 xmit_type)
 +static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 +                           struct eth_tx_parse_bd_e1x *pbd,
 +                           u32 xmit_type)
  {
        u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
  
        return hlen;
  }
  
 +static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
 +                                    struct eth_tx_parse_bd_e2 *pbd_e2,
 +                                    struct eth_tx_parse_2nd_bd *pbd2,
 +                                    u16 *global_data,
 +                                    u32 xmit_type)
 +{
 +      u16 hlen_w = 0;
 +      u8 outerip_off, outerip_len = 0;
 +      /* from outer IP to transport */
 +      hlen_w = (skb_inner_transport_header(skb) -
 +                skb_network_header(skb)) >> 1;
 +
 +      /* transport len */
 +      if (xmit_type & XMIT_CSUM_TCP)
 +              hlen_w += inner_tcp_hdrlen(skb) >> 1;
 +      else
 +              hlen_w += sizeof(struct udphdr) >> 1;
 +
 +      pbd2->fw_ip_hdr_to_payload_w = hlen_w;
 +
 +      if (xmit_type & XMIT_CSUM_ENC_V4) {
 +              struct iphdr *iph = ip_hdr(skb);
 +              pbd2->fw_ip_csum_wo_len_flags_frag =
 +                      bswab16(csum_fold((~iph->check) -
 +                                        iph->tot_len - iph->frag_off));
 +      } else {
 +              pbd2->fw_ip_hdr_to_payload_w =
 +                      hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
 +      }
 +
 +      pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
 +
 +      pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
 +
 +      if (xmit_type & XMIT_GSO_V4) {
 +              pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
 +
 +              pbd_e2->data.tunnel_data.pseudo_csum =
 +                      bswab16(~csum_tcpudp_magic(
 +                                      inner_ip_hdr(skb)->saddr,
 +                                      inner_ip_hdr(skb)->daddr,
 +                                      0, IPPROTO_TCP, 0));
 +
 +              outerip_len = ip_hdr(skb)->ihl << 1;
 +      } else {
 +              pbd_e2->data.tunnel_data.pseudo_csum =
 +                      bswab16(~csum_ipv6_magic(
 +                                      &inner_ipv6_hdr(skb)->saddr,
 +                                      &inner_ipv6_hdr(skb)->daddr,
 +                                      0, IPPROTO_TCP, 0));
 +      }
 +
 +      outerip_off = (skb_network_header(skb) - skb->data) >> 1;
 +
 +      *global_data |=
 +              outerip_off |
 +              (!!(xmit_type & XMIT_CSUM_V6) <<
 +                      ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
 +              (outerip_len <<
 +                      ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
 +              ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
 +                      ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
 +}
 +
  /* called with netif_tx_lock
   * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
   * netif_wake_queue()
@@@ -3537,7 -3418,6 +3540,7 @@@ netdev_tx_t bnx2x_start_xmit(struct sk_
        struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
        struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
        struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
 +      struct eth_tx_parse_2nd_bd *pbd2 = NULL;
        u32 pbd_e2_parsing_data = 0;
        u16 pkt_prod, bd_prod;
        int nbd, txq_index;
                        mac_type = MULTICAST_ADDRESS;
        }
  
 -#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
        /* First, check if we need to linearize the skb (due to FW
           restrictions). No need to check fragmentation if page size > 8K
           (there will be no violation to FW restrictions) */
        first_bd = tx_start_bd;
  
        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 -      SET_FLAG(tx_start_bd->general_data,
 -               ETH_TX_START_BD_PARSE_NBDS,
 -               0);
  
 -      /* header nbd */
 -      SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
 +      /* header nbd: indirectly zero other flags! */
 +      tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
  
        /* remember the first BD of the packet */
        tx_buf->first_bd = txdata->tx_bd_prod;
                /* when transmitting in a vf, start bd must hold the ethertype
                 * for fw to enforce it
                 */
 -#ifndef BNX2X_STOP_ON_ERROR
 -              if (IS_VF(bp)) {
 -#endif
 +              if (IS_VF(bp))
                        tx_start_bd->vlan_or_ethertype =
                                cpu_to_le16(ntohs(eth->h_proto));
 -#ifndef BNX2X_STOP_ON_ERROR
 -              } else {
 +              else
                        /* used by FW for packet accounting */
                        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
 -              }
 -#endif
        }
  
 +      nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
 +
        /* turn on parsing and get a BD */
        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
  
        if (!CHIP_IS_E1x(bp)) {
                pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
                memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
 -              /* Set PBD in checksum offload case */
 -              if (xmit_type & XMIT_CSUM)
 +
 +              if (xmit_type & XMIT_CSUM_ENC) {
 +                      u16 global_data = 0;
 +
 +                      /* Set PBD in enc checksum offload case */
 +                      hlen = bnx2x_set_pbd_csum_enc(bp, skb,
 +                                                    &pbd_e2_parsing_data,
 +                                                    xmit_type);
 +
 +                      /* turn on 2nd parsing and get a BD */
 +                      bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 +
 +                      pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
 +
 +                      memset(pbd2, 0, sizeof(*pbd2));
 +
 +                      pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
 +                              (skb_inner_network_header(skb) -
 +                               skb->data) >> 1;
 +
 +                      if (xmit_type & XMIT_GSO_ENC)
 +                              bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
 +                                                        &global_data,
 +                                                        xmit_type);
 +
 +                      pbd2->global_data = cpu_to_le16(global_data);
 +
 +                      /* add addition parse BD indication to start BD */
 +                      SET_FLAG(tx_start_bd->general_data,
 +                               ETH_TX_START_BD_PARSE_NBDS, 1);
 +                      /* set encapsulation flag in start BD */
 +                      SET_FLAG(tx_start_bd->general_data,
 +                               ETH_TX_START_BD_TUNNEL_EXIST, 1);
 +                      nbd++;
 +              } else if (xmit_type & XMIT_CSUM) {
 +                      /* Set PBD in checksum offload case w/o encapsulation */
                        hlen = bnx2x_set_pbd_csum_e2(bp, skb,
                                                     &pbd_e2_parsing_data,
                                                     xmit_type);
 +              }
  
 -              if (IS_MF_SI(bp) || IS_VF(bp)) {
 -                      /* fill in the MAC addresses in the PBD - for local
 -                       * switching
 -                       */
 -                      bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
 -                                            &pbd_e2->src_mac_addr_mid,
 -                                            &pbd_e2->src_mac_addr_lo,
 +              /* Add the macs to the parsing BD this is a vf */
 +              if (IS_VF(bp)) {
 +                      /* override GRE parameters in BD */
 +                      bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
 +                                            &pbd_e2->data.mac_addr.src_mid,
 +                                            &pbd_e2->data.mac_addr.src_lo,
                                              eth->h_source);
 -                      bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
 -                                            &pbd_e2->dst_mac_addr_mid,
 -                                            &pbd_e2->dst_mac_addr_lo,
 +
 +                      bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
 +                                            &pbd_e2->data.mac_addr.dst_mid,
 +                                            &pbd_e2->data.mac_addr.dst_lo,
                                              eth->h_dest);
                }
  
        /* Setup the data pointer of the first BD of the packet */
        tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 -      nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
        tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
        pkt_size = tx_start_bd->nbytes;
  
        DP(NETIF_MSG_TX_QUEUED,
 -         "first bd @%p  addr (%x:%x)  nbd %d  nbytes %d  flags %x  vlan %x\n",
 +         "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
           tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
 -         le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
 +         le16_to_cpu(tx_start_bd->nbytes),
           tx_start_bd->bd_flags.as_bitfield,
           le16_to_cpu(tx_start_bd->vlan_or_ethertype));
  
  
                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
  
 -              if (unlikely(skb_headlen(skb) > hlen))
 +              if (unlikely(skb_headlen(skb) > hlen)) {
 +                      nbd++;
                        bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
                                                 &tx_start_bd, hlen,
 -                                               bd_prod, ++nbd);
 +                                               bd_prod);
 +              }
                if (!CHIP_IS_E1x(bp))
                        bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
                                             xmit_type);
        if (pbd_e2)
                DP(NETIF_MSG_TX_QUEUED,
                   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
 -                 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
 -                 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
 -                 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
 +                 pbd_e2,
 +                 pbd_e2->data.mac_addr.dst_hi,
 +                 pbd_e2->data.mac_addr.dst_mid,
 +                 pbd_e2->data.mac_addr.dst_lo,
 +                 pbd_e2->data.mac_addr.src_hi,
 +                 pbd_e2->data.mac_addr.src_mid,
 +                 pbd_e2->data.mac_addr.src_lo,
                   pbd_e2->parsing_data);
        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
  
@@@ -4737,11 -4583,11 +4740,11 @@@ static void storm_memset_hc_disable(str
        u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
        u32 addr = BAR_CSTRORM_INTMEM +
                   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
-       u16 flags = REG_RD16(bp, addr);
+       u8 flags = REG_RD8(bp, addr);
        /* clear and set */
        flags &= ~HC_INDEX_DATA_HC_ENABLED;
        flags |= enable_flag;
-       REG_WR16(bp, addr, flags);
+       REG_WR8(bp, addr, flags);
        DP(NETIF_MSG_IFUP,
           "port %x fw_sb_id %d sb_index %d disable %d\n",
           port, fw_sb_id, sb_index, disable);
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 1e60c5d139d1b046ebad084b4b46fa15fdb8f73e,c50696b396f1cb88196d4f682d61d902d0da9fc6..86d13870399ef0737835e813ee28942dc274a6fd
@@@ -75,6 -75,8 +75,6 @@@
  #define FW_FILE_NAME_E1H      "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
  #define FW_FILE_NAME_E2               "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
  
 -#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
 -
  /* Time in jiffies before concluding the transmitter is hung */
  #define TX_TIMEOUT            (5*HZ)
  
@@@ -2953,16 -2955,14 +2953,16 @@@ static unsigned long bnx2x_get_common_f
        __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
  
        /* tx only connections collect statistics (on the same index as the
 -       *  parent connection). The statistics are zeroed when the parent
 -       *  connection is initialized.
 +       * parent connection). The statistics are zeroed when the parent
 +       * connection is initialized.
         */
  
        __set_bit(BNX2X_Q_FLG_STATS, &flags);
        if (zero_stats)
                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
  
 +      __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
 +      __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
  
  #ifdef BNX2X_STOP_ON_ERROR
        __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
@@@ -3227,29 -3227,16 +3227,29 @@@ static void bnx2x_drv_info_ether_stat(s
  {
        struct eth_stats_info *ether_stat =
                &bp->slowpath->drv_info_to_mcp.ether_stat;
 +      struct bnx2x_vlan_mac_obj *mac_obj =
 +              &bp->sp_objs->mac_obj;
 +      int i;
  
        strlcpy(ether_stat->version, DRV_MODULE_VERSION,
                ETH_STAT_INFO_VERSION_LEN);
  
 -      bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
 -                                      DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
 -                                      ether_stat->mac_local);
 -
 +      /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
 +       * mac_local field in ether_stat struct. The base address is offset by 2
 +       * bytes to account for the field being 8 bytes but a mac address is
 +       * only 6 bytes. Likewise, the stride for the get_n_elements function is
 +       * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
 +       * allocated by the ether_stat struct, so the macs will land in their
 +       * proper positions.
 +       */
 +      for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
 +              memset(ether_stat->mac_local + i, 0,
 +                     sizeof(ether_stat->mac_local[0]));
 +      mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
 +                              DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
 +                              ether_stat->mac_local + MAC_PAD, MAC_PAD,
 +                              ETH_ALEN);
        ether_stat->mtu_size = bp->dev->mtu;
 -
        if (bp->dev->features & NETIF_F_RXCSUM)
                ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
        if (bp->dev->features & NETIF_F_TSO)
@@@ -3271,7 -3258,8 +3271,7 @@@ static void bnx2x_drv_info_fcoe_stat(st
        if (!CNIC_LOADED(bp))
                return;
  
 -      memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
 -             bp->fip_mac, ETH_ALEN);
 +      memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
  
        fcoe_stat->qos_priority =
                app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@@ -3373,8 -3361,8 +3373,8 @@@ static void bnx2x_drv_info_iscsi_stat(s
        if (!CNIC_LOADED(bp))
                return;
  
 -      memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
 -             bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
 +      memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
 +             ETH_ALEN);
  
        iscsi_stat->qos_priority =
                app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@@ -4959,7 -4947,7 +4959,7 @@@ static void bnx2x_after_function_update
                                  q);
        }
  
-       if (!NO_FCOE(bp)) {
+       if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
                fp = &bp->fp[FCOE_IDX(bp)];
                queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
  
@@@ -6041,10 -6029,9 +6041,10 @@@ void bnx2x_nic_init(struct bnx2x *bp, u
        rmb();
        bnx2x_init_rx_rings(bp);
        bnx2x_init_tx_rings(bp);
 -
 -      if (IS_VF(bp))
 +      if (IS_VF(bp)) {
 +              bnx2x_memset_stats(bp);
                return;
 +      }
  
        /* Initialize MOD_ABS interrupts */
        bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
@@@ -9538,10 -9525,6 +9538,10 @@@ sp_rtnl_not_reset
                bnx2x_vfpf_storm_rx_mode(bp);
        }
  
 +      if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 +                             &bp->sp_rtnl_state))
 +              bnx2x_pf_set_vfs_vlan(bp);
 +
        /* work that needs the rtnl lock not taken (as it takes the lock itself
         * and can be called from other contexts as well)
         */
  
        /* enable SR-IOV if applicable */
        if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
 -                                             &bp->sp_rtnl_state))
 +                                             &bp->sp_rtnl_state)) {
 +              bnx2x_disable_sriov(bp);
                bnx2x_enable_sriov(bp);
 +      }
  }
  
  static void bnx2x_period_task(struct work_struct *work)
@@@ -9720,31 -9701,6 +9720,31 @@@ static struct bnx2x_prev_path_list 
        return NULL;
  }
  
 +static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
 +{
 +      struct bnx2x_prev_path_list *tmp_list;
 +      int rc;
 +
 +      rc = down_interruptible(&bnx2x_prev_sem);
 +      if (rc) {
 +              BNX2X_ERR("Received %d while trying to take lock\n", rc);
 +              return rc;
 +      }
 +
 +      tmp_list = bnx2x_prev_path_get_entry(bp);
 +      if (tmp_list) {
 +              tmp_list->aer = 1;
 +              rc = 0;
 +      } else {
 +              BNX2X_ERR("path %d: Entry does not exist for EEH; flow occurs before initial insmod is over?\n",
 +                        BP_PATH(bp));
 +      }
 +
 +      up(&bnx2x_prev_sem);
 +
 +      return rc;
 +}
 +
  static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
  {
        struct bnx2x_prev_path_list *tmp_list;
        if (down_trylock(&bnx2x_prev_sem))
                return false;
  
 -      list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
 -              if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
 -                  bp->pdev->bus->number == tmp_list->bus &&
 -                  BP_PATH(bp) == tmp_list->path) {
 +      tmp_list = bnx2x_prev_path_get_entry(bp);
 +      if (tmp_list) {
 +              if (tmp_list->aer) {
 +                      DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
 +                         BP_PATH(bp));
 +              } else {
                        rc = true;
                        BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
                                       BP_PATH(bp));
 -                      break;
                }
        }
  
@@@ -9775,28 -9730,6 +9775,28 @@@ static int bnx2x_prev_mark_path(struct 
        struct bnx2x_prev_path_list *tmp_list;
        int rc;
  
 +      rc = down_interruptible(&bnx2x_prev_sem);
 +      if (rc) {
 +              BNX2X_ERR("Received %d while trying to take lock\n", rc);
 +              return rc;
 +      }
 +
 +      /* Check whether the entry for this path already exists */
 +      tmp_list = bnx2x_prev_path_get_entry(bp);
 +      if (tmp_list) {
 +              if (!tmp_list->aer) {
 +                      BNX2X_ERR("Re-Marking the path.\n");
 +              } else {
 +                      DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
 +                         BP_PATH(bp));
 +                      tmp_list->aer = 0;
 +              }
 +              up(&bnx2x_prev_sem);
 +              return 0;
 +      }
 +      up(&bnx2x_prev_sem);
 +
 +      /* Create an entry for this path and add it */
        tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
        if (!tmp_list) {
                BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
        tmp_list->bus = bp->pdev->bus->number;
        tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
        tmp_list->path = BP_PATH(bp);
 +      tmp_list->aer = 0;
        tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
  
        rc = down_interruptible(&bnx2x_prev_sem);
                BNX2X_ERR("Received %d while trying to take lock\n", rc);
                kfree(tmp_list);
        } else {
 -              BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
 -                              BP_PATH(bp));
 +              DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
 +                 BP_PATH(bp));
                list_add(&tmp_list->list, &bnx2x_prev_list);
                up(&bnx2x_prev_sem);
        }
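The hunk above follows a find-or-create discipline around bnx2x_prev_sem: look the entry up under the lock, and only allocate and insert when none exists. A hedged userspace analogue, with a pthread mutex standing in for the semaphore and all names illustrative:

#include <pthread.h>
#include <stdlib.h>

struct path_entry {
        int path;
        int aer;                        /* mirrors tmp_list->aer */
        struct path_entry *next;
};

static struct path_entry *prev_list;
static pthread_mutex_t prev_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold prev_lock, like bnx2x_prev_path_get_entry(). */
static struct path_entry *path_get_entry(int path)
{
        struct path_entry *e;

        for (e = prev_list; e; e = e->next)
                if (e->path == path)
                        return e;
        return NULL;
}

int mark_path(int path)
{
        struct path_entry *e;

        pthread_mutex_lock(&prev_lock);
        e = path_get_entry(path);
        if (e) {
                e->aer = 0;             /* clear a stale AER indication */
                pthread_mutex_unlock(&prev_lock);
                return 0;
        }
        pthread_mutex_unlock(&prev_lock);

        /* Allocate outside the lock, then insert under it, as the hunk does. */
        e = calloc(1, sizeof(*e));
        if (!e)
                return -1;
        e->path = path;
        pthread_mutex_lock(&prev_lock);
        e->next = prev_list;
        prev_list = e;
        pthread_mutex_unlock(&prev_lock);
        return 0;
}

int main(void)
{
        return mark_path(0);
}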
@@@ -9946,6 -9878,10 +9946,10 @@@ static int bnx2x_prev_unload_common(str
                                REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
                        }
                }
+               if (!CHIP_IS_E1x(bp))
+                       /* block FW from writing to host */
+                       REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
                /* wait until BRB is empty */
                tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
                while (timer_count) {
@@@ -10054,7 -9990,6 +10058,7 @@@ static int bnx2x_prev_unload(struct bnx
        }
  
        do {
 +              int aer = 0;
                /* Lock MCP using an unload request */
                fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
                if (!fw) {
                        break;
                }
  
 -              if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
 +              rc = down_interruptible(&bnx2x_prev_sem);
 +              if (rc) {
 +                      BNX2X_ERR("Cannot check for AER; Received %d while trying to take lock\n",
 +                                rc);
 +              } else {
 +                      /* If Path is marked by EEH, ignore unload status */
 +                      aer = !!(bnx2x_prev_path_get_entry(bp) &&
 +                               bnx2x_prev_path_get_entry(bp)->aer);
 +                      up(&bnx2x_prev_sem);
 +              }
 +
 +              if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
                        rc = bnx2x_prev_unload_common(bp);
                        break;
                }
@@@ -10114,12 -10038,8 +10118,12 @@@ static void bnx2x_get_common_hwinfo(str
        id = ((val & 0xffff) << 16);
        val = REG_RD(bp, MISC_REG_CHIP_REV);
        id |= ((val & 0xf) << 12);
 -      val = REG_RD(bp, MISC_REG_CHIP_METAL);
 -      id |= ((val & 0xff) << 4);
 +
 +      /* Metal is read from PCI regs, but we can't access >=0x400 from
 +       * the configuration space (so we need to reg_rd)
 +       */
 +      val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
 +      id |= (((val >> 24) & 0xf) << 4);
        val = REG_RD(bp, MISC_REG_BOND_ID);
        id |= (val & 0xf);
        bp->common.chip_id = id;
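The chip id assembled above packs four hardware fields into one 32-bit value. A minimal sketch of the same bit-packing, with made-up sample values standing in for the register reads:

#include <stdint.h>
#include <stdio.h>

/* Field widths follow the shifts in the hunk above. */
static uint32_t make_chip_id(uint32_t num, uint32_t rev,
                             uint32_t metal, uint32_t bond)
{
        uint32_t id = 0;

        id |= (num & 0xffff) << 16;     /* chip number, bits 31..16 */
        id |= (rev & 0xf) << 12;        /* chip revision, bits 15..12 */
        id |= (metal & 0xf) << 4;       /* metal rev, (PCI_ID_VAL3 >> 24) & 0xf */
        id |= (bond & 0xf);             /* bond id, bits 3..0 */
        return id;
}

int main(void)
{
        printf("chip_id = 0x%08x\n", make_chip_id(0x168e, 0x1, 0x0, 0x0));
        return 0;
}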
@@@ -10896,12 -10816,14 +10900,12 @@@ static void bnx2x_get_cnic_mac_hwinfo(s
                        }
                }
  
 -              if (IS_MF_STORAGE_SD(bp))
 -                      /* Zero primary MAC configuration */
 -                      memset(bp->dev->dev_addr, 0, ETH_ALEN);
 -
 -              if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
 -                      /* use FIP MAC as primary MAC */
 +              /* If this is a storage-only interface, use SAN mac as
 +               * primary MAC. Notice that for SD this is already the case,
 +               * as the SAN mac was copied from the primary MAC.
 +               */
 +              if (IS_MF_FCOE_AFEX(bp))
                        memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
 -
        } else {
                val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
                                iscsi_mac_upper);
@@@ -11138,9 -11060,6 +11142,9 @@@ static int bnx2x_get_hwinfo(struct bnx2
                                } else
                                        BNX2X_DEV_INFO("illegal OV for SD\n");
                                break;
 +                      case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
 +                              bp->mf_config[vn] = 0;
 +                              break;
                        default:
                                /* Unknown configuration: reset mf_config */
                                bp->mf_config[vn] = 0;
@@@ -11487,6 -11406,26 +11491,6 @@@ static int bnx2x_init_bp(struct bnx2x *
   * net_device service functions
   */
  
 -static int bnx2x_open_epilog(struct bnx2x *bp)
 -{
 -      /* Enable sriov via delayed work. This must be done via delayed work
 -       * because it causes the probe of the vf devices to be run, which invoke
 -       * register_netdevice which must have rtnl lock taken. As we are holding
 -       * the lock right now, that could only work if the probe would not take
 -       * the lock. However, as the probe of the vf may be called from other
 -       * contexts as well (such as when passthrough to a VM fails) it can't assume
 -       * the lock is being held for it. Using delayed work here allows the
 -       * probe code to simply take the lock (i.e. wait for it to be released
 -       * if it is being held).
 -       */
 -      smp_mb__before_clear_bit();
 -      set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
 -      smp_mb__after_clear_bit();
 -      schedule_delayed_work(&bp->sp_rtnl_task, 0);
 -
 -      return 0;
 -}
 -
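The comment in the removed bnx2x_open_epilog() captures a general pattern: work that must itself take a lock the caller currently holds has to be deferred to another context rather than run inline. A hedged userspace sketch of the same idea, with a pthread mutex standing in for rtnl:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* "rtnl" */

static void *deferred_enable(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&cfg_lock);  /* blocks until the caller lets go */
        printf("enable step ran with the lock held\n");
        pthread_mutex_unlock(&cfg_lock);
        return NULL;
}

int main(void)
{
        pthread_t worker;

        pthread_mutex_lock(&cfg_lock);  /* we hold the lock, like open() */
        /* Running deferred_enable() inline here would self-deadlock;
         * handing it to a worker lets it wait for the lock instead. */
        pthread_create(&worker, NULL, deferred_enable, NULL);
        pthread_mutex_unlock(&cfg_lock);

        pthread_join(worker, NULL);
        return 0;
}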
  /* called with rtnl_lock */
  static int bnx2x_open(struct net_device *dev)
  {
@@@ -11856,8 -11795,6 +11860,8 @@@ static const struct net_device_ops bnx2
        .ndo_setup_tc           = bnx2x_setup_tc,
  #ifdef CONFIG_BNX2X_SRIOV
        .ndo_set_vf_mac         = bnx2x_set_vf_mac,
 +      .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
 +      .ndo_get_vf_config      = bnx2x_get_vf_config,
  #endif
  #ifdef NETDEV_FCOE_WWNN
        .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
@@@ -12020,26 -11957,19 +12024,26 @@@ static int bnx2x_init_dev(struct bnx2x 
        dev->watchdog_timeo = TX_TIMEOUT;
  
        dev->netdev_ops = &bnx2x_netdev_ops;
 -      bnx2x_set_ethtool_ops(dev);
 +      bnx2x_set_ethtool_ops(bp, dev);
  
        dev->priv_flags |= IFF_UNICAST_FLT;
  
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
                NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
 -              NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
 +              NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
 +      if (!CHIP_IS_E1x(bp)) {
 +              dev->hw_features |= NETIF_F_GSO_GRE;
 +              dev->hw_enc_features =
 +                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
 +                      NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
 +                      NETIF_F_GSO_GRE;
 +      }
  
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
  
 -      dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
 +      dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
        if (bp->flags & USING_DAC_FLAG)
                dev->features |= NETIF_F_HIGHDMA;
  
@@@ -12521,7 -12451,7 +12525,7 @@@ static int bnx2x_init_one(struct pci_de
         * l2 connections.
         */
        if (IS_VF(bp)) {
 -              bnx2x_vf_map_doorbells(bp);
 +              bp->doorbells = bnx2x_vf_doorbells(bp);
                rc = bnx2x_vf_pci_alloc(bp);
                if (rc)
                        goto init_one_exit;
                        goto init_one_exit;
        }
  
 -      /* Enable SRIOV if capability found in configuration space.
 -       * Once the generic SR-IOV framework makes it in from the
 -       * pci tree this will be revised, to allow dynamic control
 -       * over the number of VFs. Right now, change the num of vfs
 -       * param below to enable SR-IOV.
 -       */
 -      rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
 +      /* Enable SRIOV if capability found in configuration space */
 +      rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
        if (rc)
                goto init_one_exit;
  
        if (CHIP_IS_E1x(bp))
                bp->flags |= NO_FCOE_FLAG;
  
 -      /* disable FCOE for 57840 device, until FW supports it */
 -      switch (ent->driver_data) {
 -      case BCM57840_O:
 -      case BCM57840_4_10:
 -      case BCM57840_2_20:
 -      case BCM57840_MFO:
 -      case BCM57840_MF:
 -              bp->flags |= NO_FCOE_FLAG;
 -      }
 -
        /* Set bp->num_queues for MSI-X mode*/
        bnx2x_set_num_queues(bp);
  
@@@ -12695,7 -12640,9 +12699,7 @@@ static void bnx2x_remove_one(struct pci
  
  static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
  {
 -      int i;
 -
 -      bp->state = BNX2X_STATE_ERROR;
 +      bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
  
        bp->rx_mode = BNX2X_RX_MODE_NONE;
  
  
        /* Stop Tx */
        bnx2x_tx_disable(bp);
 -
 -      bnx2x_netif_stop(bp, 0);
        /* Delete all NAPI objects */
        bnx2x_del_all_napi(bp);
        if (CNIC_LOADED(bp))
                bnx2x_del_all_napi_cnic(bp);
 +      netdev_reset_tc(bp->dev);
  
        del_timer_sync(&bp->timer);
 +      cancel_delayed_work(&bp->sp_task);
 +      cancel_delayed_work(&bp->period_task);
  
 -      bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 -
 -      /* Release IRQs */
 -      bnx2x_free_irq(bp);
 -
 -      /* Free SKBs, SGEs, TPA pool and driver internals */
 -      bnx2x_free_skbs(bp);
 -
 -      for_each_rx_queue(bp, i)
 -              bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 -
 -      bnx2x_free_mem(bp);
 +      spin_lock_bh(&bp->stats_lock);
 +      bp->stats_state = STATS_STATE_DISABLED;
 +      spin_unlock_bh(&bp->stats_lock);
  
 -      bp->state = BNX2X_STATE_CLOSED;
 +      bnx2x_save_statistics(bp);
  
        netif_carrier_off(bp->dev);
  
@@@ -12754,8 -12709,6 +12758,8 @@@ static pci_ers_result_t bnx2x_io_error_
  
        rtnl_lock();
  
 +      BNX2X_ERR("IO error detected\n");
 +
        netif_device_detach(dev);
  
        if (state == pci_channel_io_perm_failure) {
        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);
  
 +      bnx2x_prev_path_mark_eeh(bp);
 +
        pci_disable_device(pdev);
  
        rtnl_unlock();
@@@ -12786,10 -12737,9 +12790,10 @@@ static pci_ers_result_t bnx2x_io_slot_r
  {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);
 +      int i;
  
        rtnl_lock();
 -
 +      BNX2X_ERR("IO slot reset initializing...\n");
        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);
  
 +      if (netif_running(dev)) {
 +              BNX2X_ERR("IO slot reset --> driver unload\n");
 +              if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
 +                      u32 v;
 +
 +                      v = SHMEM2_RD(bp,
 +                                    drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
 +                      SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
 +                                v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
 +              }
 +              bnx2x_drain_tx_queues(bp);
 +              bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
 +              bnx2x_netif_stop(bp, 1);
 +              bnx2x_free_irq(bp);
 +
 +              /* Report UNLOAD_DONE to MCP */
 +              bnx2x_send_unload_done(bp, true);
 +
 +              bp->sp_state = 0;
 +              bp->port.pmf = 0;
 +
 +              bnx2x_prev_unload(bp);
 +
 +              /* We should have reset the engine, so it's fair to
 +               * assume the FW will no longer write to the bnx2x driver.
 +               */
 +              bnx2x_squeeze_objects(bp);
 +              bnx2x_free_skbs(bp);
 +              for_each_rx_queue(bp, i)
 +                      bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 +              bnx2x_free_fp_mem(bp);
 +              bnx2x_free_mem(bp);
 +
 +              bp->state = BNX2X_STATE_CLOSED;
 +      }
 +
        rtnl_unlock();
  
        return PCI_ERS_RESULT_RECOVERED;
@@@ -12865,9 -12779,6 +12869,9 @@@ static void bnx2x_io_resume(struct pci_
  
        bnx2x_eeh_recover(bp);
  
 +      bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
 +                                                      DRV_MSG_SEQ_NUMBER_MASK;
 +
        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);
  
@@@ -12890,9 -12801,6 +12894,9 @@@ static struct pci_driver bnx2x_pci_driv
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
 +#ifdef CONFIG_BNX2X_SRIOV
 +      .sriov_configure = bnx2x_sriov_configure,
 +#endif
  };
  
  static int __init bnx2x_init(void)
@@@ -13450,6 -13358,7 +13454,7 @@@ static int bnx2x_unregister_cnic(struc
        RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
+       bp->cnic_enabled = false;
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;
  
index 21808680b91fb3b8e238b9be5d3bcf34096eeacc,2886c9b63f9099059d3c8c0fe7a86cddc912854f..654e7820daa0705d0a15812d13f5d0f49d4583a5
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (C) 2005 - 2011 Emulex
 + * Copyright (C) 2005 - 2013 Emulex
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or
@@@ -146,16 -146,20 +146,16 @@@ static int be_queue_alloc(struct be_ada
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
 -                                   GFP_KERNEL);
 +                                   GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
 -      memset(mem->va, 0, mem->size);
        return 0;
  }
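The change above drops a separate memset() by asking the allocator for zeroed memory via __GFP_ZERO. The userspace analogue, purely for illustration, is preferring calloc() over malloc() followed by memset():

#include <stdlib.h>
#include <string.h>

void *queue_alloc_old(size_t len, size_t entry_size)
{
        void *va = malloc(len * entry_size);

        if (!va)
                return NULL;
        memset(va, 0, len * entry_size);        /* separate zeroing step */
        return va;
}

void *queue_alloc_new(size_t len, size_t entry_size)
{
        /* calloc() zeroes for us, the way GFP_KERNEL | __GFP_ZERO lets
         * dma_alloc_coherent() do in the hunk above. */
        return calloc(len, entry_size);
}

int main(void)
{
        void *a = queue_alloc_old(4, 16);
        void *b = queue_alloc_new(4, 16);

        free(a);
        free(b);
        return 0;
}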
  
 -static void be_intr_set(struct be_adapter *adapter, bool enable)
 +static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
  {
        u32 reg, enabled;
  
 -      if (adapter->eeh_error)
 -              return;
 -
        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
  }
  
 +static void be_intr_set(struct be_adapter *adapter, bool enable)
 +{
 +      int status = 0;
 +
 +      /* On lancer interrupts can't be controlled via this register */
 +      if (lancer_chip(adapter))
 +              return;
 +
 +      if (adapter->eeh_error)
 +              return;
 +
 +      status = be_cmd_intr_set(adapter, enable);
 +      if (status)
 +              be_reg_intr_set(adapter, enable);
 +}
 +
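The new be_intr_set() tries the firmware command path first and falls back to the register write only when the command fails. A trivial sketch of that fallback shape (function names are hypothetical stand-ins):

#include <stdio.h>

static int cmd_intr_set(int enable)     /* stand-in for be_cmd_intr_set */
{
        (void)enable;
        return -1;                      /* pretend the FW rejects the command */
}

static void reg_intr_set(int enable)    /* stand-in for be_reg_intr_set */
{
        printf("fallback: wrote HOSTINTR=%d via PCI config\n", enable);
}

static void intr_set(int enable)
{
        if (cmd_intr_set(enable))       /* non-zero status -> fall back */
                reg_intr_set(enable);
}

int main(void)
{
        intr_set(1);
        return 0;
}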
  static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
  {
        u32 val = 0;
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
  }
  
 -static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
 +static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
 +                        u16 posted)
  {
        u32 val = 0;
 -      val |= qid & DB_TXULP_RING_ID_MASK;
 +      val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
  
        wmb();
 -      iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
 +      iowrite32(val, adapter->db + txo->db_offset);
  }
  
  static void be_eq_notify(struct be_adapter *adapter, u16 qid,
@@@ -772,8 -759,9 +772,9 @@@ static struct sk_buff *be_insert_vlan_i
  
        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
-               __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-               skb->vlan_tci = 0;
 -              skb = __vlan_put_tag(skb, vlan_tag);
++              skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+               if (skb)
+                       skb->vlan_tci = 0;
        }
  
        return skb;
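The merged lines above follow __vlan_put_tag()'s calling convention: the helper may reallocate or free the skb, so the caller must adopt the returned pointer and NULL-check it before touching the buffer again. A standalone sketch of the same convention with a plain realloc-backed buffer (illustrative, not the skb API):

#include <stdlib.h>
#include <string.h>

struct buf {
        size_t len;
        unsigned short tci;
        unsigned char *data;
};

/* May return a different pointer than it was given; on failure it frees
 * the buffer and returns NULL, like __vlan_put_tag() with an skb. */
static struct buf *insert_tag(struct buf *b, unsigned short tag)
{
        unsigned char *p = realloc(b->data, b->len + 4);

        if (!p) {
                free(b->data);
                free(b);
                return NULL;            /* buffer is gone; don't touch it */
        }
        memmove(p + 4, p, b->len);      /* make room for the 4-byte tag */
        p[0] = 0x81; p[1] = 0x00;       /* 0x8100 = ETH_P_8021Q */
        p[2] = tag >> 8; p[3] = tag & 0xff;
        b->data = p;
        b->len += 4;
        return b;
}

int main(void)
{
        struct buf *b = calloc(1, sizeof(*b));

        if (!b)
                return 1;
        b->len = 6;
        b->data = calloc(1, b->len);

        /* Correct usage, mirroring the merged hunk: take the returned
         * pointer, then touch the buffer only after the NULL check. */
        b = insert_tag(b, 100);
        if (b)
                b->tci = 0;
        return 0;
}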
@@@ -834,7 -822,7 +835,7 @@@ static netdev_tx_t be_xmit(struct sk_bu
                        stopped = true;
                }
  
 -              be_txq_notify(adapter, txq->id, wrb_cnt);
 +              be_txq_notify(adapter, txo, wrb_cnt);
  
                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
@@@ -903,7 -891,7 +904,7 @@@ set_vlan_promisc
        return status;
  }
  
 -static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
 +static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
  {
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
@@@ -929,7 -917,7 +930,7 @@@ ret
        return status;
  }
  
 -static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
 +static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
  {
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
@@@ -1384,7 -1372,7 +1385,7 @@@ static void be_rx_compl_process(struct 
  
  
        if (rxcp->vlanf)
 -              __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
  
        netif_receive_skb(skb);
  }
@@@ -1440,7 -1428,7 +1441,7 @@@ void be_rx_compl_process_gro(struct be_
                skb->rxhash = rxcp->rss_hash;
  
        if (rxcp->vlanf)
 -              __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
  
        napi_gro_frags(napi);
  }
@@@ -1970,7 -1958,7 +1971,7 @@@ static int be_tx_qs_create(struct be_ad
                if (status)
                        return status;
  
 -              status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
 +              status = be_cmd_txq_create(adapter, txo);
                if (status)
                        return status;
        }
@@@ -2448,6 -2436,9 +2449,6 @@@ static int be_close(struct net_device *
  
        be_roce_dev_close(adapter);
  
 -      if (!lancer_chip(adapter))
 -              be_intr_set(adapter, false);
 -
        for_all_evt_queues(adapter, eqo, i)
                napi_disable(&eqo->napi);
  
@@@ -2535,6 -2526,9 +2536,6 @@@ static int be_open(struct net_device *n
  
        be_irq_register(adapter);
  
 -      if (!lancer_chip(adapter))
 -              be_intr_set(adapter, true);
 -
        for_all_rx_queues(adapter, rxo, i)
                be_cq_notify(adapter, rxo->cq.id, true, 0);
  
@@@ -2569,9 -2563,10 +2570,9 @@@ static int be_setup_wol(struct be_adapt
  
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
        cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 -                                  GFP_KERNEL);
 +                                  GFP_KERNEL | __GFP_ZERO);
        if (cmd.va == NULL)
                return -1;
 -      memset(cmd.va, 0, cmd.size);
  
        if (enable) {
                status = pci_write_config_dword(adapter->pdev,
@@@ -2719,8 -2714,7 +2720,8 @@@ static int be_vfs_if_create(struct be_a
  
        for_all_vfs(adapter, vf_cfg, vf) {
                if (!BE3_chip(adapter))
 -                      be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
 +                      be_cmd_get_profile_config(adapter, &cap_flags,
 +                                                NULL, vf + 1);
  
                /* If a FW profile exists, then cap_flags are updated */
                en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
@@@ -2884,14 -2878,11 +2885,14 @@@ static void be_get_resources(struct be_
        u16 dev_num_vfs;
        int pos, status;
        bool profile_present = false;
 +      u16 txq_count = 0;
  
        if (!BEx_chip(adapter)) {
                status = be_cmd_get_func_config(adapter);
                if (!status)
                        profile_present = true;
 +      } else if (BE3_chip(adapter) && be_physfn(adapter)) {
 +              be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
        }
  
        if (profile_present) {
                        adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
  
                adapter->max_mcast_mac = BE_MAX_MC;
 -              adapter->max_tx_queues = MAX_TX_QS;
 +              adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
 +              adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
 +                                             MAX_TX_QS);
                adapter->max_rss_queues = (adapter->be3_native) ?
                                           BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
                adapter->max_event_queues = BE3_MAX_RSS_QS;
@@@ -2965,8 -2954,7 +2966,8 @@@ static int be_get_config(struct be_adap
  
        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
                                     &adapter->function_mode,
 -                                   &adapter->function_caps);
 +                                   &adapter->function_caps,
 +                                   &adapter->asic_rev);
        if (status)
                goto err;
  
@@@ -3227,7 -3215,7 +3228,7 @@@ static int be_flash(struct be_adapter *
        return 0;
  }
  
 -/* For BE2 and BE3 */
 +/* For BE2, BE3 and BE3-R */
  static int be_flash_BEx(struct be_adapter *adapter,
                         const struct firmware *fw,
                         struct be_dma_mem *flash_cmd,
@@@ -3470,9 -3458,11 +3471,9 @@@ static int lancer_fw_download(struct be
        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
                                + LANCER_FW_DOWNLOAD_CHUNK;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
 -                                              &flash_cmd.dma, GFP_KERNEL);
 +                                        &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
 -              dev_err(&adapter->pdev->dev,
 -                      "Memory allocation failure while flashing\n");
                goto lancer_fw_exit;
        }
  
@@@ -3540,22 -3530,18 +3541,22 @@@ lancer_fw_exit
  
  #define UFI_TYPE2             2
  #define UFI_TYPE3             3
 +#define UFI_TYPE3R            10
  #define UFI_TYPE4             4
  static int be_get_ufi_type(struct be_adapter *adapter,
 -                         struct flash_file_hdr_g2 *fhdr)
 +                         struct flash_file_hdr_g3 *fhdr)
  {
        if (fhdr == NULL)
                goto be_get_ufi_exit;
  
        if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
                return UFI_TYPE4;
 -      else if (BE3_chip(adapter) && fhdr->build[0] == '3')
 -              return UFI_TYPE3;
 -      else if (BE2_chip(adapter) && fhdr->build[0] == '2')
 +      else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
 +              if (fhdr->asic_type_rev == 0x10)
 +                      return UFI_TYPE3R;
 +              else
 +                      return UFI_TYPE3;
 +      } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
                return UFI_TYPE2;
  
  be_get_ufi_exit:
  
  static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
  {
 -      struct flash_file_hdr_g2 *fhdr;
        struct flash_file_hdr_g3 *fhdr3;
        struct image_hdr *img_hdr_ptr = NULL;
        struct be_dma_mem flash_cmd;
                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
 -              dev_err(&adapter->pdev->dev,
 -                      "Memory allocation failure while flashing\n");
                goto be_fw_exit;
        }
  
        p = fw->data;
 -      fhdr = (struct flash_file_hdr_g2 *)p;
 +      fhdr3 = (struct flash_file_hdr_g3 *)p;
  
 -      ufi_type = be_get_ufi_type(adapter, fhdr);
 +      ufi_type = be_get_ufi_type(adapter, fhdr3);
  
 -      fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
        num_imgs = le32_to_cpu(fhdr3->num_imgs);
        for (i = 0; i < num_imgs; i++) {
                img_hdr_ptr = (struct image_hdr *)(fw->data +
                                (sizeof(struct flash_file_hdr_g3) +
                                 i * sizeof(struct image_hdr)));
                if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
 -                      if (ufi_type == UFI_TYPE4)
 +                      switch (ufi_type) {
 +                      case UFI_TYPE4:
                                status = be_flash_skyhawk(adapter, fw,
                                                        &flash_cmd, num_imgs);
 -                      else if (ufi_type == UFI_TYPE3)
 +                              break;
 +                      case UFI_TYPE3R:
                                status = be_flash_BEx(adapter, fw, &flash_cmd,
                                                      num_imgs);
 +                              break;
 +                      case UFI_TYPE3:
 +                              /* Do not flash this ufi on BE3-R cards */
 +                              if (adapter->asic_rev < 0x10)
 +                                      status = be_flash_BEx(adapter, fw,
 +                                                            &flash_cmd,
 +                                                            num_imgs);
 +                              else {
 +                                      status = -1;
 +                                      dev_err(&adapter->pdev->dev,
 +                                              "Can't load BE3 UFI on BE3R\n");
 +                              }
 +                      }
                }
        }
  
@@@ -3688,12 -3663,12 +3689,12 @@@ static void be_netdev_init(struct net_d
  
        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
 -              NETIF_F_HW_VLAN_TX;
 +              NETIF_F_HW_VLAN_CTAG_TX;
        if (be_multi_rxq(adapter))
                netdev->hw_features |= NETIF_F_RXHASH;
  
        netdev->features |= netdev->hw_features |
 -              NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
 +              NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
  
        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@@ -3817,13 -3792,12 +3818,13 @@@ static int be_ctrl_init(struct be_adapt
  
        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
        rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
 -                                      &rx_filter->dma, GFP_KERNEL);
 +                                         &rx_filter->dma,
 +                                         GFP_KERNEL | __GFP_ZERO);
        if (rx_filter->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
        }
 -      memset(rx_filter->va, 0, rx_filter->size);
 +
        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);
@@@ -3865,9 -3839,10 +3866,9 @@@ static int be_stats_init(struct be_adap
                cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
  
        cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
 -                                   GFP_KERNEL);
 +                                   GFP_KERNEL | __GFP_ZERO);
        if (cmd->va == NULL)
                return -1;
 -      memset(cmd->va, 0, cmd->size);
        return 0;
  }
  
@@@ -3879,7 -3854,6 +3880,7 @@@ static void be_remove(struct pci_dev *p
                return;
  
        be_roce_dev_remove(adapter);
 +      be_intr_set(adapter, false);
  
        cancel_delayed_work_sync(&adapter->func_recovery_work);
  
@@@ -4134,11 -4108,6 +4135,11 @@@ static int be_probe(struct pci_dev *pde
  
        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
 +              status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 +              if (status < 0) {
 +                      dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
 +                      goto free_netdev;
 +              }
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                        goto ctrl_clean;
        }
  
 -      /* The INTR bit may be set in the card when probed by a kdump kernel
 -       * after a crash.
 -       */
 -      if (!lancer_chip(adapter))
 -              be_intr_set(adapter, false);
 +      /* Wait for interrupts to quiesce after an FLR */
 +      msleep(100);
 +
 +      /* Allow interrupts for other ULPs running on NIC function */
 +      be_intr_set(adapter, true);
  
        status = be_stats_init(adapter);
        if (status)
index 20890874ead74e231bb923118314a66e6ac2b673,73195f643c9c3b23e45a0fac4008caa919473a25..2451ab1b5a83c46dc6c1bae24ad8bfee509f6eab
@@@ -29,6 -29,7 +29,6 @@@
  #include <linux/ioport.h>
  #include <linux/slab.h>
  #include <linux/interrupt.h>
 -#include <linux/pci.h>
  #include <linux/init.h>
  #include <linux/delay.h>
  #include <linux/netdevice.h>
  
  #include <asm/cacheflush.h>
  
 -#ifndef CONFIG_ARM
 -#include <asm/coldfire.h>
 -#include <asm/mcfsim.h>
 -#endif
 -
  #include "fec.h"
  
  #if defined(CONFIG_ARM)
@@@ -100,9 -106,6 +100,9 @@@ static struct platform_device_id fec_de
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                                FEC_QUIRK_HAS_BUFDESC_EX,
 +      }, {
 +              .name = "mvf-fec",
 +              .driver_data = FEC_QUIRK_ENET_MAC,
        }, {
                /* sentinel */
        }
@@@ -114,7 -117,6 +114,7 @@@ enum imx_fec_type 
        IMX27_FEC,      /* runs on i.mx27/35/51 */
        IMX28_FEC,
        IMX6Q_FEC,
 +      MVF_FEC,
  };
  
  static const struct of_device_id fec_dt_ids[] = {
        { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
        { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
        { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
 +      { .compatible = "fsl,mvf-fec", .data = &fec_devtype[MVF_FEC], },
        { /* sentinel */ }
  };
  MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@@ -261,7 -262,7 +261,7 @@@ fec_enet_start_xmit(struct sk_buff *skb
                /* Oops.  All transmit buffers are full.  Bail out.
                 * This should not happen, since ndev->tbusy should be set.
                 */
 -              printk("%s: tx queue full!.\n", ndev->name);
 +              netdev_err(ndev, "tx queue full!\n");
                return NETDEV_TX_BUSY;
        }
  
@@@ -573,7 -574,7 +573,7 @@@ fec_stop(struct net_device *ndev
                writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
                udelay(10);
                if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
 -                      printk("fec_stop : Graceful transmit stop did not complete !\n");
 +                      netdev_err(ndev, "Graceful transmit stop did not complete!\n");
        }
  
        /* Whack a reset.  We should wait for this. */
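This file's printk()-to-netdev_err() conversions all buy the same thing: the helper prefixes the device name itself, so call sites no longer pass ndev->name by hand. A minimal userspace sketch of such a wrapper (not the kernel implementation):

#include <stdarg.h>
#include <stdio.h>

struct net_device { const char *name; };

static void ndev_err(struct net_device *ndev, const char *fmt, ...)
{
        va_list ap;

        fprintf(stderr, "%s: ", ndev->name);    /* automatic prefix */
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
}

int main(void)
{
        struct net_device dev = { .name = "eth0" };

        ndev_err(&dev, "tx queue full!\n");     /* cf. the hunk above */
        return 0;
}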
@@@ -671,7 -672,7 +671,7 @@@ fec_enet_tx(struct net_device *ndev
                }
  
                if (status & BD_ENET_TX_READY)
 -                      printk("HEY! Enet xmit interrupt and TX_READY.\n");
 +                      netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");
  
                /* Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
@@@ -739,7 -740,7 +739,7 @@@ fec_enet_rx(struct net_device *ndev, in
                 * the last indicator should be set.
                 */
                if ((status & BD_ENET_RX_LAST) == 0)
 -                      printk("FEC ENET: rcv is not +last\n");
 +                      netdev_err(ndev, "rcv is not +last\n");
  
                if (!fep->opened)
                        goto rx_processing_done;
                skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
  
                if (unlikely(!skb)) {
 -                      printk("%s: Memory squeeze, dropping packet.\n",
 -                                      ndev->name);
                        ndev->stats.rx_dropped++;
                } else {
                        skb_reserve(skb, NET_IP_ALIGN);
@@@ -913,6 -916,7 +913,6 @@@ static void fec_get_mac(struct net_devi
         */
        iap = macaddr;
  
 -#ifdef CONFIG_OF
        /*
         * 2) from device tree data
         */
                                iap = (unsigned char *) mac;
                }
        }
 -#endif
  
        /*
         * 3) from flash or fuse (via platform data)
@@@ -997,6 -1002,7 +997,7 @@@ static void fec_enet_adjust_link(struc
        } else {
                if (fep->link) {
                        fec_stop(ndev);
+                       fep->link = phy_dev->link;
                        status_change = 1;
                }
        }
@@@ -1026,7 -1032,7 +1027,7 @@@ static int fec_enet_mdio_read(struct mi
                        usecs_to_jiffies(FEC_MII_TIMEOUT));
        if (time_left == 0) {
                fep->mii_timeout = 1;
 -              printk(KERN_ERR "FEC: MDIO read timeout\n");
 +              netdev_err(fep->netdev, "MDIO read timeout\n");
                return -ETIMEDOUT;
        }
  
@@@ -1054,7 -1060,7 +1055,7 @@@ static int fec_enet_mdio_write(struct m
                        usecs_to_jiffies(FEC_MII_TIMEOUT));
        if (time_left == 0) {
                fep->mii_timeout = 1;
 -              printk(KERN_ERR "FEC: MDIO write timeout\n");
 +              netdev_err(fep->netdev, "MDIO write timeout\n");
                return -ETIMEDOUT;
        }
  
@@@ -1094,7 -1100,9 +1095,7 @@@ static int fec_enet_mii_probe(struct ne
        }
  
        if (phy_id >= PHY_MAX_ADDR) {
 -              printk(KERN_INFO
 -                      "%s: no PHY, assuming direct connection to switch\n",
 -                      ndev->name);
 +              netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
                strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
                phy_id = 0;
        }
        phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
                              fep->phy_interface);
        if (IS_ERR(phy_dev)) {
 -              printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
 +              netdev_err(ndev, "could not attach to PHY\n");
                return PTR_ERR(phy_dev);
        }
  
        fep->link = 0;
        fep->full_duplex = 0;
  
 -      printk(KERN_INFO
 -              "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
 -              ndev->name,
 -              fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
 -              fep->phy_dev->irq);
 +      netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
 +                  fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
 +                  fep->phy_dev->irq);
  
        return 0;
  }
@@@ -1433,7 -1443,7 +1434,7 @@@ static int fec_enet_alloc_buffers(struc
  
                if (fep->bufdesc_ex) {
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 -                      ebdp->cbd_esc = BD_ENET_RX_INT;
 +                      ebdp->cbd_esc = BD_ENET_TX_INT;
                }
  
                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
@@@ -1598,7 -1608,7 +1599,7 @@@ fec_set_mac_address(struct net_device *
    * Polled functionality used by netconsole and others in non-interrupt mode
   *
   */
 -void fec_poll_controller(struct net_device *dev)
 +static void fec_poll_controller(struct net_device *dev)
  {
        int i;
        struct fec_enet_private *fep = netdev_priv(dev);
@@@ -1639,9 -1649,11 +1640,9 @@@ static int fec_enet_init(struct net_dev
  
        /* Allocate memory for buffer descriptors. */
        cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
 -                      GFP_KERNEL);
 -      if (!cbd_base) {
 -              printk("FEC: allocate descriptor memory failed?\n");
 +                                    GFP_KERNEL);
 +      if (!cbd_base)
                return -ENOMEM;
 -      }
  
        memset(cbd_base, 0, PAGE_SIZE);
        spin_lock_init(&fep->hw_lock);
  }
  
  #ifdef CONFIG_OF
 -static int fec_get_phy_mode_dt(struct platform_device *pdev)
 -{
 -      struct device_node *np = pdev->dev.of_node;
 -
 -      if (np)
 -              return of_get_phy_mode(np);
 -
 -      return -ENODEV;
 -}
 -
  static void fec_reset_phy(struct platform_device *pdev)
  {
        int err, phy_reset;
        gpio_set_value(phy_reset, 1);
  }
  #else /* CONFIG_OF */
 -static int fec_get_phy_mode_dt(struct platform_device *pdev)
 -{
 -      return -ENODEV;
 -}
 -
  static void fec_reset_phy(struct platform_device *pdev)
  {
        /*
@@@ -1731,10 -1758,16 +1732,10 @@@ fec_probe(struct platform_device *pdev
        if (!r)
                return -ENXIO;
  
 -      r = request_mem_region(r->start, resource_size(r), pdev->name);
 -      if (!r)
 -              return -EBUSY;
 -
        /* Init network device */
        ndev = alloc_etherdev(sizeof(struct fec_enet_private));
 -      if (!ndev) {
 -              ret = -ENOMEM;
 -              goto failed_alloc_etherdev;
 -      }
 +      if (!ndev)
 +              return -ENOMEM;
  
        SET_NETDEV_DEV(ndev, &pdev->dev);
  
            (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
                fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
  
 -      fep->hwp = ioremap(r->start, resource_size(r));
 +      fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
        fep->pdev = pdev;
        fep->dev_id = dev_id++;
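Switching to devm_request_and_ioremap() ties the mapping's lifetime to the device, which is why the iounmap() and release_mem_region() calls disappear from the probe error path and from fec_drv_remove() below. A toy model of the devres idea (illustrative only, not the kernel's devres):

#include <stdlib.h>

struct devres { void *ptr; struct devres *next; };
struct device { struct devres *res; };

static void *devm_alloc(struct device *dev, size_t sz)
{
        struct devres *r = malloc(sizeof(*r));

        if (!r)
                return NULL;
        r->ptr = malloc(sz);
        if (!r->ptr) {
                free(r);
                return NULL;
        }
        r->next = dev->res;             /* tie lifetime to the device */
        dev->res = r;
        return r->ptr;
}

static void device_release(struct device *dev)
{
        while (dev->res) {
                struct devres *r = dev->res;

                dev->res = r->next;
                free(r->ptr);           /* every managed allocation goes here */
                free(r);
        }
}

int main(void)
{
        struct device dev = { 0 };

        (void)devm_alloc(&dev, 64);     /* no explicit free needed */
        device_release(&dev);           /* everything released at detach */
        return 0;
}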
  
  
        platform_set_drvdata(pdev, ndev);
  
 -      ret = fec_get_phy_mode_dt(pdev);
 +      ret = of_get_phy_mode(pdev->dev.of_node);
        if (ret < 0) {
                pdata = pdev->dev.platform_data;
                if (pdata)
        if (ret)
                goto failed_register;
  
 +      if (fep->bufdesc_ex && fep->ptp_clock)
 +              netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
 +
        return 0;
  
  failed_register:
@@@ -1871,8 -1901,11 +1872,8 @@@ failed_regulator
                clk_disable_unprepare(fep->clk_ptp);
  failed_pin:
  failed_clk:
 -      iounmap(fep->hwp);
  failed_ioremap:
        free_netdev(ndev);
 -failed_alloc_etherdev:
 -      release_mem_region(r->start, resource_size(r));
  
        return ret;
  }
@@@ -1882,6 -1915,7 +1883,6 @@@ fec_drv_remove(struct platform_device *
  {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 -      struct resource *r;
        int i;
  
        unregister_netdev(ndev);
                if (irq > 0)
                        free_irq(irq, ndev);
        }
 -      iounmap(fep->hwp);
        free_netdev(ndev);
  
 -      r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 -      BUG_ON(!r);
 -      release_mem_region(r->start, resource_size(r));
 -
        platform_set_drvdata(pdev, NULL);
  
        return 0;
  }
  
 -#ifdef CONFIG_PM
 +#ifdef CONFIG_PM_SLEEP
  static int
  fec_suspend(struct device *dev)
  {
@@@ -1936,15 -1975,24 +1937,15 @@@ fec_resume(struct device *dev
  
        return 0;
  }
 +#endif /* CONFIG_PM_SLEEP */
  
 -static const struct dev_pm_ops fec_pm_ops = {
 -      .suspend        = fec_suspend,
 -      .resume         = fec_resume,
 -      .freeze         = fec_suspend,
 -      .thaw           = fec_resume,
 -      .poweroff       = fec_suspend,
 -      .restore        = fec_resume,
 -};
 -#endif
 +static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
  
  static struct platform_driver fec_driver = {
        .driver = {
                .name   = DRIVER_NAME,
                .owner  = THIS_MODULE,
 -#ifdef CONFIG_PM
                .pm     = &fec_pm_ops,
 -#endif
                .of_match_table = fec_dt_ids,
        },
        .id_table = fec_devtype,
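SIMPLE_DEV_PM_OPS replaces the hand-written six-entry table removed above with a macro that fans the suspend/resume pair out into every sleep-related slot. A sketch of what such a macro boils down to (the struct layout here is illustrative, not the kernel's dev_pm_ops):

struct pm_ops {
        int (*suspend)(void *dev);
        int (*resume)(void *dev);
        int (*freeze)(void *dev);
        int (*thaw)(void *dev);
        int (*poweroff)(void *dev);
        int (*restore)(void *dev);
};

#define SIMPLE_PM_OPS(name, susp, res)          \
        const struct pm_ops name = {            \
                .suspend  = susp, .resume  = res,       \
                .freeze   = susp, .thaw    = res,       \
                .poweroff = susp, .restore = res,       \
        }

static int my_suspend(void *dev) { (void)dev; return 0; }
static int my_resume(void *dev)  { (void)dev; return 0; }

SIMPLE_PM_OPS(my_pm_ops, my_suspend, my_resume);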
index c92115e71ebea810287df4515f98611bd908fcf5,ab577a763a20d96cb68c151b8f3ceb483af6f8bf..9d6c075e232d9c5c2885cebcfa58c297e03b049e
  
  struct igb_adapter;
  
 -#define E1000_PCS_CFG_IGN_SD               1
 +#define E1000_PCS_CFG_IGN_SD  1
  
  /* Interrupt defines */
 -#define IGB_START_ITR                    648 /* ~6000 ints/sec */
 -#define IGB_4K_ITR                       980
 -#define IGB_20K_ITR                      196
 -#define IGB_70K_ITR                       56
 +#define IGB_START_ITR         648 /* ~6000 ints/sec */
 +#define IGB_4K_ITR            980
 +#define IGB_20K_ITR           196
 +#define IGB_70K_ITR           56
  
  /* TX/RX descriptor defines */
 -#define IGB_DEFAULT_TXD                  256
 -#define IGB_DEFAULT_TX_WORK            128
 -#define IGB_MIN_TXD                       80
 -#define IGB_MAX_TXD                     4096
 +#define IGB_DEFAULT_TXD               256
 +#define IGB_DEFAULT_TX_WORK   128
 +#define IGB_MIN_TXD           80
 +#define IGB_MAX_TXD           4096
  
 -#define IGB_DEFAULT_RXD                  256
 -#define IGB_MIN_RXD                       80
 -#define IGB_MAX_RXD                     4096
 +#define IGB_DEFAULT_RXD               256
 +#define IGB_MIN_RXD           80
 +#define IGB_MAX_RXD           4096
  
 -#define IGB_DEFAULT_ITR                    3 /* dynamic */
 -#define IGB_MAX_ITR_USECS              10000
 -#define IGB_MIN_ITR_USECS                 10
 -#define NON_Q_VECTORS                      1
 -#define MAX_Q_VECTORS                      8
 +#define IGB_DEFAULT_ITR               3 /* dynamic */
 +#define IGB_MAX_ITR_USECS     10000
 +#define IGB_MIN_ITR_USECS     10
 +#define NON_Q_VECTORS         1
 +#define MAX_Q_VECTORS         8
  
  /* Transmit and receive queues */
 -#define IGB_MAX_RX_QUEUES                  8
 -#define IGB_MAX_RX_QUEUES_82575            4
 -#define IGB_MAX_RX_QUEUES_I211             2
 -#define IGB_MAX_TX_QUEUES                  8
 -#define IGB_MAX_VF_MC_ENTRIES              30
 -#define IGB_MAX_VF_FUNCTIONS               8
 -#define IGB_MAX_VFTA_ENTRIES               128
 -#define IGB_82576_VF_DEV_ID                0x10CA
 -#define IGB_I350_VF_DEV_ID                 0x1520
 +#define IGB_MAX_RX_QUEUES     8
 +#define IGB_MAX_RX_QUEUES_82575       4
 +#define IGB_MAX_RX_QUEUES_I211        2
 +#define IGB_MAX_TX_QUEUES     8
 +#define IGB_MAX_VF_MC_ENTRIES 30
 +#define IGB_MAX_VF_FUNCTIONS  8
 +#define IGB_MAX_VFTA_ENTRIES  128
 +#define IGB_82576_VF_DEV_ID   0x10CA
 +#define IGB_I350_VF_DEV_ID    0x1520
  
  /* NVM version defines */
 -#define IGB_MAJOR_MASK                        0xF000
 -#define IGB_MINOR_MASK                        0x0FF0
 -#define IGB_BUILD_MASK                        0x000F
 -#define IGB_COMB_VER_MASK             0x00FF
 -#define IGB_MAJOR_SHIFT                       12
 -#define IGB_MINOR_SHIFT                       4
 -#define IGB_COMB_VER_SHFT             8
 -#define IGB_NVM_VER_INVALID           0xFFFF
 -#define IGB_ETRACK_SHIFT              16
 -#define NVM_ETRACK_WORD                       0x0042
 -#define NVM_COMB_VER_OFF              0x0083
 -#define NVM_COMB_VER_PTR              0x003d
 +#define IGB_MAJOR_MASK                0xF000
 +#define IGB_MINOR_MASK                0x0FF0
 +#define IGB_BUILD_MASK                0x000F
 +#define IGB_COMB_VER_MASK     0x00FF
 +#define IGB_MAJOR_SHIFT               12
 +#define IGB_MINOR_SHIFT               4
 +#define IGB_COMB_VER_SHFT     8
 +#define IGB_NVM_VER_INVALID   0xFFFF
 +#define IGB_ETRACK_SHIFT      16
 +#define NVM_ETRACK_WORD               0x0042
 +#define NVM_COMB_VER_OFF      0x0083
 +#define NVM_COMB_VER_PTR      0x003d
  
  struct vf_data_storage {
        unsigned char vf_mac_addresses[ETH_ALEN];
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
        u16 tx_rate;
 +      bool spoofchk_enabled;
  };
  
  #define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
   *           descriptors until either it has this many to write back, or the
   *           ITR timer expires.
   */
 -#define IGB_RX_PTHRESH                     8
 -#define IGB_RX_HTHRESH                     8
 -#define IGB_TX_PTHRESH                     8
 -#define IGB_TX_HTHRESH                     1
 -#define IGB_RX_WTHRESH                     ((hw->mac.type == e1000_82576 && \
 -                                           adapter->msix_entries) ? 1 : 4)
 -#define IGB_TX_WTHRESH                     ((hw->mac.type == e1000_82576 && \
 -                                           adapter->msix_entries) ? 1 : 16)
 +#define IGB_RX_PTHRESH        ((hw->mac.type == e1000_i354) ? 12 : 8)
 +#define IGB_RX_HTHRESH        8
 +#define IGB_TX_PTHRESH        ((hw->mac.type == e1000_i354) ? 20 : 8)
 +#define IGB_TX_HTHRESH        1
 +#define IGB_RX_WTHRESH        ((hw->mac.type == e1000_82576 && \
 +                        adapter->msix_entries) ? 1 : 4)
 +#define IGB_TX_WTHRESH        ((hw->mac.type == e1000_82576 && \
 +                        adapter->msix_entries) ? 1 : 16)
  
  /* this is the size past which hardware will drop packets when setting LPE=0 */
  #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
  #define IGB_RX_BUFSZ          IGB_RXBUFFER_2048
  
  /* How many Rx Buffers do we bundle into one write to the hardware ? */
 -#define IGB_RX_BUFFER_WRITE   16      /* Must be power of 2 */
 +#define IGB_RX_BUFFER_WRITE   16 /* Must be power of 2 */
  
 -#define AUTO_ALL_MODES            0
 -#define IGB_EEPROM_APME         0x0400
 +#define AUTO_ALL_MODES                0
 +#define IGB_EEPROM_APME               0x0400
  
  #ifndef IGB_MASTER_SLAVE
  /* Switch to override PHY master/slave setting */
  #define IGB_MASTER_SLAVE      e1000_ms_hw_default
  #endif
  
 -#define IGB_MNG_VLAN_NONE -1
 +#define IGB_MNG_VLAN_NONE     -1
  
  enum igb_tx_flags {
        /* cmd_type flags */
  };
  
  /* VLAN info */
 -#define IGB_TX_FLAGS_VLAN_MASK                0xffff0000
 +#define IGB_TX_FLAGS_VLAN_MASK        0xffff0000
  #define IGB_TX_FLAGS_VLAN_SHIFT       16
  
 -/*
 - * The largest size we can write to the descriptor is 65535.  In order to
 +/* The largest size we can write to the descriptor is 65535.  In order to
   * maintain a power of two alignment we have to limit ourselves to 32K.
   */
  #define IGB_MAX_TXD_PWR       15
  #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
  #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
  
 +/* EEPROM byte offsets */
 +#define IGB_SFF_8472_SWAP             0x5C
 +#define IGB_SFF_8472_COMP             0x5E
 +
 +/* Bitmasks */
 +#define IGB_SFF_ADDRESSING_MODE               0x4
 +#define IGB_SFF_8472_UNSUP            0x00
 +
  /* wrapper around a pointer to a socket buffer,
 - * so a DMA handle can be stored along with the buffer */
 + * so a DMA handle can be stored along with the buffer
 + */
  struct igb_tx_buffer {
        union e1000_adv_tx_desc *next_to_watch;
        unsigned long time_stamp;
@@@ -293,25 -284,17 +293,17 @@@ struct igb_q_vector 
  enum e1000_ring_flags_t {
        IGB_RING_FLAG_RX_SCTP_CSUM,
        IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
-       IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
        IGB_RING_FLAG_TX_CTX_IDX,
        IGB_RING_FLAG_TX_DETECT_HANG
  };
  
- #define ring_uses_build_skb(ring) \
-       test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
- #define set_ring_build_skb_enabled(ring) \
-       set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
- #define clear_ring_build_skb_enabled(ring) \
-       clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
  #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
  
 -#define IGB_RX_DESC(R, i)         \
 +#define IGB_RX_DESC(R, i)     \
        (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
 -#define IGB_TX_DESC(R, i)         \
 +#define IGB_TX_DESC(R, i)     \
        (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
 -#define IGB_TX_CTXTDESC(R, i)     \
 +#define IGB_TX_CTXTDESC(R, i) \
        (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
  
  /* igb_test_staterr - tests bits within Rx descriptor status and error fields */
@@@ -470,12 -453,12 +462,12 @@@ struct igb_adapter 
  #define IGB_FLAG_WOL_SUPPORTED                (1 << 8)
  
  /* DMA Coalescing defines */
 -#define IGB_MIN_TXPBSIZE           20408
 -#define IGB_TX_BUF_4096            4096
 -#define IGB_DMCTLX_DCFLUSH_DIS     0x80000000  /* Disable DMA Coal Flush */
 +#define IGB_MIN_TXPBSIZE      20408
 +#define IGB_TX_BUF_4096               4096
 +#define IGB_DMCTLX_DCFLUSH_DIS        0x80000000  /* Disable DMA Coal Flush */
  
 -#define IGB_82576_TSYNC_SHIFT 19
 -#define IGB_TS_HDR_LEN        16
 +#define IGB_82576_TSYNC_SHIFT 19
 +#define IGB_TS_HDR_LEN                16
  enum e1000_state_t {
        __IGB_TESTING,
        __IGB_RESETTING,
index 9bf08b977daab541ab66f7ecabfb94d85be12eb9,64f75291e3a5ca5402dfbee6dccb7a68bf1f1dd0..dcaa35481dd7997048b8f22139cb572ddf518fb6
@@@ -77,9 -77,6 +77,9 @@@ static const struct e1000_info *igb_inf
  };
  
  static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
@@@ -159,8 -156,8 +159,8 @@@ static int igb_ioctl(struct net_device 
  static void igb_tx_timeout(struct net_device *);
  static void igb_reset_task(struct work_struct *);
  static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
 -static int igb_vlan_rx_add_vid(struct net_device *, u16);
 -static int igb_vlan_rx_kill_vid(struct net_device *, u16);
 +static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
 +static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
  static void igb_restore_vlan(struct igb_adapter *);
  static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
  static void igb_ping_all_vfs(struct igb_adapter *);
@@@ -172,8 -169,6 +172,8 @@@ static int igb_ndo_set_vf_mac(struct ne
  static int igb_ndo_set_vf_vlan(struct net_device *netdev,
                               int vf, u16 vlan, u8 qos);
  static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
 +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
 +                                 bool setting);
  static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
                                 struct ifla_vf_info *ivi);
  static void igb_check_vf_rate_limit(struct igb_adapter *);
@@@ -297,7 -292,9 +297,7 @@@ static const struct igb_reg_info igb_re
        {}
  };
  
 -/*
 - * igb_regdump - register printout routine
 - */
 +/* igb_regdump - register printout routine */
  static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
  {
        int n = 0;
                regs[2], regs[3]);
  }
  
 -/*
 - * igb_dump - Print registers, tx-rings and rx-rings
 - */
 +/* igb_dump - Print registers, Tx-rings and Rx-rings */
  static void igb_dump(struct igb_adapter *adapter)
  {
        struct net_device *netdev = adapter->netdev;
@@@ -570,13 -569,12 +570,13 @@@ exit
        return;
  }
  
 -/*  igb_get_i2c_data - Reads the I2C SDA data bit
 +/**
 + *  igb_get_i2c_data - Reads the I2C SDA data bit
   *  @hw: pointer to hardware structure
   *  @i2cctl: Current value of I2CCTL register
   *
   *  Returns the I2C data bit value
 - */
 + **/
  static int igb_get_i2c_data(void *data)
  {
        struct igb_adapter *adapter = (struct igb_adapter *)data;
        return ((i2cctl & E1000_I2C_DATA_IN) != 0);
  }
  
 -/* igb_set_i2c_data - Sets the I2C data bit
 +/**
 + *  igb_set_i2c_data - Sets the I2C data bit
   *  @data: pointer to hardware structure
   *  @state: I2C data value (0 or 1) to set
   *
   *  Sets the I2C data bit
 - */
 + **/
  static void igb_set_i2c_data(void *data, int state)
  {
        struct igb_adapter *adapter = (struct igb_adapter *)data;
  
  }
  
 -/* igb_set_i2c_clk - Sets the I2C SCL clock
 +/**
 + *  igb_set_i2c_clk - Sets the I2C SCL clock
   *  @data: pointer to hardware structure
   *  @state: state to set clock
   *
   *  Sets the I2C clock line to state
 - */
 + **/
  static void igb_set_i2c_clk(void *data, int state)
  {
        struct igb_adapter *adapter = (struct igb_adapter *)data;
        wrfl();
  }
  
 -/* igb_get_i2c_clk - Gets the I2C SCL clock state
 +/**
 + *  igb_get_i2c_clk - Gets the I2C SCL clock state
   *  @data: pointer to hardware structure
   *
   *  Gets the I2C clock state
 - */
 + **/
  static int igb_get_i2c_clk(void *data)
  {
        struct igb_adapter *adapter = (struct igb_adapter *)data;
@@@ -660,10 -655,8 +660,10 @@@ static const struct i2c_algo_bit_data i
  };
  
  /**
 - * igb_get_hw_dev - return device
 - * used by hardware layer to print debugging information
 + *  igb_get_hw_dev - return device
 + *  @hw: pointer to hardware structure
 + *
 + *  used by hardware layer to print debugging information
   **/
  struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
  {
  }
  
  /**
 - * igb_init_module - Driver Registration Routine
 + *  igb_init_module - Driver Registration Routine
   *
 - * igb_init_module is the first routine called when the driver is
 - * loaded. All it does is register with the PCI subsystem.
 + *  igb_init_module is the first routine called when the driver is
 + *  loaded. All it does is register with the PCI subsystem.
   **/
  static int __init igb_init_module(void)
  {
  module_init(igb_init_module);
  
  /**
 - * igb_exit_module - Driver Exit Cleanup Routine
 + *  igb_exit_module - Driver Exit Cleanup Routine
   *
 - * igb_exit_module is called just before the driver is removed
 - * from memory.
 + *  igb_exit_module is called just before the driver is removed
 + *  from memory.
   **/
  static void __exit igb_exit_module(void)
  {
@@@ -712,11 -705,11 +712,11 @@@ module_exit(igb_exit_module)
  
  #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
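
Worked through, Q_IDX_82576 interleaves queue indices into the 82576 register layout: the low bit of the queue index contributes a column offset of 8 and the remaining bits pick the row. A throwaway userspace check (the main() harness is illustrative only):

    #include <stdio.h>

    #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))

    int main(void)
    {
            /* queues 0..7 map to register indices 0, 8, 1, 9, 2, 10, 3, 11 */
            for (int i = 0; i < 8; i++)
                    printf("queue %d -> reg %d\n", i, Q_IDX_82576(i));
            return 0;
    }
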
  /**
 - * igb_cache_ring_register - Descriptor ring to register mapping
 - * @adapter: board private structure to initialize
 + *  igb_cache_ring_register - Descriptor ring to register mapping
 + *  @adapter: board private structure to initialize
   *
 - * Once we know the feature-set enabled for the device, we'll cache
 - * the register offset the descriptor ring is assigned to.
 + *  Once we know the feature-set enabled for the device, we'll cache
 + *  the register offset the descriptor ring is assigned to.
   **/
  static void igb_cache_ring_register(struct igb_adapter *adapter)
  {
                if (adapter->vfs_allocated_count) {
                        for (; i < adapter->rss_queues; i++)
                                adapter->rx_ring[i]->reg_idx = rbase_offset +
 -                                                             Q_IDX_82576(i);
 +                                                             Q_IDX_82576(i);
                }
        case e1000_82575:
        case e1000_82580:
        case e1000_i350:
 +      case e1000_i354:
        case e1000_i210:
        case e1000_i211:
        default:
@@@ -793,10 -785,9 +793,10 @@@ static void igb_assign_vector(struct ig
        switch (hw->mac.type) {
        case e1000_82575:
                /* The 82575 assigns vectors using a bitmask, which matches the
 -                 bitmask for the EICR/EIMS/EIMC registers.  To assign one
 -                 or more queues to a vector, we write the appropriate bits
 -                 into the MSIXBM register for that vector. */
 +               * bitmask for the EICR/EIMS/EIMC registers.  To assign one
 +               * or more queues to a vector, we write the appropriate bits
 +               * into the MSIXBM register for that vector.
 +               */
                if (rx_queue > IGB_N0_QUEUE)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                if (tx_queue > IGB_N0_QUEUE)
                q_vector->eims_value = msixbm;
                break;
        case e1000_82576:
 -              /*
 -               * 82576 uses a table that essentially consists of 2 columns
 +              /* 82576 uses a table that essentially consists of 2 columns
                 * with 8 rows.  The ordering is column-major so we use the
                 * lower 3 bits as the row index, and the 4th bit as the
                 * column offset.
                break;
        case e1000_82580:
        case e1000_i350:
 +      case e1000_i354:
        case e1000_i210:
        case e1000_i211:
 -              /*
 -               * On 82580 and newer adapters the scheme is similar to 82576
 +              /* On 82580 and newer adapters the scheme is similar to 82576
                 * however instead of ordering column-major we have things
                 * ordered row-major.  So we traverse the table by using
                 * bit 0 as the column offset, and the remaining bits as the
  }
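
Spelled out, the two bit layouts described in the comments above are as follows (q is an example queue index; this sketches the arithmetic only, not the IVAR register write itself):

    int q = 5;                       /* example queue index */

    /* 82576: column-major table, 2 columns x 8 rows */
    int row_76 = q & 0x7;            /* lower 3 bits -> row    */
    int col_76 = (q >> 3) & 0x1;     /* 4th bit      -> column */

    /* 82580 and newer: row-major traversal of a similar table */
    int col_80 = q & 0x1;            /* bit 0          -> column */
    int row_80 = q >> 1;             /* remaining bits -> row    */
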
  
  /**
 - * igb_configure_msix - Configure MSI-X hardware
 + *  igb_configure_msix - Configure MSI-X hardware
 + *  @adapter: board private structure to initialize
   *
 - * igb_configure_msix sets up the hardware to properly
 - * generate MSI-X interrupts.
 + *  igb_configure_msix sets up the hardware to properly
 + *  generate MSI-X interrupts.
   **/
  static void igb_configure_msix(struct igb_adapter *adapter)
  {
                wr32(E1000_CTRL_EXT, tmp);
  
                /* enable msix_other interrupt */
 -              array_wr32(E1000_MSIXBM(0), vector++,
 -                                    E1000_EIMS_OTHER);
 +              array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
                adapter->eims_other = E1000_EIMS_OTHER;
  
                break;
        case e1000_82576:
        case e1000_82580:
        case e1000_i350:
 +      case e1000_i354:
        case e1000_i210:
        case e1000_i211:
                /* Turn on MSI-X capability first, or our settings
 -               * won't stick.  And it will take days to debug. */
 +               * won't stick.  And it will take days to debug.
 +               */
                wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
 -                              E1000_GPIE_PBA | E1000_GPIE_EIAME |
 -                              E1000_GPIE_NSICR);
 +                   E1000_GPIE_PBA | E1000_GPIE_EIAME |
 +                   E1000_GPIE_NSICR);
  
                /* enable msix_other interrupt */
                adapter->eims_other = 1 << vector;
  }
  
  /**
 - * igb_request_msix - Initialize MSI-X interrupts
 + *  igb_request_msix - Initialize MSI-X interrupts
 + *  @adapter: board private structure to initialize
   *
 - * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 - * kernel.
 + *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
 + *  kernel.
   **/
  static int igb_request_msix(struct igb_adapter *adapter)
  {
        int i, err = 0, vector = 0, free_vector = 0;
  
        err = request_irq(adapter->msix_entries[vector].vector,
 -                        igb_msix_other, 0, netdev->name, adapter);
 +                        igb_msix_other, 0, netdev->name, adapter);
        if (err)
                goto err_out;
  
                        sprintf(q_vector->name, "%s-unused", netdev->name);
  
                err = request_irq(adapter->msix_entries[vector].vector,
 -                                igb_msix_ring, 0, q_vector->name,
 -                                q_vector);
 +                                igb_msix_ring, 0, q_vector->name,
 +                                q_vector);
                if (err)
                        goto err_free;
        }
@@@ -993,13 -982,13 +993,13 @@@ static void igb_reset_interrupt_capabil
  }
  
  /**
 - * igb_free_q_vector - Free memory allocated for specific interrupt vector
 - * @adapter: board private structure to initialize
 - * @v_idx: Index of vector to be freed
 + *  igb_free_q_vector - Free memory allocated for specific interrupt vector
 + *  @adapter: board private structure to initialize
 + *  @v_idx: Index of vector to be freed
   *
 - * This function frees the memory allocated to the q_vector.  In addition if
 - * NAPI is enabled it will delete any references to the NAPI struct prior
 - * to freeing the q_vector.
 + *  This function frees the memory allocated to the q_vector.  In addition if
 + *  NAPI is enabled it will delete any references to the NAPI struct prior
 + *  to freeing the q_vector.
   **/
  static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
  {
        adapter->q_vector[v_idx] = NULL;
        netif_napi_del(&q_vector->napi);
  
 -      /*
 -       * ixgbe_get_stats64() might access the rings on this vector,
 +      /* ixgbe_get_stats64() might access the rings on this vector,
         * we must wait a grace period before freeing it.
         */
        kfree_rcu(q_vector, rcu);
  }
  
  /**
 - * igb_free_q_vectors - Free memory allocated for interrupt vectors
 - * @adapter: board private structure to initialize
 + *  igb_free_q_vectors - Free memory allocated for interrupt vectors
 + *  @adapter: board private structure to initialize
   *
 - * This function frees the memory allocated to the q_vectors.  In addition if
 - * NAPI is enabled it will delete any references to the NAPI struct prior
 - * to freeing the q_vector.
 + *  This function frees the memory allocated to the q_vectors.  In addition if
 + *  NAPI is enabled it will delete any references to the NAPI struct prior
 + *  to freeing the q_vector.
   **/
  static void igb_free_q_vectors(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 + *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 + *  @adapter: board private structure to initialize
   *
 - * This function resets the device so that it has 0 rx queues, tx queues, and
 - * MSI-X interrupts allocated.
 + *  This function resets the device so that it has 0 Rx queues, Tx queues, and
 + *  MSI-X interrupts allocated.
   */
  static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_set_interrupt_capability - set MSI or MSI-X if supported
 + *  igb_set_interrupt_capability - set MSI or MSI-X if supported
 + *  @adapter: board private structure to initialize
 + *  @msix: boolean value of MSIX capability
   *
 - * Attempt to configure interrupts using the best available
 - * capabilities of the hardware and kernel.
 + *  Attempt to configure interrupts using the best available
 + *  capabilities of the hardware and kernel.
   **/
  static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
  {
        else
                adapter->num_tx_queues = adapter->rss_queues;
  
 -      /* start with one vector for every rx queue */
 +      /* start with one vector for every Rx queue */
        numvecs = adapter->num_rx_queues;
  
 -      /* if tx handler is separate add 1 for every tx queue */
 +      /* if Tx handler is separate add 1 for every Tx queue */
        if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
                numvecs += adapter->num_tx_queues;
  
@@@ -1141,16 -1128,16 +1141,16 @@@ static void igb_add_ring(struct igb_rin
  }
  
  /**
 - * igb_alloc_q_vector - Allocate memory for a single interrupt vector
 - * @adapter: board private structure to initialize
 - * @v_count: q_vectors allocated on adapter, used for ring interleaving
 - * @v_idx: index of vector in adapter struct
 - * @txr_count: total number of Tx rings to allocate
 - * @txr_idx: index of first Tx ring to allocate
 - * @rxr_count: total number of Rx rings to allocate
 - * @rxr_idx: index of first Rx ring to allocate
 + *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
 + *  @adapter: board private structure to initialize
 + *  @v_count: q_vectors allocated on adapter, used for ring interleaving
 + *  @v_idx: index of vector in adapter struct
 + *  @txr_count: total number of Tx rings to allocate
 + *  @txr_idx: index of first Tx ring to allocate
 + *  @rxr_count: total number of Rx rings to allocate
 + *  @rxr_idx: index of first Rx ring to allocate
   *
 - * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 + *  We allocate one q_vector.  If allocation fails we return -ENOMEM.
   **/
  static int igb_alloc_q_vector(struct igb_adapter *adapter,
                              int v_count, int v_idx,
        /* initialize pointer to rings */
        ring = q_vector->ring;
  
 +      /* initialize ITR */
 +      if (rxr_count) {
 +              /* rx or rx/tx vector */
 +              if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
 +                      q_vector->itr_val = adapter->rx_itr_setting;
 +      } else {
 +              /* tx only vector */
 +              if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
 +                      q_vector->itr_val = adapter->tx_itr_setting;
 +      }
 +
        if (txr_count) {
                /* assign generic ring traits */
                ring->dev = &adapter->pdev->dev;
                        set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
  
                /*
 -               * On i350, i210, and i211, loopback VLAN packets
 +               * On i350, i354, i210, and i211, loopback VLAN packets
                 * have the tag byte-swapped.
 -               * */
 +               */
                if (adapter->hw.mac.type >= e1000_i350)
                        set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
  
  
  
  /**
 - * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 - * @adapter: board private structure to initialize
 + *  igb_alloc_q_vectors - Allocate memory for interrupt vectors
 + *  @adapter: board private structure to initialize
   *
 - * We allocate one q_vector per queue interrupt.  If allocation fails we
 - * return -ENOMEM.
 + *  We allocate one q_vector per queue interrupt.  If allocation fails we
 + *  return -ENOMEM.
   **/
  static int igb_alloc_q_vectors(struct igb_adapter *adapter)
  {
@@@ -1322,11 -1298,9 +1322,11 @@@ err_out
  }
  
  /**
 - * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 + *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 + *  @adapter: board private structure to initialize
 + *  @msix: boolean value of MSIX capability
   *
 - * This function initializes the interrupts and allocates all of the queues.
 + *  This function initializes the interrupts and allocates all of the queues.
   **/
  static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
  {
@@@ -1351,11 -1325,10 +1351,11 @@@ err_alloc_q_vectors
  }
  
  /**
 - * igb_request_irq - initialize interrupts
 + *  igb_request_irq - initialize interrupts
 + *  @adapter: board private structure to initialize
   *
 - * Attempts to configure interrupts using the best available
 - * capabilities of the hardware and kernel.
 + *  Attempts to configure interrupts using the best available
 + *  capabilities of the hardware and kernel.
   **/
  static int igb_request_irq(struct igb_adapter *adapter)
  {
@@@ -1421,14 -1394,15 +1421,14 @@@ static void igb_free_irq(struct igb_ada
  }
  
  /**
 - * igb_irq_disable - Mask off interrupt generation on the NIC
 - * @adapter: board private structure
 + *  igb_irq_disable - Mask off interrupt generation on the NIC
 + *  @adapter: board private structure
   **/
  static void igb_irq_disable(struct igb_adapter *adapter)
  {
        struct e1000_hw *hw = &adapter->hw;
  
 -      /*
 -       * we need to be careful when disabling interrupts.  The VFs are also
 +      /* we need to be careful when disabling interrupts.  The VFs are also
         * mapped into these registers and so clearing the bits can cause
         * issues on the VF drivers so we only need to clear what we set
         */
  }
  
  /**
 - * igb_irq_enable - Enable default interrupt generation settings
 - * @adapter: board private structure
 + *  igb_irq_enable - Enable default interrupt generation settings
 + *  @adapter: board private structure
   **/
  static void igb_irq_enable(struct igb_adapter *adapter)
  {
@@@ -1503,12 -1477,13 +1503,12 @@@ static void igb_update_mng_vlan(struct 
  }
  
  /**
 - * igb_release_hw_control - release control of the h/w to f/w
 - * @adapter: address of board private structure
 - *
 - * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 - * For ASF and Pass Through versions of f/w this means that the
 - * driver is no longer loaded.
 + *  igb_release_hw_control - release control of the h/w to f/w
 + *  @adapter: address of board private structure
   *
 + *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 + *  For ASF and Pass Through versions of f/w this means that the
 + *  driver is no longer loaded.
   **/
  static void igb_release_hw_control(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_get_hw_control - get control of the h/w from f/w
 - * @adapter: address of board private structure
 - *
 - * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 - * For ASF and Pass Through versions of f/w this means that
 - * the driver is loaded.
 + *  igb_get_hw_control - get control of the h/w from f/w
 + *  @adapter: address of board private structure
   *
 + *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 + *  For ASF and Pass Through versions of f/w this means that
 + *  the driver is loaded.
   **/
  static void igb_get_hw_control(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_configure - configure the hardware for RX and TX
 - * @adapter: private board structure
 + *  igb_configure - configure the hardware for RX and TX
 + *  @adapter: private board structure
   **/
  static void igb_configure(struct igb_adapter *adapter)
  {
  
        /* call igb_desc_unused which always leaves
         * at least 1 descriptor unused to make sure
 -       * next_to_use != next_to_clean */
 +       * next_to_use != next_to_clean
 +       */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = adapter->rx_ring[i];
                igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
  }
  
  /**
 - * igb_power_up_link - Power up the phy/serdes link
 - * @adapter: address of board private structure
 + *  igb_power_up_link - Power up the phy/serdes link
 + *  @adapter: address of board private structure
   **/
  void igb_power_up_link(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_power_down_link - Power down the phy/serdes link
 - * @adapter: address of board private structure
 + *  igb_power_down_link - Power down the phy/serdes link
 + *  @adapter: address of board private structure
   */
  static void igb_power_down_link(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_up - Open the interface and prepare it to handle traffic
 - * @adapter: board private structure
 + *  igb_up - Open the interface and prepare it to handle traffic
 + *  @adapter: board private structure
   **/
  int igb_up(struct igb_adapter *adapter)
  {
@@@ -1649,8 -1624,7 +1649,8 @@@ void igb_down(struct igb_adapter *adapt
        int i;
  
        /* signal that we're down so the interrupt handler does not
 -       * reschedule our watchdog timer */
 +       * reschedule our watchdog timer
 +       */
        set_bit(__IGB_DOWN, &adapter->state);
  
        /* disable receives in the hardware */
@@@ -1720,7 -1694,6 +1720,7 @@@ void igb_reset(struct igb_adapter *adap
         */
        switch (mac->type) {
        case e1000_i350:
 +      case e1000_i354:
        case e1000_82580:
                pba = rd32(E1000_RXPBS);
                pba = igb_rxpbs_adjust_82580(pba);
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
 -               * expressed in KB. */
 +               * expressed in KB.
 +               */
                pba = rd32(E1000_PBA);
                /* upper 16 bits has Tx packet buffer allocation size in KB */
                tx_space = pba >> 16;
                /* lower 16 bits has Rx packet buffer allocation size in KB */
                pba &= 0xffff;
 -              /* the tx fifo also stores 16 bytes of information about the tx
 -               * but don't include ethernet FCS because hardware appends it */
 +              /* the Tx fifo also stores 16 bytes of information about the Tx
 +               * but don't include the Ethernet FCS because hardware appends it
 +               */
                min_tx_space = (adapter->max_frame_size +
                                sizeof(union e1000_adv_tx_desc) -
                                ETH_FCS_LEN) * 2;
  
                /* If current Tx allocation is less than the min Tx FIFO size,
                 * and the min Tx FIFO size is less than the current Rx FIFO
 -               * allocation, take space away from current Rx allocation */
 +               * allocation, take space away from current Rx allocation
 +               */
                if (tx_space < min_tx_space &&
                    ((min_tx_space - tx_space) < pba)) {
                        pba = pba - (min_tx_space - tx_space);
  
 -                      /* if short on rx space, rx wins and must trump tx
 -                       * adjustment */
 +                      /* if short on Rx space, Rx wins and must trump Tx
 +                       * adjustment
 +                       */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
                }
         * (or the size used for early receive) above it in the Rx FIFO.
         * Set it to the lower of:
         * - 90% of the Rx FIFO size, or
 -       * - the full Rx FIFO size minus one full frame */
 +       * - the full Rx FIFO size minus one full frame
 +       */
        hwm = min(((pba << 10) * 9 / 10),
                        ((pba << 10) - 2 * adapter->max_frame_size));
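
As a concrete instance of that rule: with pba = 32 (KB) and a 1522-byte max frame, 90% of the FIFO is 29491 bytes while the FIFO minus two max-size frames (as the min() above computes) is 29724, so the 90% bound is taken. A throwaway check (the values are examples, not driver defaults):

    unsigned int pba = 32;                        /* example Rx packet buffer, KB  */
    unsigned int max_frame = 1522;                /* example max frame size, bytes */
    unsigned int ninety = (pba << 10) * 9 / 10;   /* 32768 * 9 / 10 = 29491        */
    unsigned int less_two = (pba << 10) - 2 * max_frame; /* 32768 - 3044 = 29724   */
    unsigned int hwm = ninety < less_two ? ninety : less_two; /* -> 29491          */
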
  
        if (hw->mac.ops.init_hw(hw))
                dev_err(&pdev->dev, "Hardware Error\n");
  
 -      /*
 -       * Flow control settings reset on hardware reset, so guarantee flow
 +      /* Flow control settings reset on hardware reset, so guarantee flow
         * control is off when forcing speed.
         */
        if (!hw->mac.autoneg)
  static netdev_features_t igb_fix_features(struct net_device *netdev,
        netdev_features_t features)
  {
 -      /*
 -       * Since there is no support for separate rx/tx vlan accel
 -       * enable/disable make sure tx flag is always in same state as rx.
 +      /* Since there is no support for separate Rx/Tx vlan accel
 +       * enable/disable make sure Tx flag is always in same state as Rx.
         */
 -      if (features & NETIF_F_HW_VLAN_RX)
 -              features |= NETIF_F_HW_VLAN_TX;
 +      if (features & NETIF_F_HW_VLAN_CTAG_RX)
 +              features |= NETIF_F_HW_VLAN_CTAG_TX;
        else
 -              features &= ~NETIF_F_HW_VLAN_TX;
 +              features &= ~NETIF_F_HW_VLAN_CTAG_TX;
  
        return features;
  }
@@@ -1874,7 -1844,7 +1874,7 @@@ static int igb_set_features(struct net_
        netdev_features_t changed = netdev->features ^ features;
        struct igb_adapter *adapter = netdev_priv(netdev);
  
 -      if (changed & NETIF_F_HW_VLAN_RX)
 +      if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                igb_vlan_mode(netdev, features);
  
        if (!(changed & NETIF_F_RXALL))
@@@ -1906,7 -1876,6 +1906,7 @@@ static const struct net_device_ops igb_
        .ndo_set_vf_mac         = igb_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = igb_ndo_set_vf_vlan,
        .ndo_set_vf_tx_rate     = igb_ndo_set_vf_bw,
 +      .ndo_set_vf_spoofchk    = igb_ndo_set_vf_spoofchk,
        .ndo_get_vf_config      = igb_ndo_get_vf_config,
  #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = igb_netpoll,
  /**
   * igb_set_fw_version - Configure version string for ethtool
   * @adapter: adapter struct
 - *
   **/
  void igb_set_fw_version(struct igb_adapter *adapter)
  {
        return;
  }
  
 -/*  igb_init_i2c - Init I2C interface
 +/**
 + *  igb_init_i2c - Init I2C interface
   *  @adapter: pointer to adapter structure
 - *
 - */
 + **/
  static s32 igb_init_i2c(struct igb_adapter *adapter)
  {
        s32 status = E1000_SUCCESS;
  }
  
  /**
 - * igb_probe - Device Initialization Routine
 - * @pdev: PCI device information struct
 - * @ent: entry in igb_pci_tbl
 + *  igb_probe - Device Initialization Routine
 + *  @pdev: PCI device information struct
 + *  @ent: entry in igb_pci_tbl
   *
 - * Returns 0 on success, negative on failure
 + *  Returns 0 on success, negative on failure
   *
 - * igb_probe initializes an adapter identified by a pci_dev structure.
 - * The OS initialization, configuring of the adapter private structure,
 - * and a hardware reset occur.
 + *  igb_probe initializes an adapter identified by a pci_dev structure.
 + *  The OS initialization, configuring of the adapter private structure,
 + *  and a hardware reset occur.
   **/
  static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
        } else {
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
 -                      err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 +                      err = dma_set_coherent_mask(&pdev->dev,
 +                                                  DMA_BIT_MASK(32));
                        if (err) {
 -                              dev_err(&pdev->dev, "No usable DMA "
 -                                      "configuration, aborting\n");
 +                              dev_err(&pdev->dev,
 +                                      "No usable DMA configuration, aborting\n");
                                goto err_dma;
                        }
                }
        }
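
The probe path above follows the usual DMA-mask ladder: prefer 64-bit, fall back to 32-bit, and abort probe only if neither mask is accepted. A condensed sketch of the same pattern (error handling reduced to a single return code):

    #include <linux/dma-mapping.h>

    static int set_dma_masks(struct device *dev)
    {
            if (!dma_set_mask(dev, DMA_BIT_MASK(64)) &&
                !dma_set_coherent_mask(dev, DMA_BIT_MASK(64)))
                    return 0;               /* 64-bit DMA is usable        */
            if (!dma_set_mask(dev, DMA_BIT_MASK(32)) &&
                !dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
                    return 0;               /* fall back to 32-bit         */
            return -EIO;                    /* no usable DMA configuration */
    }
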
  
        err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
 -                                         IORESOURCE_MEM),
 -                                         igb_driver_name);
 +                                         IORESOURCE_MEM),
 +                                         igb_driver_name);
        if (err)
                goto err_pci_reg;
  
                dev_info(&pdev->dev,
                        "PHY reset is blocked due to SOL/IDER session.\n");
  
 -      /*
 -       * features is initialized to 0 in allocation, it might have bits
 +      /* features is initialized to 0 in allocation; it might have bits
         * set by igb_sw_init so we should use an or instead of an
         * assignment.
         */
                            NETIF_F_TSO6 |
                            NETIF_F_RXHASH |
                            NETIF_F_RXCSUM |
 -                          NETIF_F_HW_VLAN_RX |
 -                          NETIF_F_HW_VLAN_TX;
 +                          NETIF_F_HW_VLAN_CTAG_RX |
 +                          NETIF_F_HW_VLAN_CTAG_TX;
  
        /* copy netdev features into list of user selectable features */
        netdev->hw_features |= netdev->features;
        netdev->hw_features |= NETIF_F_RXALL;
  
        /* set this bit last since it cannot be part of hw_features */
 -      netdev->features |= NETIF_F_HW_VLAN_FILTER;
 +      netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
  
        netdev->vlan_features |= NETIF_F_TSO |
                                 NETIF_F_TSO6 |
        adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
  
        /* before reading the NVM, reset the controller to put the device in a
 -       * known good starting state */
 +       * known good starting state
 +       */
        hw->mac.ops.reset_hw(hw);
  
 -      /*
 -       * make sure the NVM is good , i211 parts have special NVM that
 +      /* make sure the NVM is good, i211 parts have special NVM that
         * doesn't contain a checksum
         */
        if (hw->mac.type != e1000_i211) {
        igb_set_fw_version(adapter);
  
        setup_timer(&adapter->watchdog_timer, igb_watchdog,
 -                  (unsigned long) adapter);
 +                  (unsigned long) adapter);
        setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
 -                  (unsigned long) adapter);
 +                  (unsigned long) adapter);
  
        INIT_WORK(&adapter->reset_task, igb_reset_task);
        INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
        /* Check the NVM for wake support on non-port A ports */
        if (hw->mac.type >= e1000_82580)
                hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
 -                               NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
 -                               &eeprom_data);
 +                               NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
 +                               &eeprom_data);
        else if (hw->bus.func == 1)
                hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
  
  
        /* now that we have the eeprom settings, apply the special cases where
         * the eeprom may be wrong or the board simply won't support wake on
 -       * lan on a particular port */
 +       * lan on a particular port
 +       */
        switch (pdev->device) {
        case E1000_DEV_ID_82575GB_QUAD_COPPER:
                adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
        case E1000_DEV_ID_82576_FIBER:
        case E1000_DEV_ID_82576_SERDES:
                /* Wake events only supported on port A for dual fiber
 -               * regardless of eeprom setting */
 +               * regardless of eeprom setting
 +               */
                if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
                        adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
                break;
        if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
                u16 ets_word;
  
 -              /*
 -               * Read the NVM to determine if this i350 device supports an
 +              /* Read the NVM to determine if this i350 device supports an
                 * external thermal sensor.
                 */
                hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
        igb_ptp_init(adapter);
  
        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
 -      /* print bus type/speed/width info */
 -      dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
 -               netdev->name,
 -               ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
 -                (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
 -                                                          "unknown"),
 -               ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
 -                (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
 -                (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
 -                 "unknown"),
 -               netdev->dev_addr);
 +      /* print bus type/speed/width info, not applicable to i354 */
 +      if (hw->mac.type != e1000_i354) {
 +              dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
 +                       netdev->name,
 +                       ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
 +                        (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
 +                         "unknown"),
 +                       ((hw->bus.width == e1000_bus_width_pcie_x4) ?
 +                        "Width x4" :
 +                        (hw->bus.width == e1000_bus_width_pcie_x2) ?
 +                        "Width x2" :
 +                        (hw->bus.width == e1000_bus_width_pcie_x1) ?
 +                        "Width x1" : "unknown"), netdev->dev_addr);
 +      }
  
        ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
        if (ret_val)
        case e1000_i211:
                igb_set_eee_i350(hw);
                break;
 +      case e1000_i354:
 +              if (hw->phy.media_type == e1000_media_type_copper) {
 +                      if ((rd32(E1000_CTRL_EXT) &
 +                          E1000_CTRL_EXT_LINK_MODE_SGMII))
 +                              igb_set_eee_i354(hw);
 +              }
 +              break;
        default:
                break;
        }
@@@ -2385,7 -2344,7 +2385,7 @@@ err_ioremap
        free_netdev(netdev);
  err_alloc_etherdev:
        pci_release_selected_regions(pdev,
 -                                   pci_select_bars(pdev, IORESOURCE_MEM));
 +                                   pci_select_bars(pdev, IORESOURCE_MEM));
  err_pci_reg:
  err_dma:
        pci_disable_device(pdev);
  }
  
  #endif
 -/*
 +/**
   *  igb_remove_i2c - Cleanup  I2C interface
   *  @adapter: pointer to adapter structure
 - *
 - */
 + **/
  static void igb_remove_i2c(struct igb_adapter *adapter)
  {
 -
        /* free the adapter bus structure */
        i2c_del_adapter(&adapter->i2c_adap);
  }
  
  /**
 - * igb_remove - Device Removal Routine
 - * @pdev: PCI device information struct
 + *  igb_remove - Device Removal Routine
 + *  @pdev: PCI device information struct
   *
 - * igb_remove is called by the PCI subsystem to alert the driver
 - * that it should release a PCI device.  The could be caused by a
 - * Hot-Plug event, or because the driver is going to be removed from
 - * memory.
 + *  igb_remove is called by the PCI subsystem to alert the driver
 + *  that it should release a PCI device.  This could be caused by a
 + *  Hot-Plug event, or because the driver is going to be removed from
 + *  memory.
   **/
  static void igb_remove(struct pci_dev *pdev)
  {
  #endif
        igb_remove_i2c(adapter);
        igb_ptp_stop(adapter);
 -      /*
 -       * The watchdog timer may be rescheduled, so explicitly
 +      /* The watchdog timer may be rescheduled, so explicitly
         * disable watchdog from being rescheduled.
         */
        set_bit(__IGB_DOWN, &adapter->state);
  #endif
  
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
 -       * would have already happened in close and is redundant. */
 +       * would have already happened in close and is redundant.
 +       */
        igb_release_hw_control(adapter);
  
        unregister_netdev(netdev);
        if (hw->flash_address)
                iounmap(hw->flash_address);
        pci_release_selected_regions(pdev,
 -                                   pci_select_bars(pdev, IORESOURCE_MEM));
 +                                   pci_select_bars(pdev, IORESOURCE_MEM));
  
        kfree(adapter->shadow_vfta);
        free_netdev(netdev);
  }
  
  /**
 - * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 - * @adapter: board private structure to initialize
 + *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 + *  @adapter: board private structure to initialize
   *
 - * This function initializes the vf specific data storage and then attempts to
 - * allocate the VFs.  The reason for ordering it this way is because it is much
 - * mor expensive time wise to disable SR-IOV than it is to allocate and free
 - * the memory for the VFs.
 + *  This function initializes the vf specific data storage and then attempts to
 + *  allocate the VFs.  The reason for ordering it this way is that it is much
 + *  more expensive time-wise to disable SR-IOV than it is to allocate and free
 + *  the memory for the VFs.
   **/
  static void igb_probe_vfs(struct igb_adapter *adapter)
  {
@@@ -2615,7 -2576,6 +2615,7 @@@ static void igb_init_queue_configuratio
                }
                /* fall through */
        case e1000_82580:
 +      case e1000_i354:
        default:
                max_rss_queues = IGB_MAX_RX_QUEUES;
                break;
                /* Device supports enough interrupts without queue pairing. */
                break;
        case e1000_82576:
 -              /*
 -               * If VFs are going to be allocated with RSS queues then we
 +              /* If VFs are going to be allocated with RSS queues then we
                 * should pair the queues in order to conserve interrupts due
                 * to limited supply.
                 */
                /* fall through */
        case e1000_82580:
        case e1000_i350:
 +      case e1000_i354:
        case e1000_i210:
        default:
 -              /*
 -               * If rss_queues > half of max_rss_queues, pair the queues in
 +              /* If rss_queues > half of max_rss_queues, pair the queues in
                 * order to conserve interrupts due to limited supply.
                 */
                if (adapter->rss_queues > (max_rss_queues / 2))
  }
  
  /**
 - * igb_sw_init - Initialize general software structures (struct igb_adapter)
 - * @adapter: board private structure to initialize
 + *  igb_sw_init - Initialize general software structures (struct igb_adapter)
 + *  @adapter: board private structure to initialize
   *
 - * igb_sw_init initializes the Adapter private data structure.
 - * Fields are initialized based on PCI device information and
 - * OS network device settings (MTU size).
 + *  igb_sw_init initializes the Adapter private data structure.
 + *  Fields are initialized based on PCI device information and
 + *  OS network device settings (MTU size).
   **/
  static int igb_sw_init(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_open - Called when a network interface is made active
 - * @netdev: network interface device structure
 + *  igb_open - Called when a network interface is made active
 + *  @netdev: network interface device structure
   *
 - * Returns 0 on success, negative value on failure
 + *  Returns 0 on success, negative value on failure
   *
 - * The open entry point is called when a network interface is made
 - * active by the system (IFF_UP).  At this point all resources needed
 - * for transmit and receive operations are allocated, the interrupt
 - * handler is registered with the OS, the watchdog timer is started,
 - * and the stack is notified that the interface is ready.
 + *  The open entry point is called when a network interface is made
 + *  active by the system (IFF_UP).  At this point all resources needed
 + *  for transmit and receive operations are allocated, the interrupt
 + *  handler is registered with the OS, the watchdog timer is started,
 + *  and the stack is notified that the interface is ready.
   **/
  static int __igb_open(struct net_device *netdev, bool resuming)
  {
        /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call pci_request_irq, so we have to setup our
 -       * clean_rx handler before we do so.  */
 +       * clean_rx handler before we do so.
 +       */
        igb_configure(adapter);
  
        err = igb_request_irq(adapter);
@@@ -2843,15 -2803,15 +2843,15 @@@ static int igb_open(struct net_device *
  }
  
  /**
 - * igb_close - Disables a network interface
 - * @netdev: network interface device structure
 + *  igb_close - Disables a network interface
 + *  @netdev: network interface device structure
   *
 - * Returns 0, this is not allowed to fail
 + *  Returns 0, this is not allowed to fail
   *
 - * The close entry point is called when an interface is de-activated
 - * by the OS.  The hardware is still under the driver's control, but
 - * needs to be disabled.  A global MAC reset is issued to stop the
 - * hardware, and all transmit and receive resources are freed.
 + *  The close entry point is called when an interface is de-activated
 + *  by the OS.  The hardware is still under the driver's control, but
 + *  needs to be disabled.  A global MAC reset is issued to stop the
 + *  hardware, and all transmit and receive resources are freed.
   **/
  static int __igb_close(struct net_device *netdev, bool suspending)
  {
@@@ -2880,10 -2840,10 +2880,10 @@@ static int igb_close(struct net_device 
  }
  
  /**
 - * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 - * @tx_ring: tx descriptor ring (for a specific queue) to setup
 + *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
 + *  @tx_ring: tx descriptor ring (for a specific queue) to setup
   *
 - * Return 0 on success, negative on failure
 + *  Return 0 on success, negative on failure
   **/
  int igb_setup_tx_resources(struct igb_ring *tx_ring)
  {
  }
  
  /**
 - * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 - *                              (Descriptors) for all queues
 - * @adapter: board private structure
 + *  igb_setup_all_tx_resources - wrapper to allocate Tx resources
 + *                             (Descriptors) for all queues
 + *  @adapter: board private structure
   *
 - * Return 0 on success, negative on failure
 + *  Return 0 on success, negative on failure
   **/
  static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_setup_tctl - configure the transmit control registers
 - * @adapter: Board private structure
 + *  igb_setup_tctl - configure the transmit control registers
 + *  @adapter: Board private structure
   **/
  void igb_setup_tctl(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_configure_tx_ring - Configure transmit ring after Reset
 - * @adapter: board private structure
 - * @ring: tx ring to configure
 + *  igb_configure_tx_ring - Configure transmit ring after Reset
 + *  @adapter: board private structure
 + *  @ring: tx ring to configure
   *
 - * Configure a transmit ring after a reset.
 + *  Configure a transmit ring after a reset.
   **/
  void igb_configure_tx_ring(struct igb_adapter *adapter,
                             struct igb_ring *ring)
        mdelay(10);
  
        wr32(E1000_TDLEN(reg_idx),
 -                      ring->count * sizeof(union e1000_adv_tx_desc));
 +           ring->count * sizeof(union e1000_adv_tx_desc));
        wr32(E1000_TDBAL(reg_idx),
 -                      tdba & 0x00000000ffffffffULL);
 +           tdba & 0x00000000ffffffffULL);
        wr32(E1000_TDBAH(reg_idx), tdba >> 32);
  
        ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
  }
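
The TDBAL/TDBAH pair written above is the standard split of a 64-bit ring base address across two 32-bit registers, with TDLEN carrying the ring size in bytes. In isolation (variable names here are stand-ins; the register writes are only indicated):

    unsigned long long tdba = ring_dma;  /* 64-bit DMA base of the ring (assumed)   */
    unsigned int lo = (unsigned int)(tdba & 0x00000000ffffffffULL); /* -> TDBAL     */
    unsigned int hi = (unsigned int)(tdba >> 32);                   /* -> TDBAH     */
    unsigned int len = count * 16;  /* bytes; advanced Tx descriptors are 16 bytes  */
    /* wr32(E1000_TDLEN(idx), len); wr32(E1000_TDBAL(idx), lo); wr32(E1000_TDBAH(idx), hi); */
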
  
  /**
 - * igb_configure_tx - Configure transmit Unit after Reset
 - * @adapter: board private structure
 + *  igb_configure_tx - Configure transmit Unit after Reset
 + *  @adapter: board private structure
   *
 - * Configure the Tx unit of the MAC after a reset.
 + *  Configure the Tx unit of the MAC after a reset.
   **/
  static void igb_configure_tx(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 - * @rx_ring:    rx descriptor ring (for a specific queue) to setup
 + *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
 + *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
   *
 - * Returns 0 on success, negative on failure
 + *  Returns 0 on success, negative on failure
   **/
  int igb_setup_rx_resources(struct igb_ring *rx_ring)
  {
  }
  
  /**
 - * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 - *                              (Descriptors) for all queues
 - * @adapter: board private structure
 + *  igb_setup_all_rx_resources - wrapper to allocate Rx resources
 + *                             (Descriptors) for all queues
 + *  @adapter: board private structure
   *
 - * Return 0 on success, negative on failure
 + *  Return 0 on success, negative on failure
   **/
  static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_setup_mrqc - configure the multiple receive queue control registers
 - * @adapter: Board private structure
 + *  igb_setup_mrqc - configure the multiple receive queue control registers
 + *  @adapter: Board private structure
   **/
  static void igb_setup_mrqc(struct igb_adapter *adapter)
  {
                break;
        }
  
 -      /*
 -       * Populate the indirection table 4 entries at a time.  To do this
 +      /* Populate the indirection table 4 entries at a time.  To do this
         * we are generating the results for n and n+2 and then interleaving
         * those with the results for n+1 and n+3.
         */
                wr32(E1000_RETA(j), reta);
        }
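
To make the comment above concrete: each 32-bit RETA register holds four one-byte indirection entries, so the table is written four entries per register. A hedged reconstruction with a trivial round-robin spread (the driver's actual entry computation, including the n/n+2 and n+1/n+3 interleave, is elided from this hunk):

    unsigned char tbl[128];              /* 128-entry RSS indirection table */
    unsigned int num_queues = 4;         /* example queue count             */
    int n, j;

    for (n = 0; n < 128; n++)
            tbl[n] = n % num_queues;     /* simple round-robin spread       */

    for (j = 0; j < 128; j += 4) {
            unsigned int reta = tbl[j] |
                                (tbl[j + 1] << 8) |
                                (tbl[j + 2] << 16) |
                                (tbl[j + 3] << 24);
            (void)reta;  /* would be written to the RETA register covering this group */
    }
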
  
 -      /*
 -       * Disable raw packet checksumming so that RSS hash is placed in
 +      /* Disable raw packet checksumming so that RSS hash is placed in
         * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
         * offloads as they are enabled by default
         */
  
        /* If VMDq is enabled then we set the appropriate mode for that, else
         * we default to RSS so that an RSS hash is calculated per packet even
 -       * if we are only using one queue */
 +       * if we are only using one queue
 +       */
        if (adapter->vfs_allocated_count) {
                if (hw->mac.type > e1000_82575) {
                        /* Set the default pool for the PF's first queue */
  }
  
  /**
 - * igb_setup_rctl - configure the receive control registers
 - * @adapter: Board private structure
 + *  igb_setup_rctl - configure the receive control registers
 + *  @adapter: Board private structure
   **/
  void igb_setup_rctl(struct igb_adapter *adapter)
  {
        rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
                (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
  
 -      /*
 -       * enable stripping of CRC. It's unlikely this will break BMC
 +      /* enable stripping of CRC. It's unlikely this will break BMC
         * redirection as it did with e1000. Newer features require
         * that the HW strips the CRC.
         */
        /* This is useful for sniffing bad packets. */
        if (adapter->netdev->features & NETIF_F_RXALL) {
                /* UPE and MPE will be handled by normal PROMISC logic
 -               * in e1000e_set_rx_mode */
 +               * in e1000e_set_rx_mode
 +               */
                rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
                         E1000_RCTL_BAM | /* RX All Bcast Pkts */
                         E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@@ -3260,8 -3221,7 +3260,8 @@@ static inline int igb_set_vf_rlpml(stru
        u32 vmolr;
  
        /* if it isn't the PF check to see if VFs are enabled and
 -       * increase the size to support vlan tags */
 +       * increase the size to support vlan tags
 +       */
        if (vfn < adapter->vfs_allocated_count &&
            adapter->vf_data[vfn].vlans_enabled)
                size += VLAN_TAG_SIZE;
  }
  
  /**
 - * igb_rlpml_set - set maximum receive packet size
 - * @adapter: board private structure
 + *  igb_rlpml_set - set maximum receive packet size
 + *  @adapter: board private structure
   *
 - * Configure maximum receivable packet size.
 + *  Configure maximum receivable packet size.
   **/
  static void igb_rlpml_set(struct igb_adapter *adapter)
  {
  
        if (pf_id) {
                igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
 -              /*
 -               * If we're in VMDQ or SR-IOV mode, then set global RLPML
 +              /* If we're in VMDQ or SR-IOV mode, then set global RLPML
                 * to our max jumbo frame size, in case we need to enable
                 * jumbo frames on one of the rings later.
                 * This will not pass over-length frames into the default
@@@ -3306,16 -3267,17 +3306,16 @@@ static inline void igb_set_vmolr(struc
        struct e1000_hw *hw = &adapter->hw;
        u32 vmolr;
  
 -      /*
 -       * This register exists only on 82576 and newer so if we are older then
 +      /* This register exists only on 82576 and newer so if we are older then
         * we should exit and do nothing
         */
        if (hw->mac.type < e1000_82576)
                return;
  
        vmolr = rd32(E1000_VMOLR(vfn));
 -      vmolr |= E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
 +      vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
        if (aupe)
 -              vmolr |= E1000_VMOLR_AUPE;        /* Accept untagged packets */
 +              vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
        else
                vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
  
  
        if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
                vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
 -      /*
 -       * for VMDq only allow the VFs and pool 0 to accept broadcast and
 +      /* for VMDq only allow the VFs and pool 0 to accept broadcast and
         * multicast packets
         */
        if (vfn <= adapter->vfs_allocated_count)
 -              vmolr |= E1000_VMOLR_BAM;          /* Accept broadcast */
 +              vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
  
        wr32(E1000_VMOLR(vfn), vmolr);
  }
  
  /**
 - * igb_configure_rx_ring - Configure a receive ring after Reset
 - * @adapter: board private structure
 - * @ring: receive ring to be configured
 + *  igb_configure_rx_ring - Configure a receive ring after Reset
 + *  @adapter: board private structure
 + *  @ring: receive ring to be configured
   *
 - * Configure the Rx unit of the MAC after a reset.
 + *  Configure the Rx unit of the MAC after a reset.
   **/
  void igb_configure_rx_ring(struct igb_adapter *adapter,
 -                           struct igb_ring *ring)
 +                         struct igb_ring *ring)
  {
        struct e1000_hw *hw = &adapter->hw;
        u64 rdba = ring->dma;
             rdba & 0x00000000ffffffffULL);
        wr32(E1000_RDBAH(reg_idx), rdba >> 32);
        wr32(E1000_RDLEN(reg_idx),
 -                     ring->count * sizeof(union e1000_adv_rx_desc));
 +           ring->count * sizeof(union e1000_adv_rx_desc));
  
        /* initialize head and tail */
        ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
        wr32(E1000_RXDCTL(reg_idx), rxdctl);
  }
  
- static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
-                                 struct igb_ring *rx_ring)
- {
- #define IGB_MAX_BUILD_SKB_SIZE \
-       (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
-        (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
-       /* set build_skb flag */
-       if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
-               set_ring_build_skb_enabled(rx_ring);
-       else
-               clear_ring_build_skb_enabled(rx_ring);
- }
  /**
 - * igb_configure_rx - Configure receive Unit after Reset
 - * @adapter: board private structure
 + *  igb_configure_rx - Configure receive Unit after Reset
 + *  @adapter: board private structure
   *
 - * Configure the Rx unit of the MAC after a reset.
 + *  Configure the Rx unit of the MAC after a reset.
   **/
  static void igb_configure_rx(struct igb_adapter *adapter)
  {
  
        /* set the correct pool for the PF default MAC address in entry 0 */
        igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
 -                       adapter->vfs_allocated_count);
 +                       adapter->vfs_allocated_count);
  
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
 -       * the Base and Length of the Rx Descriptor Ring */
 +       * the Base and Length of the Rx Descriptor Ring
 +       */
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igb_ring *rx_ring = adapter->rx_ring[i];
-               igb_set_rx_buffer_len(adapter, rx_ring);
-               igb_configure_rx_ring(adapter, rx_ring);
-       }
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
  }
  
  /**
 - * igb_free_tx_resources - Free Tx Resources per Queue
 - * @tx_ring: Tx descriptor ring for a specific queue
 + *  igb_free_tx_resources - Free Tx Resources per Queue
 + *  @tx_ring: Tx descriptor ring for a specific queue
   *
 - * Free all transmit software resources
 + *  Free all transmit software resources
   **/
  void igb_free_tx_resources(struct igb_ring *tx_ring)
  {
  }
  
  /**
 - * igb_free_all_tx_resources - Free Tx Resources for All Queues
 - * @adapter: board private structure
 + *  igb_free_all_tx_resources - Free Tx Resources for All Queues
 + *  @adapter: board private structure
   *
 - * Free all transmit software resources
 + *  Free all transmit software resources
   **/
  static void igb_free_all_tx_resources(struct igb_adapter *adapter)
  {
@@@ -3488,8 -3433,8 +3471,8 @@@ void igb_unmap_and_free_tx_resource(str
  }
  
  /**
 - * igb_clean_tx_ring - Free Tx Buffers
 - * @tx_ring: ring to be cleaned
 + *  igb_clean_tx_ring - Free Tx Buffers
 + *  @tx_ring: ring to be cleaned
   **/
  static void igb_clean_tx_ring(struct igb_ring *tx_ring)
  {
  }
  
  /**
 - * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 - * @adapter: board private structure
 + *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
 + *  @adapter: board private structure
   **/
  static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_free_rx_resources - Free Rx Resources
 - * @rx_ring: ring to clean the resources from
 + *  igb_free_rx_resources - Free Rx Resources
 + *  @rx_ring: ring to clean the resources from
   *
 - * Free all receive software resources
 + *  Free all receive software resources
   **/
  void igb_free_rx_resources(struct igb_ring *rx_ring)
  {
  }
  
  /**
 - * igb_free_all_rx_resources - Free Rx Resources for All Queues
 - * @adapter: board private structure
 + *  igb_free_all_rx_resources - Free Rx Resources for All Queues
 + *  @adapter: board private structure
   *
 - * Free all receive software resources
 + *  Free all receive software resources
   **/
  static void igb_free_all_rx_resources(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_clean_rx_ring - Free Rx Buffers per Queue
 - * @rx_ring: ring to free buffers from
 + *  igb_clean_rx_ring - Free Rx Buffers per Queue
 + *  @rx_ring: ring to free buffers from
   **/
  static void igb_clean_rx_ring(struct igb_ring *rx_ring)
  {
  }
  
  /**
 - * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 - * @adapter: board private structure
 + *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
 + *  @adapter: board private structure
   **/
  static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
  {
  }
  
  /**
 - * igb_set_mac - Change the Ethernet Address of the NIC
 - * @netdev: network interface device structure
 - * @p: pointer to an address structure
 + *  igb_set_mac - Change the Ethernet Address of the NIC
 + *  @netdev: network interface device structure
 + *  @p: pointer to an address structure
   *
 - * Returns 0 on success, negative on failure
 + *  Returns 0 on success, negative on failure
   **/
  static int igb_set_mac(struct net_device *netdev, void *p)
  {
  
        /* set the correct pool for the new PF MAC address in entry 0 */
        igb_rar_set_qsel(adapter, hw->mac.addr, 0,
 -                       adapter->vfs_allocated_count);
 +                       adapter->vfs_allocated_count);
  
        return 0;
  }
  
  /**
 - * igb_write_mc_addr_list - write multicast addresses to MTA
 - * @netdev: network interface device structure
 + *  igb_write_mc_addr_list - write multicast addresses to MTA
 + *  @netdev: network interface device structure
   *
 - * Writes multicast address list to the MTA hash table.
 - * Returns: -ENOMEM on failure
 - *                0 on no addresses written
 - *                X on writing X addresses to MTA
 + *  Writes multicast address list to the MTA hash table.
 + *  Returns: -ENOMEM on failure
 + *           0 on no addresses written
 + *           X on writing X addresses to MTA
   **/
  static int igb_write_mc_addr_list(struct net_device *netdev)
  {
  }
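
Given the return convention documented above, a caller can treat -ENOMEM as a cue to fall back to multicast-promiscuous mode, which is exactly what igb_set_rx_mode does further down in this diff. A simplified sketch of that call site (the flag names are the driver's; surrounding wiring is trimmed):

    u32 rctl = 0, vmolr = 0;             /* register images, simplified */
    int count = igb_write_mc_addr_list(netdev);

    if (count < 0) {
            /* MTA write failed: accept all multicast instead */
            rctl |= E1000_RCTL_MPE;
            vmolr |= E1000_VMOLR_MPME;
    } else if (count) {
            vmolr |= E1000_VMOLR_ROMPE;
    }
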
  
  /**
 - * igb_write_uc_addr_list - write unicast addresses to RAR table
 - * @netdev: network interface device structure
 + *  igb_write_uc_addr_list - write unicast addresses to RAR table
 + *  @netdev: network interface device structure
   *
 - * Writes unicast address list to the RAR table.
 - * Returns: -ENOMEM on failure/insufficient address space
 - *                0 on no addresses written
 - *                X on writing X addresses to the RAR table
 + *  Writes unicast address list to the RAR table.
 + *  Returns: -ENOMEM on failure/insufficient address space
 + *           0 on no addresses written
 + *           X on writing X addresses to the RAR table
   **/
  static int igb_write_uc_addr_list(struct net_device *netdev)
  {
                        if (!rar_entries)
                                break;
                        igb_rar_set_qsel(adapter, ha->addr,
 -                                       rar_entries--,
 -                                       vfn);
 +                                       rar_entries--,
 +                                       vfn);
                        count++;
                }
        }
  }
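
Since the hunk above only shows the slot write, here is a minimal userspace sketch of the top-down RAR accounting and the -ENOMEM/0/X return contract the kernel-doc describes; struct mac, rar_write() and the slot count are illustrative stand-ins, not the driver's API:

#include <stdio.h>

struct mac { unsigned char addr[6]; };

/* stand-in for igb_rar_set_qsel(): program one exact-match slot */
static void rar_write(int slot, const struct mac *m)
{
        printf("slot %2d <- %02x:%02x:%02x:%02x:%02x:%02x\n", slot,
               m->addr[0], m->addr[1], m->addr[2],
               m->addr[3], m->addr[4], m->addr[5]);
}

/* mirrors the -ENOMEM/0/X contract documented above; rar_entries is
 * the highest free slot index, filled from the top down */
static int write_uc_list(const struct mac *list, int n, int rar_entries)
{
        int count = 0;

        while (count < n) {
                if (!rar_entries)
                        return -1;      /* -ENOMEM analogue: list too long */
                rar_write(rar_entries--, &list[count++]);
        }
        return count;
}

int main(void)
{
        struct mac list[2] = { { { 0, 1, 2, 3, 4, 5 } },
                               { { 0, 1, 2, 3, 4, 6 } } };

        printf("wrote %d\n", write_uc_list(list, 2, 14));
        return 0;
}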
  
  /**
 - * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 - * @netdev: network interface device structure
 + *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 + *  @netdev: network interface device structure
   *
 - * The set_rx_mode entry point is called whenever the unicast or multicast
 - * address lists or the network interface flags are updated.  This routine is
 - * responsible for configuring the hardware for proper unicast, multicast,
 - * promiscuous mode, and all-multi behavior.
 + *  The set_rx_mode entry point is called whenever the unicast or multicast
 + *  address lists or the network interface flags are updated.  This routine is
 + *  responsible for configuring the hardware for proper unicast, multicast,
 + *  promiscuous mode, and all-multi behavior.
   **/
  static void igb_set_rx_mode(struct net_device *netdev)
  {
                        rctl |= E1000_RCTL_MPE;
                        vmolr |= E1000_VMOLR_MPME;
                } else {
 -                      /*
 -                       * Write addresses to the MTA, if the attempt fails
 +                      /* Write addresses to the MTA, if the attempt fails
                         * then we should just turn on promiscuous mode so
                         * that we can at least receive multicast traffic
                         */
                                vmolr |= E1000_VMOLR_ROMPE;
                        }
                }
 -              /*
 -               * Write addresses to available RAR registers, if there is not
 +              /* Write addresses to available RAR registers, if there is not
                 * sufficient space to store all the addresses then enable
                 * unicast promiscuous mode
                 */
        }
        wr32(E1000_RCTL, rctl);
  
 -      /*
 -       * In order to support SR-IOV and eventually VMDq it is necessary to set
 +      /* In order to support SR-IOV and eventually VMDq it is necessary to set
         * the VMOLR to enable the appropriate modes.  Without this workaround
         * we will have issues with VLAN tag stripping not being done for frames
         * that are only arriving because we are the default pool
                return;
  
        vmolr |= rd32(E1000_VMOLR(vfn)) &
 -               ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
 +               ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
        wr32(E1000_VMOLR(vfn), vmolr);
        igb_restore_vf_multicasts(adapter);
  }
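
The rx_mode comments above describe a fallback ladder: program exact filters first, and widen to promiscuous reception only when a table write fails. A hedged sketch of that decision, with invented mode bits standing in for the E1000_RCTL_* masks:

#include <stdio.h>

/* illustrative mode bits, not the driver's register masks */
#define MODE_MPE  0x1u   /* multicast promiscuous enable */
#define MODE_UPE  0x2u   /* unicast promiscuous enable   */

/* stand-ins for igb_write_mc_addr_list()/igb_write_uc_addr_list() */
static int write_mc_list(int n_mc, int mta_ok)
{
        return mta_ok ? n_mc : -1;
}

static int write_uc_list(int n_uc, int rar_free)
{
        return n_uc <= rar_free ? n_uc : -1;
}

static unsigned int pick_rx_mode(int n_mc, int n_uc, int mta_ok, int rar_free)
{
        unsigned int rctl = 0;

        /* MTA write failed: at least keep receiving multicast */
        if (write_mc_list(n_mc, mta_ok) < 0)
                rctl |= MODE_MPE;
        /* exact-match table too small: go unicast promiscuous */
        if (write_uc_list(n_uc, rar_free) < 0)
                rctl |= MODE_UPE;
        return rctl;
}

int main(void)
{
        /* 20 unicast addresses but only 15 free RAR slots */
        printf("mode=0x%x\n", pick_rx_mode(200, 20, 1, 15));
        return 0;
}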
@@@ -3840,8 -3788,7 +3823,8 @@@ static void igb_spoof_check(struct igb_
  }
  
  /* Need to wait a few seconds after link up to get diagnostic information from
 - * the phy */
 + * the phy
 + */
  static void igb_update_phy_info(unsigned long data)
  {
        struct igb_adapter *adapter = (struct igb_adapter *) data;
  }
  
  /**
 - * igb_has_link - check shared code for link and determine up/down
 - * @adapter: pointer to driver private info
 + *  igb_has_link - check shared code for link and determine up/down
 + *  @adapter: pointer to driver private info
   **/
  bool igb_has_link(struct igb_adapter *adapter)
  {
@@@ -3895,16 -3842,17 +3878,16 @@@ static bool igb_thermal_sensor_event(st
                ctrl_ext = rd32(E1000_CTRL_EXT);
  
                if ((hw->phy.media_type == e1000_media_type_copper) &&
 -                  !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
 +                  !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
                        ret = !!(thstat & event);
 -              }
        }
  
        return ret;
  }
  
  /**
 - * igb_watchdog - Timer Call-back
 - * @data: pointer to adapter cast into an unsigned long
 + *  igb_watchdog - Timer Call-back
 + *  @data: pointer to adapter cast into an unsigned long
   **/
  static void igb_watchdog(unsigned long data)
  {
  static void igb_watchdog_task(struct work_struct *work)
  {
        struct igb_adapter *adapter = container_of(work,
 -                                                 struct igb_adapter,
 -                                                   watchdog_task);
 +                                                 struct igb_adapter,
 +                                                 watchdog_task);
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 link;
                if (!netif_carrier_ok(netdev)) {
                        u32 ctrl;
                        hw->mac.ops.get_speed_and_duplex(hw,
 -                                                       &adapter->link_speed,
 -                                                       &adapter->link_duplex);
 +                                                       &adapter->link_speed,
 +                                                       &adapter->link_duplex);
  
                        ctrl = rd32(E1000_CTRL);
                        /* Links status message must follow this format */
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
 -                       * (Do the reset outside of interrupt context). */
 +                       * (Do the reset outside of interrupt context).
 +                       */
                        if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
                                adapter->tx_timeout_count++;
                                schedule_work(&adapter->reset_task);
                set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
        }
  
 -      /* Cause software interrupt to ensure rx ring is cleaned */
 +      /* Cause software interrupt to ensure Rx ring is cleaned */
        if (adapter->msix_entries) {
                u32 eics = 0;
                for (i = 0; i < adapter->num_q_vectors; i++)
@@@ -4056,20 -4003,20 +4039,20 @@@ enum latency_range 
  };
  
  /**
 - * igb_update_ring_itr - update the dynamic ITR value based on packet size
 + *  igb_update_ring_itr - update the dynamic ITR value based on packet size
 + *  @q_vector: pointer to q_vector
   *
 - *      Stores a new ITR value based on strictly on packet size.  This
 - *      algorithm is less sophisticated than that used in igb_update_itr,
 - *      due to the difficulty of synchronizing statistics across multiple
 - *      receive rings.  The divisors and thresholds used by this function
 - *      were determined based on theoretical maximum wire speed and testing
 - *      data, in order to minimize response time while increasing bulk
 - *      throughput.
 - *      This functionality is controlled by the InterruptThrottleRate module
 - *      parameter (see igb_param.c)
 - *      NOTE:  This function is called only when operating in a multiqueue
 - *             receive environment.
 - * @q_vector: pointer to q_vector
 + *  Stores a new ITR value based strictly on packet size.  This
 + *  algorithm is less sophisticated than that used in igb_update_itr,
 + *  due to the difficulty of synchronizing statistics across multiple
 + *  receive rings.  The divisors and thresholds used by this function
 + *  were determined based on theoretical maximum wire speed and testing
 + *  data, in order to minimize response time while increasing bulk
 + *  throughput.
 + *  This functionality is controlled by the InterruptThrottleRate module
 + *  parameter (see igb_param.c)
 + *  NOTE:  This function is called only when operating in a multiqueue
 + *         receive environment.
   **/
  static void igb_update_ring_itr(struct igb_q_vector *q_vector)
  {
@@@ -4130,21 -4077,20 +4113,21 @@@ clear_counts
  }
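
igb_update_ring_itr() classifies traffic purely by average packet size, as the kernel-doc says. The bucket thresholds below are invented for illustration; the driver's real divisors live in the elided function body:

#include <stdio.h>

enum latency_range { lowest_latency, low_latency, bulk_latency };

/* pick a latency bucket from bytes/packets seen since the last
 * interrupt; the 128/1024 thresholds are made up for this sketch */
static enum latency_range classify(unsigned long packets,
                                   unsigned long bytes)
{
        unsigned long avg_wire_size = packets ? bytes / packets : 0;

        if (avg_wire_size < 128)
                return lowest_latency;  /* small frames: favor response time */
        if (avg_wire_size < 1024)
                return low_latency;
        return bulk_latency;            /* large frames: favor throughput */
}

int main(void)
{
        printf("%d\n", classify(100, 4000));    /* 40B avg   -> lowest_latency */
        printf("%d\n", classify(100, 150000));  /* 1500B avg -> bulk_latency   */
        return 0;
}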
  
  /**
 - * igb_update_itr - update the dynamic ITR value based on statistics
 - *      Stores a new ITR value based on packets and byte
 - *      counts during the last interrupt.  The advantage of per interrupt
 - *      computation is faster updates and more accurate ITR for the current
 - *      traffic pattern.  Constants in this function were computed
 - *      based on theoretical maximum wire speed and thresholds were set based
 - *      on testing data as well as attempting to minimize response time
 - *      while increasing bulk throughput.
 - *      this functionality is controlled by the InterruptThrottleRate module
 - *      parameter (see igb_param.c)
 - *      NOTE:  These calculations are only valid when operating in a single-
 - *             queue environment.
 - * @q_vector: pointer to q_vector
 - * @ring_container: ring info to update the itr for
 + *  igb_update_itr - update the dynamic ITR value based on statistics
 + *  @q_vector: pointer to q_vector
 + *  @ring_container: ring info to update the itr for
 + *
 + *  Stores a new ITR value based on packets and byte
 + *  counts during the last interrupt.  The advantage of per interrupt
 + *  computation is faster updates and more accurate ITR for the current
 + *  traffic pattern.  Constants in this function were computed
 + *  based on theoretical maximum wire speed and thresholds were set based
 + *  on testing data as well as attempting to minimize response time
 + *  while increasing bulk throughput.
 + *  This functionality is controlled by the InterruptThrottleRate module
 + *  parameter (see igb_param.c)
 + *  NOTE:  These calculations are only valid when operating in a single-
 + *         queue environment.
   **/
  static void igb_update_itr(struct igb_q_vector *q_vector,
                           struct igb_ring_container *ring_container)
@@@ -4242,12 -4188,12 +4225,12 @@@ set_itr_now
        if (new_itr != q_vector->itr_val) {
                /* this attempts to bias the interrupt rate towards Bulk
                 * by adding intermediate steps when interrupt rate is
 -               * increasing */
 +               * increasing
 +               */
                new_itr = new_itr > q_vector->itr_val ?
 -                           max((new_itr * q_vector->itr_val) /
 -                               (new_itr + (q_vector->itr_val >> 2)),
 -                               new_itr) :
 -                           new_itr;
 +                        max((new_itr * q_vector->itr_val) /
 +                        (new_itr + (q_vector->itr_val >> 2)),
 +                        new_itr) : new_itr;
                /* Don't write the value here; it resets the adapter's
                 * internal timer, and causes us to delay far longer than
                 * we should between interrupts.  Instead, we write the ITR
@@@ -4374,8 -4320,8 +4357,8 @@@ static void igb_tx_csum(struct igb_rin
                default:
                        if (unlikely(net_ratelimit())) {
                                dev_warn(tx_ring->dev,
 -                               "partial checksum but proto=%x!\n",
 -                               first->protocol);
 +                                       "partial checksum but proto=%x!\n",
 +                                       first->protocol);
                        }
                        break;
                }
                default:
                        if (unlikely(net_ratelimit())) {
                                dev_warn(tx_ring->dev,
 -                               "partial checksum but l4 proto=%x!\n",
 -                               l4_hdr);
 +                                       "partial checksum but l4 proto=%x!\n",
 +                                       l4_hdr);
                        }
                        break;
                }
@@@ -4551,7 -4497,8 +4534,7 @@@ static void igb_tx_map(struct igb_ring 
        /* set the timestamp */
        first->time_stamp = jiffies;
  
 -      /*
 -       * Force memory writes to complete before letting h/w know there
 +      /* Force memory writes to complete before letting h/w know there
         * are new descriptors to fetch.  (Only applicable for weak-ordered
         * memory model archs, such as IA-64).
         *
        writel(i, tx_ring->tail);
  
        /* we need this if more than one processor can write to our tail
 -       * at a time, it syncronizes IO on IA64/Altix systems */
 +       * at a time, it synchronizes IO on IA64/Altix systems
 +       */
        mmiowb();
  
        return;
@@@ -4603,13 -4549,11 +4586,13 @@@ static int __igb_maybe_stop_tx(struct i
  
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
 -       * but since that doesn't exist yet, just open code it. */
 +       * but since that doesn't exist yet, just open code it.
 +       */
        smp_mb();
  
        /* We need to check again in a case another CPU has just
 -       * made room available. */
 +       * made room available.
 +       */
        if (igb_desc_unused(tx_ring) < size)
                return -EBUSY;
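
The open-coded smp_mb() above is the classic stop-queue/barrier/recheck handshake: without the full barrier, a completion running on another CPU could free descriptors between the stop and the re-test, and the queue would stay stopped forever. A userspace sketch of the same ordering using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct txq {
        atomic_uint head, tail;  /* producer/consumer ring indices */
        atomic_bool stopped;     /* netif_stop_subqueue() stand-in */
        unsigned int count;      /* ring size                      */
};

static unsigned int desc_unused(struct txq *q)
{
        return q->count - (atomic_load(&q->head) - atomic_load(&q->tail));
}

/* 0 if there is room for size descriptors, -1 (-EBUSY analogue) if not */
static int maybe_stop_tx(struct txq *q, unsigned int size)
{
        if (desc_unused(q) >= size)
                return 0;

        atomic_store(&q->stopped, true);

        /* full barrier: the stop flag must be visible before tail is
         * re-read, mirroring the open-coded smp_mb() above */
        atomic_thread_fence(memory_order_seq_cst);

        /* another CPU may have reclaimed descriptors in the window */
        if (desc_unused(q) < size)
                return -1;

        atomic_store(&q->stopped, false);  /* room appeared: restart */
        return 0;
}

int main(void)
{
        struct txq q = { .head = 250, .tail = 10, .count = 256 };

        return maybe_stop_tx(&q, 21) ? 0 : 1;  /* 16 unused < 21: stops */
}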
  
@@@ -4633,6 -4577,7 +4616,6 @@@ static inline int igb_maybe_stop_tx(str
  netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
                                struct igb_ring *tx_ring)
  {
 -      struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
        struct igb_tx_buffer *first;
        int tso;
        u32 tx_flags = 0;
  
        skb_tx_timestamp(skb);
  
 -      if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
 -                   !(adapter->ptp_tx_skb))) {
 -              skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 -              tx_flags |= IGB_TX_FLAGS_TSTAMP;
 +      if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
 +              struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
  
 -              adapter->ptp_tx_skb = skb_get(skb);
 -              adapter->ptp_tx_start = jiffies;
 -              if (adapter->hw.mac.type == e1000_82576)
 -                      schedule_work(&adapter->ptp_tx_work);
 +              if (!(adapter->ptp_tx_skb)) {
 +                      skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 +                      tx_flags |= IGB_TX_FLAGS_TSTAMP;
 +
 +                      adapter->ptp_tx_skb = skb_get(skb);
 +                      adapter->ptp_tx_start = jiffies;
 +                      if (adapter->hw.mac.type == e1000_82576)
 +                              schedule_work(&adapter->ptp_tx_work);
 +              }
        }
  
        if (vlan_tx_tag_present(skb)) {
@@@ -4735,7 -4677,8 +4718,7 @@@ static netdev_tx_t igb_xmit_frame(struc
                return NETDEV_TX_OK;
        }
  
 -      /*
 -       * The minimum packet size with TCTL.PSP set is 17 so pad the skb
 +      /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
         * in order to meet this minimum size requirement.
         */
        if (unlikely(skb->len < 17)) {
  }
  
  /**
 - * igb_tx_timeout - Respond to a Tx Hang
 - * @netdev: network interface device structure
 + *  igb_tx_timeout - Respond to a Tx Hang
 + *  @netdev: network interface device structure
   **/
  static void igb_tx_timeout(struct net_device *netdev)
  {
@@@ -4779,12 -4722,13 +4762,12 @@@ static void igb_reset_task(struct work_
  }
  
  /**
 - * igb_get_stats64 - Get System Network Statistics
 - * @netdev: network interface device structure
 - * @stats: rtnl_link_stats64 pointer
 - *
 + *  igb_get_stats64 - Get System Network Statistics
 + *  @netdev: network interface device structure
 + *  @stats: rtnl_link_stats64 pointer
   **/
  static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
 -                                               struct rtnl_link_stats64 *stats)
 +                                              struct rtnl_link_stats64 *stats)
  {
        struct igb_adapter *adapter = netdev_priv(netdev);
  
  }
  
  /**
 - * igb_change_mtu - Change the Maximum Transfer Unit
 - * @netdev: network interface device structure
 - * @new_mtu: new value for maximum frame size
 + *  igb_change_mtu - Change the Maximum Transfer Unit
 + *  @netdev: network interface device structure
 + *  @new_mtu: new value for maximum frame size
   *
 - * Returns 0 on success, negative on failure
 + *  Returns 0 on success, negative on failure
   **/
  static int igb_change_mtu(struct net_device *netdev, int new_mtu)
  {
  }
  
  /**
 - * igb_update_stats - Update the board statistics counters
 - * @adapter: board private structure
 + *  igb_update_stats - Update the board statistics counters
 + *  @adapter: board private structure
   **/
 -
  void igb_update_stats(struct igb_adapter *adapter,
                      struct rtnl_link_stats64 *net_stats)
  {
  
  #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
  
 -      /*
 -       * Prevent stats update while adapter is being reset, or if the pci
 +      /* Prevent stats update while adapter is being reset, or if the pci
         * connection is down.
         */
        if (adapter->link_speed == 0)
        /* Rx Errors */
  
        /* RLEC on some newer hardware can be incorrect so build
 -       * our own version based on RUC and ROC */
 +       * our own version based on RUC and ROC
 +       */
        net_stats->rx_errors = adapter->stats.rxerrc +
                adapter->stats.crcerrs + adapter->stats.algnerrc +
                adapter->stats.ruc + adapter->stats.roc +
@@@ -5055,8 -5000,7 +5038,8 @@@ static irqreturn_t igb_msix_other(int i
                adapter->stats.doosync++;
                /* The DMA Out of Sync is also indication of a spoof event
                 * in IOV mode. Check the Wrong VM Behavior register to
 -               * see if it is really a spoof event. */
 +               * see if it is really a spoof event.
 +               */
                igb_check_wvbr(adapter);
        }
  
@@@ -5130,7 -5074,8 +5113,7 @@@ static void igb_update_tx_dca(struct ig
        if (hw->mac.type != e1000_82575)
                txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
  
 -      /*
 -       * We can enable relaxed ordering for reads, but not writes when
 +      /* We can enable relaxed ordering for reads, but not writes when
         * DCA is enabled.  This is due to a known issue in some chipsets
         * which will cause the DCA tag to be cleared.
         */
@@@ -5151,7 -5096,8 +5134,7 @@@ static void igb_update_rx_dca(struct ig
        if (hw->mac.type != e1000_82575)
                rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
  
 -      /*
 -       * We can enable relaxed ordering for reads, but not writes when
 +      /* We can enable relaxed ordering for reads, but not writes when
         * DCA is enabled.  This is due to a known issue in some chipsets
         * which will cause the DCA tag to be cleared.
         */
@@@ -5220,8 -5166,7 +5203,8 @@@ static int __igb_notify_dca(struct devi
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                        /* without this a class_device is left
 -                       * hanging around in the sysfs model */
 +                       * hanging around in the sysfs model
 +                       */
                        dca_remove_requester(dev);
                        dev_info(&pdev->dev, "DCA disabled\n");
                        adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
  }
  
  static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
 -                          void *p)
 +                        void *p)
  {
        int ret_val;
  
        ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
 -                                       __igb_notify_dca);
 +                                       __igb_notify_dca);
  
        return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
  }
@@@ -5253,9 -5198,6 +5236,9 @@@ static int igb_vf_configure(struct igb_
        eth_zero_addr(mac_addr);
        igb_set_vf_mac(adapter, vf, mac_addr);
  
 +      /* By default spoof check is enabled for all VFs */
 +      adapter->vf_data[vf].spoofchk_enabled = true;
 +
        return 0;
  }
  
@@@ -5314,7 -5256,7 +5297,7 @@@ static int igb_set_vf_promisc(struct ig
        struct vf_data_storage *vf_data = &adapter->vf_data[vf];
  
        vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
 -                          IGB_VF_FLAG_MULTI_PROMISC);
 +                          IGB_VF_FLAG_MULTI_PROMISC);
        vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
  
        if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
                vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
                *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
        } else {
 -              /*
 -               * if we have hashes and we are clearing a multicast promisc
 +              /* if we have hashes and we are clearing a multicast promisc
                 * flag we need to write the hashes to the MTA as this step
                 * was previously skipped
                 */
                return -EINVAL;
  
        return 0;
 -
  }
  
  static int igb_set_vf_multicasts(struct igb_adapter *adapter,
@@@ -5549,20 -5493,22 +5532,20 @@@ static int igb_ndo_set_vf_vlan(struct n
                         "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
                if (test_bit(__IGB_DOWN, &adapter->state)) {
                        dev_warn(&adapter->pdev->dev,
 -                               "The VF VLAN has been set,"
 -                               " but the PF device is not up.\n");
 +                               "The VF VLAN has been set, but the PF device is not up.\n");
                        dev_warn(&adapter->pdev->dev,
 -                               "Bring the PF device up before"
 -                               " attempting to use the VF device.\n");
 +                               "Bring the PF device up before attempting to use the VF device.\n");
                }
        } else {
                igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
 -                                 false, vf);
 +                           false, vf);
                igb_set_vmvir(adapter, vlan, vf);
                igb_set_vmolr(adapter, vf, true);
                adapter->vf_data[vf].pf_vlan = 0;
                adapter->vf_data[vf].pf_qos = 0;
 -       }
 +      }
  out:
 -       return err;
 +      return err;
  }
  
  static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
@@@ -5640,7 -5586,8 +5623,7 @@@ static void igb_vf_reset_msg(struct igb
  
  static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
  {
 -      /*
 -       * The VF MAC Address is stored in a packed array of bytes
 +      /* The VF MAC Address is stored in a packed array of bytes
         * starting at the second 32 bit word of the msg array
         */
        unsigned char *addr = (char *)&msg[1];
@@@ -5689,9 -5636,11 +5672,9 @@@ static void igb_rcv_msg_from_vf(struct 
        if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
                return;
  
 -      /*
 -       * until the vf completes a reset it should not be
 +      /* until the vf completes a reset it should not be
         * allowed to start any configuration.
         */
 -
        if (msgbuf[0] == E1000_VF_RESET) {
                igb_vf_reset_msg(adapter, vf);
                return;
                        retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
                else
                        dev_warn(&pdev->dev,
 -                               "VF %d attempted to override administratively "
 -                               "set MAC address\nReload the VF driver to "
 -                               "resume operations\n", vf);
 +                               "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
 +                               vf);
                break;
        case E1000_VF_SET_PROMISC:
                retval = igb_set_vf_promisc(adapter, msgbuf, vf);
                retval = -1;
                if (vf_data->pf_vlan)
                        dev_warn(&pdev->dev,
 -                               "VF %d attempted to override administratively "
 -                               "set VLAN tag\nReload the VF driver to "
 -                               "resume operations\n", vf);
 +                               "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
 +                               vf);
                else
                        retval = igb_set_vf_vlan(adapter, msgbuf, vf);
                break;
@@@ -5797,9 -5748,9 +5780,9 @@@ static void igb_set_uta(struct igb_adap
  }
  
  /**
 - * igb_intr_msi - Interrupt Handler
 - * @irq: interrupt number
 - * @data: pointer to a network interface device structure
 + *  igb_intr_msi - Interrupt Handler
 + *  @irq: interrupt number
 + *  @data: pointer to a network interface device structure
   **/
  static irqreturn_t igb_intr_msi(int irq, void *data)
  {
  }
  
  /**
 - * igb_intr - Legacy Interrupt Handler
 - * @irq: interrupt number
 - * @data: pointer to a network interface device structure
 + *  igb_intr - Legacy Interrupt Handler
 + *  @irq: interrupt number
 + *  @data: pointer to a network interface device structure
   **/
  static irqreturn_t igb_intr(int irq, void *data)
  {
        struct igb_q_vector *q_vector = adapter->q_vector[0];
        struct e1000_hw *hw = &adapter->hw;
        /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
 -       * need for the IMC write */
 +       * need for the IMC write
 +       */
        u32 icr = rd32(E1000_ICR);
  
        /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
 -       * not set, then the adapter didn't send an interrupt */
 +       * not set, then the adapter didn't send an interrupt
 +       */
        if (!(icr & E1000_ICR_INT_ASSERTED))
                return IRQ_NONE;
  
@@@ -5917,15 -5866,15 +5900,15 @@@ static void igb_ring_irq_enable(struct 
  }
  
  /**
 - * igb_poll - NAPI Rx polling callback
 - * @napi: napi polling structure
 - * @budget: count of how many packets we should handle
 + *  igb_poll - NAPI Rx polling callback
 + *  @napi: napi polling structure
 + *  @budget: count of how many packets we should handle
   **/
  static int igb_poll(struct napi_struct *napi, int budget)
  {
        struct igb_q_vector *q_vector = container_of(napi,
 -                                                   struct igb_q_vector,
 -                                                   napi);
 +                                                   struct igb_q_vector,
 +                                                   napi);
        bool clean_complete = true;
  
  #ifdef CONFIG_IGB_DCA
  }
  
  /**
 - * igb_clean_tx_irq - Reclaim resources after transmit completes
 - * @q_vector: pointer to q_vector containing needed info
 + *  igb_clean_tx_irq - Reclaim resources after transmit completes
 + *  @q_vector: pointer to q_vector containing needed info
   *
 - * returns true if ring is completely cleaned
 + *  returns true if ring is completely cleaned
   **/
  static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
  {
                struct e1000_hw *hw = &adapter->hw;
  
                /* Detect a transmit hang in hardware, this serializes the
 -               * check with the clearing of time_stamp and movement of i */
 +               * check with the clearing of time_stamp and movement of i
 +               */
                clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
                if (tx_buffer->next_to_watch &&
                    time_after(jiffies, tx_buffer->time_stamp +
  
  #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets &&
 -                   netif_carrier_ok(tx_ring->netdev) &&
 -                   igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
 +          netif_carrier_ok(tx_ring->netdev) &&
 +          igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
  }
  
  /**
 - * igb_reuse_rx_page - page flip buffer and store it back on the ring
 - * @rx_ring: rx descriptor ring to store buffers on
 - * @old_buff: donor buffer to have page reused
 + *  igb_reuse_rx_page - page flip buffer and store it back on the ring
 + *  @rx_ring: rx descriptor ring to store buffers on
 + *  @old_buff: donor buffer to have page reused
   *
 - * Synchronizes page for reuse by the adapter
 + *  Synchronizes page for reuse by the adapter
   **/
  static void igb_reuse_rx_page(struct igb_ring *rx_ring,
                              struct igb_rx_buffer *old_buff)
@@@ -6185,19 -6133,19 +6168,19 @@@ static bool igb_can_reuse_rx_page(struc
  }
  
  /**
 - * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
 - * @rx_ring: rx descriptor ring to transact packets on
 - * @rx_buffer: buffer containing page to add
 - * @rx_desc: descriptor containing length of buffer written by hardware
 - * @skb: sk_buff to place the data into
 + *  igb_add_rx_frag - Add contents of Rx buffer to sk_buff
 + *  @rx_ring: rx descriptor ring to transact packets on
 + *  @rx_buffer: buffer containing page to add
 + *  @rx_desc: descriptor containing length of buffer written by hardware
 + *  @skb: sk_buff to place the data into
   *
 - * This function will add the data contained in rx_buffer->page to the skb.
 - * This is done either through a direct copy if the data in the buffer is
 - * less than the skb header size, otherwise it will just attach the page as
 - * a frag to the skb.
 + *  This function will add the data contained in rx_buffer->page to the skb.
 + *  This is done either through a direct copy if the data in the buffer is
 + *  less than the skb header size, otherwise it will just attach the page as
 + *  a frag to the skb.
   *
 - * The function will then update the page offset if necessary and return
 - * true if the buffer can be reused by the adapter.
 + *  The function will then update the page offset if necessary and return
 + *  true if the buffer can be reused by the adapter.
   **/
  static bool igb_add_rx_frag(struct igb_ring *rx_ring,
                            struct igb_rx_buffer *rx_buffer,
        return igb_can_reuse_rx_page(rx_buffer, page, truesize);
  }
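
igb_reuse_rx_page() and igb_add_rx_frag() implement half-page recycling: a 4K page backs two 2K receive buffers, and XOR-ing page_offset hands the unused half back to the ring whenever the driver is the page's only owner. A simplified model under a PAGE_SIZE < 8192 assumption (the real reference counting is more involved):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE  4096u
#define RX_BUFSZ   2048u        /* IGB_RX_BUFSZ analogue: half a page */

struct rx_buffer {
        void        *page;        /* stand-in for struct page *     */
        unsigned int page_offset; /* which half is currently in use */
        unsigned int refcount;    /* simplified page_count()        */
};

/* true if the buffer may be handed back to the ring */
static bool can_reuse(struct rx_buffer *b)
{
        /* someone else (an skb frag) still references the page? */
        if (b->refcount != 1)
                return false;
        /* flip to the other 2K half for the next packet */
        b->page_offset ^= RX_BUFSZ;
        return true;
}

int main(void)
{
        struct rx_buffer b = { .page = (void *)&b, .page_offset = 0,
                               .refcount = 1 };

        return can_reuse(&b) && b.page_offset == RX_BUFSZ ? 0 : 1;
}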
  
- static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
-                                          union e1000_adv_rx_desc *rx_desc)
- {
-       struct igb_rx_buffer *rx_buffer;
-       struct sk_buff *skb;
-       struct page *page;
-       void *page_addr;
-       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
- #if (PAGE_SIZE < 8192)
-       unsigned int truesize = IGB_RX_BUFSZ;
- #else
-       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-                               SKB_DATA_ALIGN(NET_SKB_PAD +
-                                              NET_IP_ALIGN +
-                                              size);
- #endif
-       /* If we spanned a buffer we have a huge mess so test for it */
-       BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
-       rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-       page = rx_buffer->page;
-       prefetchw(page);
-       page_addr = page_address(page) + rx_buffer->page_offset;
-       /* prefetch first cache line of first page */
-       prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
- #if L1_CACHE_BYTES < 128
-       prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
- #endif
-       /* build an skb to around the page buffer */
-       skb = build_skb(page_addr, truesize);
-       if (unlikely(!skb)) {
-               rx_ring->rx_stats.alloc_failed++;
-               return NULL;
-       }
-       /* we are reusing so sync this buffer for CPU use */
-       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                     rx_buffer->dma,
-                                     rx_buffer->page_offset,
-                                     IGB_RX_BUFSZ,
-                                     DMA_FROM_DEVICE);
-       /* update pointers within the skb to store the data */
-       skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
-       __skb_put(skb, size);
-       /* pull timestamp out of packet data */
-       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
-               __skb_pull(skb, IGB_TS_HDR_LEN);
-       }
-       if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
-               /* hand second half of page back to the ring */
-               igb_reuse_rx_page(rx_ring, rx_buffer);
-       } else {
-               /* we are not reusing the buffer so unmap it */
-               dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
-       }
-       /* clear contents of buffer_info */
-       rx_buffer->dma = 0;
-       rx_buffer->page = NULL;
-       return skb;
- }
  static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
                                           union e1000_adv_rx_desc *rx_desc,
                                           struct sk_buff *skb)
                        return NULL;
                }
  
 -              /*
 -               * we will be copying header into skb->data in
 +              /* we will be copying header into skb->data in
                 * pskb_may_pull so it is in our interest to prefetch
                 * it now to avoid a possible cache miss
                 */
@@@ -6388,7 -6265,8 +6299,7 @@@ static inline void igb_rx_checksum(stru
        if (igb_test_staterr(rx_desc,
                             E1000_RXDEXT_STATERR_TCPE |
                             E1000_RXDEXT_STATERR_IPE)) {
 -              /*
 -               * work around errata with sctp packets where the TCPE aka
 +              /* work around errata with sctp packets where the TCPE aka
                 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
                 * packets, (aka let the stack check the crc32c)
                 */
@@@ -6419,15 -6297,15 +6330,15 @@@ static inline void igb_rx_hash(struct i
  }
  
  /**
 - * igb_is_non_eop - process handling of non-EOP buffers
 - * @rx_ring: Rx ring being processed
 - * @rx_desc: Rx descriptor for current buffer
 - * @skb: current socket buffer containing buffer in progress
 + *  igb_is_non_eop - process handling of non-EOP buffers
 + *  @rx_ring: Rx ring being processed
 + *  @rx_desc: Rx descriptor for current buffer
 + *  @skb: current socket buffer containing buffer in progress
   *
 - * This function updates next to clean.  If the buffer is an EOP buffer
 - * this function exits returning false, otherwise it will place the
 - * sk_buff in the next buffer to be chained and return true indicating
 - * that this is in fact a non-EOP buffer.
 + *  This function updates next to clean.  If the buffer is an EOP buffer
 + *  this function exits returning false, otherwise it will place the
 + *  sk_buff in the next buffer to be chained and return true indicating
 + *  that this is in fact a non-EOP buffer.
   **/
  static bool igb_is_non_eop(struct igb_ring *rx_ring,
                           union e1000_adv_rx_desc *rx_desc)
  }
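
igb_is_non_eop() advances next_to_clean and reports whether the frame continues into the next buffer. A toy version of that contract, with an invented EOP flag value:

#include <stdbool.h>

#define RING_COUNT 256
#define STAT_EOP   0x1u   /* illustrative, not E1000_RXD_STAT_EOP */

struct ring { unsigned int next_to_clean; };

/* returns true when the frame continues in the next buffer */
static bool is_non_eop(struct ring *r, unsigned int staterr)
{
        r->next_to_clean = (r->next_to_clean + 1) % RING_COUNT;

        if (staterr & STAT_EOP)
                return false;   /* frame complete: hand the skb up */
        return true;            /* chain the next buffer into the skb */
}

int main(void)
{
        struct ring r = { .next_to_clean = 255 };

        return !is_non_eop(&r, STAT_EOP) && r.next_to_clean == 0 ? 0 : 1;
}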
  
  /**
 - * igb_get_headlen - determine size of header for LRO/GRO
 - * @data: pointer to the start of the headers
 - * @max_len: total length of section to find headers in
 + *  igb_get_headlen - determine size of header for LRO/GRO
 + *  @data: pointer to the start of the headers
 + *  @max_len: total length of section to find headers in
   *
 - * This function is meant to determine the length of headers that will
 + *  be recognized by hardware for LRO and GRO offloads.  The main
 - * motivation of doing this is to only perform one pull for IPv4 TCP
 - * packets so that we can do basic things like calculating the gso_size
 - * based on the average data per packet.
 + *  This function is meant to determine the length of headers that will
 + *  be recognized by hardware for LRO, and GRO offloads.  The main
 + *  motivation of doing this is to only perform one pull for IPv4 TCP
 + *  packets so that we can do basic things like calculating the gso_size
 + *  based on the average data per packet.
   **/
  static unsigned int igb_get_headlen(unsigned char *data,
                                    unsigned int max_len)
                        return hdr.network - data;
  
                /* record next protocol if header is present */
 -              if (!hdr.ipv4->frag_off)
 +              if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
                        nexthdr = hdr.ipv4->protocol;
        } else if (protocol == __constant_htons(ETH_P_IPV6)) {
                if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
                hdr.network += sizeof(struct udphdr);
        }
  
 -      /*
 -       * If everything has gone correctly hdr.network should be the
 +      /* If everything has gone correctly hdr.network should be the
         * data section of the packet and will be the end of the header.
         * If not then it probably represents the end of the last recognized
         * header.
  }
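
As the kernel-doc explains, igb_get_headlen() parses raw bytes so a single pull can linearize every header GRO cares about. A self-contained sketch of the Ethernet/IPv4 walk only; unlike the driver it handles no VLAN, IPv6 or fragmentation cases:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_HLEN 14

/* returns the header length in bytes, or as far as it could parse */
static unsigned int get_headlen(const uint8_t *data, unsigned int max_len)
{
        unsigned int off = ETH_HLEN;
        uint16_t proto;
        uint8_t ihl, l4;

        if (max_len < ETH_HLEN)
                return max_len;
        memcpy(&proto, data + 12, 2);        /* ethertype, network order */
        if (proto != htons(0x0800))          /* IPv4 only in this sketch */
                return off;

        if (off + 20 > max_len)
                return off;
        ihl = (data[off] & 0x0f) * 4;        /* IPv4 header length */
        l4  = data[off + 9];                 /* protocol field */
        off += ihl;

        if (l4 == 6 && off + 20 <= max_len)        /* TCP */
                off += ((data[off + 12] & 0xf0) >> 4) * 4; /* data offset */
        else if (l4 == 17 && off + 8 <= max_len)   /* UDP */
                off += 8;

        return off < max_len ? off : max_len;
}

int main(void)
{
        uint8_t pkt[64] = { 0 };

        pkt[12] = 0x08; pkt[13] = 0x00;  /* ethertype IPv4 */
        pkt[14] = 0x45;                  /* IPv4, IHL = 5 (20 bytes) */
        pkt[23] = 6;                     /* protocol = TCP */
        pkt[46] = 0x50;                  /* TCP data offset = 5 (20 bytes) */
        return get_headlen(pkt, sizeof(pkt)) == 54 ? 0 : 1;
}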
  
  /**
 - * igb_pull_tail - igb specific version of skb_pull_tail
 - * @rx_ring: rx descriptor ring packet is being transacted on
 - * @rx_desc: pointer to the EOP Rx descriptor
 - * @skb: pointer to current skb being adjusted
 + *  igb_pull_tail - igb specific version of skb_pull_tail
 + *  @rx_ring: rx descriptor ring packet is being transacted on
 + *  @rx_desc: pointer to the EOP Rx descriptor
 + *  @skb: pointer to current skb being adjusted
   *
 - * This function is an igb specific version of __pskb_pull_tail.  The
 - * main difference between this version and the original function is that
 - * this function can make several assumptions about the state of things
 - * that allow for significant optimizations versus the standard function.
 - * As a result we can do things like drop a frag and maintain an accurate
 - * truesize for the skb.
 + *  This function is an igb specific version of __pskb_pull_tail.  The
 + *  main difference between this version and the original function is that
 + *  this function can make several assumptions about the state of things
 + *  that allow for significant optimizations versus the standard function.
 + *  As a result we can do things like drop a frag and maintain an accurate
 + *  truesize for the skb.
   */
  static void igb_pull_tail(struct igb_ring *rx_ring,
                          union e1000_adv_rx_desc *rx_desc,
        unsigned char *va;
        unsigned int pull_len;
  
 -      /*
 -       * it is valid to use page_address instead of kmap since we are
 +      /* it is valid to use page_address instead of kmap since we are
         * working with pages allocated out of the lomem pool per
         * alloc_page(GFP_ATOMIC)
         */
                va += IGB_TS_HDR_LEN;
        }
  
 -      /*
 -       * we need the header to contain the greater of either ETH_HLEN or
 +      /* we need the header to contain the greater of either ETH_HLEN or
         * 60 bytes if the skb->len is less than 60 for skb_pad.
         */
        pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
  }
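
igb_pull_tail() moves just the header bytes from the page fragment into skb->data and shrinks the frag. A toy model of that copy-and-shrink step (the real function also fixes truesize and handles the PTP header, omitted here):

#include <string.h>

/* toy skb: a linear area plus one page fragment */
struct toy_skb {
        unsigned char linear[256];
        unsigned int  linear_len;
        unsigned char *frag;
        unsigned int  frag_len;
};

/* copy pull_len header bytes out of the frag into the linear area
 * and shrink the frag accordingly: the shape of igb_pull_tail() */
static void pull_tail(struct toy_skb *skb, unsigned int pull_len)
{
        if (pull_len > skb->frag_len)
                pull_len = skb->frag_len;

        memcpy(skb->linear + skb->linear_len, skb->frag, pull_len);
        skb->linear_len += pull_len;

        skb->frag     += pull_len;   /* advance past the copied bytes */
        skb->frag_len -= pull_len;
}

int main(void)
{
        static unsigned char page[2048] = "headers+payload";
        struct toy_skb skb = { .frag = page, .frag_len = sizeof(page) };

        pull_tail(&skb, 8);   /* linearize just the header bytes */
        return skb.linear_len == 8 ? 0 : 1;
}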
  
  /**
 - * igb_cleanup_headers - Correct corrupted or empty headers
 - * @rx_ring: rx descriptor ring packet is being transacted on
 - * @rx_desc: pointer to the EOP Rx descriptor
 - * @skb: pointer to current skb being fixed
 + *  igb_cleanup_headers - Correct corrupted or empty headers
 + *  @rx_ring: rx descriptor ring packet is being transacted on
 + *  @rx_desc: pointer to the EOP Rx descriptor
 + *  @skb: pointer to current skb being fixed
   *
 - * Address the case where we are pulling data in on pages only
 - * and as such no data is present in the skb header.
 + *  Address the case where we are pulling data in on pages only
 + *  and as such no data is present in the skb header.
   *
 - * In addition if skb is not at least 60 bytes we need to pad it so that
 - * it is large enough to qualify as a valid Ethernet frame.
 + *  In addition if skb is not at least 60 bytes we need to pad it so that
 + *  it is large enough to qualify as a valid Ethernet frame.
   *
 - * Returns true if an error was encountered and skb was freed.
 + *  Returns true if an error was encountered and skb was freed.
   **/
  static bool igb_cleanup_headers(struct igb_ring *rx_ring,
                                union e1000_adv_rx_desc *rx_desc,
                                struct sk_buff *skb)
  {
 -
        if (unlikely((igb_test_staterr(rx_desc,
                                       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
                struct net_device *netdev = rx_ring->netdev;
  }
  
  /**
 - * igb_process_skb_fields - Populate skb header fields from Rx descriptor
 - * @rx_ring: rx descriptor ring packet is being transacted on
 - * @rx_desc: pointer to the EOP Rx descriptor
 - * @skb: pointer to current skb being populated
 + *  igb_process_skb_fields - Populate skb header fields from Rx descriptor
 + *  @rx_ring: rx descriptor ring packet is being transacted on
 + *  @rx_desc: pointer to the EOP Rx descriptor
 + *  @skb: pointer to current skb being populated
   *
 - * This function checks the ring, descriptor, and packet information in
 - * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 - * other fields within the skb.
 + *  This function checks the ring, descriptor, and packet information in
 + *  order to populate the hash, checksum, VLAN, timestamp, protocol, and
 + *  other fields within the skb.
   **/
  static void igb_process_skb_fields(struct igb_ring *rx_ring,
                                   union e1000_adv_rx_desc *rx_desc,
  
        igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
  
 -      if ((dev->features & NETIF_F_HW_VLAN_RX) &&
 +      if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
                u16 vid;
                if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
                else
                        vid = le16_to_cpu(rx_desc->wb.upper.vlan);
  
 -              __vlan_hwaccel_put_tag(skb, vid);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
        }
  
        skb_record_rx_queue(skb, rx_ring->queue_index);
@@@ -6719,10 -6601,7 +6630,7 @@@ static bool igb_clean_rx_irq(struct igb
                rmb();
  
                /* retrieve a buffer from the ring */
-               if (ring_uses_build_skb(rx_ring))
-                       skb = igb_build_rx_buffer(rx_ring, rx_desc);
-               else
-                       skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+               skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
  
                /* exit if we failed to retrieve a buffer */
                if (!skb)
@@@ -6791,7 -6670,8 +6699,7 @@@ static bool igb_alloc_mapped_page(struc
        /* map page for use */
        dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
  
 -      /*
 -       * if mapping failed free memory back to system since
 +      /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
        return true;
  }
  
- static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
- {
-       if (ring_uses_build_skb(rx_ring))
-               return NET_SKB_PAD + NET_IP_ALIGN;
-       else
-               return 0;
- }
  /**
 - * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 - * @adapter: address of board private structure
 + *  igb_alloc_rx_buffers - Replace used receive buffers; packet split
 + *  @adapter: address of board private structure
   **/
  void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
  {
                if (!igb_alloc_mapped_page(rx_ring, bi))
                        break;
  
 -              /*
 -               * Refresh the desc even if buffer_addrs didn't change
 +              /* Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
                 */
-               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
-                                                    bi->page_offset +
-                                                    igb_rx_offset(rx_ring));
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
  
                rx_desc++;
                bi++;
                /* update next to alloc since we have filled the ring */
                rx_ring->next_to_alloc = i;
  
 -              /*
 -               * Force memory writes to complete before letting h/w
 +              /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
@@@ -6954,7 -6826,7 +6852,7 @@@ static void igb_vlan_mode(struct net_de
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl;
 -      bool enable = !!(features & NETIF_F_HW_VLAN_RX);
 +      bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
  
        if (enable) {
                /* enable VLAN tag insert/strip */
        igb_rlpml_set(adapter);
  }
  
 -static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 +static int igb_vlan_rx_add_vid(struct net_device *netdev,
 +                             __be16 proto, u16 vid)
  {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        return 0;
  }
  
 -static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 +static int igb_vlan_rx_kill_vid(struct net_device *netdev,
 +                              __be16 proto, u16 vid)
  {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@@ -7021,7 -6891,7 +6919,7 @@@ static void igb_restore_vlan(struct igb
        igb_vlan_mode(adapter->netdev, adapter->netdev->features);
  
        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
 -              igb_vlan_rx_add_vid(adapter->netdev, vid);
 +              igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
  }
  
  int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
        mac->autoneg = 0;
  
        /* Make sure dplx is at most 1 bit and lsb of speed is not set
 -       * for the switch() below to work */
 +       * for the switch() below to work
 +       */
        if ((spd & 1) || (dplx & ~1))
                goto err_inval;
  
 -      /* Fiber NIC's only allow 1000 Gbps Full duplex */
 -      if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
 -          spd != SPEED_1000 &&
 -          dplx != DUPLEX_FULL)
 -              goto err_inval;
 +      /* Fiber NICs only allow 1000 Mbps Full duplex
 +       * and 100 Mbps Full duplex for 100baseFX SFP
 +       */
 +      if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
 +              switch (spd + dplx) {
 +              case SPEED_10 + DUPLEX_HALF:
 +              case SPEED_10 + DUPLEX_FULL:
 +              case SPEED_100 + DUPLEX_HALF:
 +                      goto err_inval;
 +              default:
 +                      break;
 +              }
 +      }
  
        switch (spd + dplx) {
        case SPEED_10 + DUPLEX_HALF:
@@@ -7148,8 -7009,7 +7046,8 @@@ static int __igb_shutdown(struct pci_de
                igb_power_up_link(adapter);
  
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
 -       * would have already happened in close and is redundant. */
 +       * would have already happened in close and is redundant.
 +       */
        igb_release_hw_control(adapter);
  
        pci_disable_device(pdev);
@@@ -7211,8 -7071,7 +7109,8 @@@ static int igb_resume(struct device *de
        igb_reset(adapter);
  
        /* let the f/w know that the h/w is now under the control of the
 -       * driver. */
 +       * driver.
 +       */
        igb_get_hw_control(adapter);
  
        wr32(E1000_WUS, ~0);
@@@ -7348,7 -7207,8 +7246,7 @@@ static int igb_pci_sriov_configure(stru
  }
  
  #ifdef CONFIG_NET_POLL_CONTROLLER
 -/*
 - * Polling 'interrupt' - used by things like netconsole to send skbs
 +/* Polling 'interrupt' - used by things like netconsole to send skbs
   * without having to re-enable interrupts. It's not called while
   * the interrupt routine is executing.
   */
@@@ -7371,13 -7231,13 +7269,13 @@@ static void igb_netpoll(struct net_devi
  #endif /* CONFIG_NET_POLL_CONTROLLER */
  
  /**
 - * igb_io_error_detected - called when PCI error is detected
 - * @pdev: Pointer to PCI device
 - * @state: The current pci connection state
 + *  igb_io_error_detected - called when PCI error is detected
 + *  @pdev: Pointer to PCI device
 + *  @state: The current pci connection state
   *
 - * This function is called after a PCI bus error affecting
 - * this device has been detected.
 - */
 + *  This function is called after a PCI bus error affecting
 + *  this device has been detected.
 + **/
  static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
  {
  }
  
  /**
 - * igb_io_slot_reset - called after the pci bus has been reset.
 - * @pdev: Pointer to PCI device
 + *  igb_io_slot_reset - called after the pci bus has been reset.
 + *  @pdev: Pointer to PCI device
   *
 - * Restart the card from scratch, as if from a cold-boot. Implementation
 - * resembles the first-half of the igb_resume routine.
 - */
 + *  Restart the card from scratch, as if from a cold-boot. Implementation
 + *  resembles the first-half of the igb_resume routine.
 + **/
  static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
  {
        struct net_device *netdev = pci_get_drvdata(pdev);
  
        err = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (err) {
 -              dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
 -                      "failed 0x%0x\n", err);
 +              dev_err(&pdev->dev,
 +                      "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
 +                      err);
                /* non-fatal, continue */
        }
  
  }
  
  /**
 - * igb_io_resume - called when traffic can start flowing again.
 - * @pdev: Pointer to PCI device
 + *  igb_io_resume - called when traffic can start flowing again.
 + *  @pdev: Pointer to PCI device
   *
 - * This callback is called when the error recovery driver tells us that
 - * its OK to resume normal operation. Implementation resembles the
 - * second-half of the igb_resume routine.
 + *  This callback is called when the error recovery driver tells us that
 + *  it's OK to resume normal operation. Implementation resembles the
 + *  second-half of the igb_resume routine.
   */
  static void igb_io_resume(struct pci_dev *pdev)
  {
        netif_device_attach(netdev);
  
        /* let the f/w know that the h/w is now under the control of the
 -       * driver. */
 +       * driver.
 +       */
        igb_get_hw_control(adapter);
  }
  
  static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
 -                             u8 qsel)
 +                           u8 qsel)
  {
        u32 rar_low, rar_high;
        struct e1000_hw *hw = &adapter->hw;
         * from network order (big endian) to little endian
         */
        rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
 -                ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
 +                 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
        rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
  
        /* Indicate to hardware the Address is Valid. */
  }
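
The packing above converts a network-order MAC address into two little-endian register words. A quick self-contained check of the byte layout (the Address Valid bit the driver sets is omitted):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        uint32_t rar_low, rar_high;

        /* same shifts as the driver: byte 0 lands in the low byte */
        rar_low  = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
                   ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
        rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

        /* prints RAL=0xaa211b00 RAH=0x0000ccbb */
        printf("RAL=0x%08x RAH=0x%08x\n", rar_low, rar_high);
        return 0;
}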
  
  static int igb_set_vf_mac(struct igb_adapter *adapter,
 -                          int vf, unsigned char *mac_addr)
 +                        int vf, unsigned char *mac_addr)
  {
        struct e1000_hw *hw = &adapter->hw;
        /* VF MAC addresses start at end of receive addresses and moves
 -       * torwards the first, as a result a collision should not be possible */
 +       * towards the first, as a result a collision should not be possible
 +       */
        int rar_entry = hw->mac.rar_entry_count - (vf + 1);
  
        memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
@@@ -7518,13 -7375,13 +7416,13 @@@ static int igb_ndo_set_vf_mac(struct ne
                return -EINVAL;
        adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
 -      dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
 -                                    " change effective.");
 +      dev_info(&adapter->pdev->dev,
 +               "Reload the VF driver to make this change effective.");
        if (test_bit(__IGB_DOWN, &adapter->state)) {
 -              dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
 -                       " but the PF device is not up.\n");
 -              dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
 -                       " attempting to use the VF device.\n");
 +              dev_warn(&adapter->pdev->dev,
 +                       "The VF MAC address has been set, but the PF device is not up.\n");
 +              dev_warn(&adapter->pdev->dev,
 +                       "Bring the PF device up before attempting to use the VF device.\n");
        }
        return igb_set_vf_mac(adapter, vf, mac);
  }
@@@ -7551,19 -7408,19 +7449,19 @@@ static void igb_set_vf_rate_limit(struc
                /* Calculate the rate factor values to set */
                rf_int = link_speed / tx_rate;
                rf_dec = (link_speed - (rf_int * tx_rate));
 -              rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
 +              rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
 +                       tx_rate;
  
                bcnrc_val = E1000_RTTBCNRC_RS_ENA;
 -              bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
 -                             E1000_RTTBCNRC_RF_INT_MASK);
 +              bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
 +                            E1000_RTTBCNRC_RF_INT_MASK);
                bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
        } else {
                bcnrc_val = 0;
        }
  
        wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
 -      /*
 -       * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
 +      /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
         * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
         */
        wr32(E1000_RTTBCNRM, 0x14);
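
The rate factor written above is link_speed/tx_rate in fixed point: rf_int holds the integer part and rf_dec the fraction scaled by 2^E1000_RTTBCNRC_RF_INT_SHIFT. Assuming that shift is 14 (an assumption, not quoted from the header), a worked computation for capping a VF at 300 Mbps on a 1 Gbps link:

#include <stdio.h>
#include <stdint.h>

#define RF_INT_SHIFT 14   /* assumed fractional precision */

int main(void)
{
        uint32_t link_speed = 1000;  /* Mbps */
        uint32_t tx_rate    = 300;   /* requested VF cap, Mbps */
        uint32_t rf_int, rf_dec;

        rf_int = link_speed / tx_rate;                       /* 3    */
        rf_dec = link_speed - rf_int * tx_rate;              /* 100  */
        rf_dec = (rf_dec * (1u << RF_INT_SHIFT)) / tx_rate;  /* 5461 */

        /* factor = 3 + 5461/16384, roughly 3.3333 = 1000/300 */
        printf("rf_int=%u rf_dec=%u (~%.4f)\n",
               rf_int, rf_dec, rf_int + rf_dec / 16384.0);
        return 0;
}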
@@@ -7585,7 -7442,8 +7483,7 @@@ static void igb_check_vf_rate_limit(str
                reset_rate = true;
                adapter->vf_rate_link_speed = 0;
                dev_info(&adapter->pdev->dev,
 -                       "Link speed has been changed. VF Transmit "
 -                       "rate is disabled\n");
 +                       "Link speed has been changed. VF Transmit rate is disabled\n");
        }
  
        for (i = 0; i < adapter->vfs_allocated_count; i++) {
                        adapter->vf_data[i].tx_rate = 0;
  
                igb_set_vf_rate_limit(&adapter->hw, i,
 -                                    adapter->vf_data[i].tx_rate,
 -                                    actual_link_speed);
 +                                    adapter->vf_data[i].tx_rate,
 +                                    actual_link_speed);
        }
  }
  
@@@ -7620,33 -7478,6 +7518,33 @@@ static int igb_ndo_set_vf_bw(struct net
        return 0;
  }
  
 +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
 +                                 bool setting)
 +{
 +      struct igb_adapter *adapter = netdev_priv(netdev);
 +      struct e1000_hw *hw = &adapter->hw;
 +      u32 reg_val, reg_offset;
 +
 +      if (!adapter->vfs_allocated_count)
 +              return -EOPNOTSUPP;
 +
 +      if (vf >= adapter->vfs_allocated_count)
 +              return -EINVAL;
 +
 +      reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
 +      reg_val = rd32(reg_offset);
 +      if (setting)
 +              reg_val |= ((1 << vf) |
 +                          (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
 +      else
 +              reg_val &= ~((1 << vf) |
 +                           (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
 +      wr32(reg_offset, reg_val);
 +
 +      adapter->vf_data[vf].spoofchk_enabled = setting;
 +      return E1000_SUCCESS;
 +}
 +
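
igb_ndo_set_vf_spoofchk() above drives two bits per VF in one register: bit vf gates MAC anti-spoofing and bit vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT gates VLAN anti-spoofing. A minimal sketch of that mask construction, assuming a shift of 8 purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_SPOOF_SHIFT 8 /* assumed offset of the VLAN bits */

    static uint32_t spoofchk_bits(unsigned int vf)
    {
            return (1u << vf) | (1u << (vf + VLAN_SPOOF_SHIFT));
    }

    int main(void)
    {
            uint32_t reg = 0;

            reg |= spoofchk_bits(2);  /* enable both checks for VF 2 */
            printf("set:   0x%08x\n", reg); /* 0x00000404 */
            reg &= ~spoofchk_bits(2); /* disable them again */
            printf("clear: 0x%08x\n", reg); /* 0x00000000 */
            return 0;
    }
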
  static int igb_ndo_get_vf_config(struct net_device *netdev,
                                 int vf, struct ifla_vf_info *ivi)
  {
        ivi->tx_rate = adapter->vf_data[vf].tx_rate;
        ivi->vlan = adapter->vf_data[vf].pf_vlan;
        ivi->qos = adapter->vf_data[vf].pf_qos;
 +      ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
        return 0;
  }
  
@@@ -7671,7 -7501,6 +7569,7 @@@ static void igb_vmm_control(struct igb_
        case e1000_82575:
        case e1000_i210:
        case e1000_i211:
 +      case e1000_i354:
        default:
                /* replication is not supported for 82575 */
                return;
                igb_vmdq_set_loopback_pf(hw, true);
                igb_vmdq_set_replication_pf(hw, true);
                igb_vmdq_set_anti_spoofing_pf(hw, true,
 -                                              adapter->vfs_allocated_count);
 +                                            adapter->vfs_allocated_count);
        } else {
                igb_vmdq_set_loopback_pf(hw, false);
                igb_vmdq_set_replication_pf(hw, false);
@@@ -7714,7 -7543,8 +7612,7 @@@ static void igb_init_dmac(struct igb_ad
                        /* force threshold to 0. */
                        wr32(E1000_DMCTXTH, 0);
  
 -                      /*
 -                       * DMA Coalescing high water mark needs to be greater
 +                      /* DMA Coalescing high water mark needs to be greater
                         * than the Rx threshold. Set hwm to PBA - max frame
                         * size in 16B units, capping it at PBA - 6KB.
                         */
                                & E1000_FCRTC_RTH_COAL_MASK);
                        wr32(E1000_FCRTC, reg);
  
 -                      /*
 -                       * Set the DMA Coalescing Rx threshold to PBA - 2 * max
 +                      /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
                         * frame size, capping it at PBA - 10KB.
                         */
                        dmac_thr = pba - adapter->max_frame_size / 512;
                        reg |= (1000 >> 5);
  
                        /* Disable BMC-to-OS Watchdog Enable */
 -                      reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
 +                      if (hw->mac.type != e1000_i354)
 +                              reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
 +
                        wr32(E1000_DMACR, reg);
  
 -                      /*
 -                       * no lower threshold to disable
 +                      /* no lower threshold to disable
                         * coalescing (smart fifo) - UTRESH=0
                         */
                        wr32(E1000_DMCRTRH, 0);
  
                        wr32(E1000_DMCTLX, reg);
  
 -                      /*
 -                       * free space in tx packet buffer to wake from
 +                      /* free space in tx packet buffer to wake from
                         * DMA coal
                         */
                        wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
                             (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
  
 -                      /*
 -                       * make low power state decision controlled
 +                      /* make low power state decision controlled
                         * by DMA coal
                         */
                        reg = rd32(E1000_PCIEMISC);
        }
  }
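
The dmac_thr arithmetic above relies on pba being expressed in KB: max_frame_size / 512 equals (2 * max_frame_size) / 1024, i.e. twice the maximum frame size in KB, which is how the code realizes the "PBA - 2 * max frame size" comment. A worked sketch (the KB unit for pba is an assumption for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned int pba = 34;              /* packet buffer size, KB */
            unsigned int max_frame_size = 1522; /* bytes */

            /* 1522 / 512 = 2 (integer math), i.e. ~2 * 1522 B in KB */
            unsigned int dmac_thr = pba - max_frame_size / 512;

            printf("dmac_thr = %u KB\n", dmac_thr); /* 32 */
            return 0;
    }
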
  
 -/*  igb_read_i2c_byte - Reads 8 bit word over I2C
 +/**
 + *  igb_read_i2c_byte - Reads 8 bit word over I2C
   *  @hw: pointer to hardware structure
   *  @byte_offset: byte offset to read
   *  @dev_addr: device address
   *
   *  Performs byte read operation over I2C interface at
   *  a specified device address.
 - */
 + **/
  s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
 -                              u8 dev_addr, u8 *data)
 +                    u8 dev_addr, u8 *data)
  {
        struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
        struct i2c_client *this_client = adapter->i2c_client;
        }
  }
  
 -/*  igb_write_i2c_byte - Writes 8 bit word over I2C
 +/**
 + *  igb_write_i2c_byte - Writes 8 bit word over I2C
   *  @hw: pointer to hardware structure
   *  @byte_offset: byte offset to write
   *  @dev_addr: device address
   *
   *  Performs byte write operation over I2C interface at
   *  a specified device address.
 - */
 + **/
  s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
 -                               u8 dev_addr, u8 data)
 +                     u8 dev_addr, u8 data)
  {
        struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
        struct i2c_client *this_client = adapter->i2c_client;
index 2d4bdcc4fdbea313a2ce8d5fa51f681ff06ae9a7,97e33669c0b9b4c6e4bc105ab5f125dffabd9ee0..1e7d587c4e572f9efdd3f876b98e906cb3963561
@@@ -35,7 -35,7 +35,7 @@@
  #include <linux/ip.h>
  #include <linux/tcp.h>
  #include <linux/ipv6.h>
 -#ifdef NETIF_F_HW_VLAN_TX
 +#ifdef NETIF_F_HW_VLAN_CTAG_TX
  #include <linux/if_vlan.h>
  #endif
  
@@@ -661,7 -661,13 +661,7 @@@ int ixgbe_vf_configuration(struct pci_d
        bool enable = ((event_mask & 0x10000000U) != 0);
  
        if (enable) {
 -              eth_random_addr(vf_mac_addr);
 -              e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
 -                     vfn, vf_mac_addr);
 -              /*
 -               * Store away the VF "permananet" MAC address, it will ask
 -               * for it later.
 -               */
 +              eth_zero_addr(vf_mac_addr);
                memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
        }
  
@@@ -682,8 -688,7 +682,8 @@@ static int ixgbe_vf_reset_msg(struct ix
        ixgbe_vf_reset_event(adapter, vf);
  
        /* set vf mac address */
 -      ixgbe_set_vf_mac(adapter, vf, vf_mac);
 +      if (!is_zero_ether_addr(vf_mac))
 +              ixgbe_set_vf_mac(adapter, vf, vf_mac);
  
        vf_shift = vf % 32;
        reg_offset = vf / 32;
        IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
  
        /* reply to reset with ack and vf mac address */
 -      msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
 -      memcpy(addr, vf_mac, ETH_ALEN);
 +      msgbuf[0] = IXGBE_VF_RESET;
 +      if (!is_zero_ether_addr(vf_mac)) {
 +              msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
 +              memcpy(addr, vf_mac, ETH_ALEN);
 +      } else {
 +              msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
 +              dev_warn(&adapter->pdev->dev,
 +                       "VF %d has no MAC address assigned, you may have to assign one manually\n",
 +                       vf);
 +      }
  
        /*
         * Piggyback the multicast filter type so VF can compute the
@@@ -1052,6 -1049,12 +1052,12 @@@ int ixgbe_ndo_set_vf_vlan(struct net_de
        if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
                return -EINVAL;
        if (vlan || qos) {
+               if (adapter->vfinfo[vf].pf_vlan)
+                       err = ixgbe_set_vf_vlan(adapter, false,
+                                               adapter->vfinfo[vf].pf_vlan,
+                                               vf);
+               if (err)
+                       goto out;
                err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
                if (err)
                        goto out;
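
The (vlan > 4095) || (qos > 7) bounds above come straight from the 802.1Q tag layout: a 12-bit VLAN ID plus a 3-bit priority field. A sketch of how such a tag control field packs together (pack_vlan_tci is a hypothetical helper for illustration, not an ixgbe function):

    #include <stdint.h>
    #include <stdio.h>

    /* 3-bit priority in bits 15:13, 12-bit VLAN ID in bits 11:0 */
    static uint16_t pack_vlan_tci(uint16_t vlan, uint8_t qos)
    {
            return (uint16_t)(((qos & 0x7) << 13) | (vlan & 0x0fff));
    }

    int main(void)
    {
            printf("tci = 0x%04x\n", pack_vlan_tci(100, 5)); /* 0xa064 */
            return 0;
    }
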
index 0051f0ef3cd0e27c421187db74990061afe67ccd,434e33c527df102adf5052854bc92d816588b4ba..a49e81bdf8e8fab4ca445d92d438717d25083035
@@@ -21,8 -21,8 +21,8 @@@ if NET_VENDOR_MARVEL
  config MV643XX_ETH
        tristate "Marvell Discovery (643XX) and Orion ethernet support"
        depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
 -      select INET_LRO
        select PHYLIB
 +      select MVMDIO
        ---help---
          This driver supports the gigabit ethernet MACs in the
          Marvell Discovery PPC/MIPS chipset family (MV643XX) and
  
  config MVMDIO
        tristate "Marvell MDIO interface support"
+       select PHYLIB
        ---help---
          This driver supports the MDIO interface found in the network
          interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
          Dove, Armada 370 and Armada XP).
  
 -        For now, this driver is only needed for the MVNETA driver
 -        (used on Armada 370 and XP), but it could be used in the
 -        future by the MV643XX_ETH driver.
 +        This driver is used by the MV643XX_ETH and MVNETA drivers.
  
  config MVNETA
        tristate "Marvell Armada 370/XP network interface support"
        depends on MACH_ARMADA_370_XP
-       select PHYLIB
        select MVMDIO
        ---help---
          This driver supports the network interface units in the
index e48261e468f355ff00d321e23fa2f3451839c4b7,a47a097c21e13b1373fb6fa896374d8d6ffcd04c..c96678555233c4afc1336c012c17c3b0da943060
@@@ -374,7 -374,6 +374,6 @@@ static int rxq_number = 8
  static int txq_number = 8;
  
  static int rxq_def;
- static int txq_def;
  
  #define MVNETA_DRIVER_NAME "mvneta"
  #define MVNETA_DRIVER_VERSION "1.0"
@@@ -1475,7 -1474,8 +1474,8 @@@ error
  static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
  {
        struct mvneta_port *pp = netdev_priv(dev);
-       struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
+       u16 txq_id = skb_get_queue_mapping(skb);
+       struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
        struct mvneta_tx_desc *tx_desc;
        struct netdev_queue *nq;
        int frags = 0;
                goto out;
  
        frags = skb_shinfo(skb)->nr_frags + 1;
-       nq    = netdev_get_tx_queue(dev, txq_def);
+       nq    = netdev_get_tx_queue(dev, txq_id);
  
        /* Get a descriptor for the first part of the packet */
        tx_desc = mvneta_txq_next_desc_get(txq);
@@@ -1969,8 -1969,13 +1969,8 @@@ static int mvneta_rxq_init(struct mvnet
        rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                        rxq->size * MVNETA_DESC_ALIGNED_SIZE,
                                        &rxq->descs_phys, GFP_KERNEL);
 -      if (rxq->descs == NULL) {
 -              netdev_err(pp->dev,
 -                         "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
 -                         rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
 -                         rxq->size);
 +      if (rxq->descs == NULL)
                return -ENOMEM;
 -      }
  
        BUG_ON(rxq->descs !=
               PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
@@@ -2024,8 -2029,13 +2024,8 @@@ static int mvneta_txq_init(struct mvnet
        txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                        txq->size * MVNETA_DESC_ALIGNED_SIZE,
                                        &txq->descs_phys, GFP_KERNEL);
 -      if (txq->descs == NULL) {
 -              netdev_err(pp->dev,
 -                         "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
 -                         txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
 -                         txq->size);
 +      if (txq->descs == NULL)
                return -ENOMEM;
 -      }
  
        /* Make sure descriptor address is cache line size aligned  */
        BUG_ON(txq->descs !=
@@@ -2679,7 -2689,7 +2679,7 @@@ static int mvneta_probe(struct platform
                return -EINVAL;
        }
  
-       dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
+       dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
        if (!dev)
                return -ENOMEM;
  
  
        netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
  
+       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->priv_flags |= IFF_UNICAST_FLT;
        err = register_netdev(dev);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register\n");
                goto err_deinit;
        }
  
-       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
-       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
-       dev->priv_flags |= IFF_UNICAST_FLT;
        netdev_info(dev, "mac: %pM\n", dev->dev_addr);
  
        platform_set_drvdata(pdev, pp->dev);
@@@ -2833,4 -2844,3 +2834,3 @@@ module_param(rxq_number, int, S_IRUGO)
  module_param(txq_number, int, S_IRUGO);
  
  module_param(rxq_def, int, S_IRUGO);
- module_param(txq_def, int, S_IRUGO);
index 32a95c105e4ea169655782e27d48f04e5c067d43,edd63f1230f3d0076f9bfbdcff146f62a6c2acaa..fd0829c2839d9a2d718c34ed784f2b5ecc855ffc
@@@ -6,7 -6,6 +6,7 @@@
   */
  
  #include "qlcnic.h"
 +#include "qlcnic_sriov.h"
  #include <linux/if_vlan.h>
  #include <linux/ipv6.h>
  #include <linux/ethtool.h>
  
  #define QLCNIC_MAX_TX_QUEUES          1
  #define RSS_HASHTYPE_IP_TCP           0x3
 -
 -/* status descriptor mailbox data
 - * @phy_addr: physical address of buffer
 - * @sds_ring_size: buffer size
 - * @intrpt_id: interrupt id
 - * @intrpt_val: source of interrupt
 - */
 -struct qlcnic_sds_mbx {
 -      u64     phy_addr;
 -      u8      rsvd1[16];
 -      u16     sds_ring_size;
 -      u16     rsvd2[3];
 -      u16     intrpt_id;
 -      u8      intrpt_val;
 -      u8      rsvd3[5];
 -} __packed;
 -
 -/* receive descriptor buffer data
 - * phy_addr_reg: physical address of regular buffer
 - * phy_addr_jmb: physical address of jumbo buffer
 - * reg_ring_sz: size of regular buffer
 - * reg_ring_len: no. of entries in regular buffer
 - * jmb_ring_len: no. of entries in jumbo buffer
 - * jmb_ring_sz: size of jumbo buffer
 - */
 -struct qlcnic_rds_mbx {
 -      u64     phy_addr_reg;
 -      u64     phy_addr_jmb;
 -      u16     reg_ring_sz;
 -      u16     reg_ring_len;
 -      u16     jmb_ring_sz;
 -      u16     jmb_ring_len;
 -} __packed;
 -
 -/* host producers for regular and jumbo rings */
 -struct __host_producer_mbx {
 -      u32     reg_buf;
 -      u32     jmb_buf;
 -} __packed;
 -
 -/* Receive context mailbox data outbox registers
 - * @state: state of the context
 - * @vport_id: virtual port id
 - * @context_id: receive context id
 - * @num_pci_func: number of pci functions of the port
 - * @phy_port: physical port id
 - */
 -struct qlcnic_rcv_mbx_out {
 -      u8      rcv_num;
 -      u8      sts_num;
 -      u16     ctx_id;
 -      u8      state;
 -      u8      num_pci_func;
 -      u8      phy_port;
 -      u8      vport_id;
 -      u32     host_csmr[QLCNIC_MAX_RING_SETS];
 -      struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
 -} __packed;
 -
 -struct qlcnic_add_rings_mbx_out {
 -      u8      rcv_num;
 -      u8      sts_num;
 -      u16  ctx_id;
 -      u32  host_csmr[QLCNIC_MAX_RING_SETS];
 -      struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
 -} __packed;
 -
 -/* Transmit context mailbox inbox registers
 - * @phys_addr: DMA address of the transmit buffer
 - * @cnsmr_index: host consumer index
 - * @size: legth of transmit buffer ring
 - * @intr_id: interrput id
 - * @src: src of interrupt
 - */
 -struct qlcnic_tx_mbx {
 -      u64     phys_addr;
 -      u64     cnsmr_index;
 -      u16     size;
 -      u16     intr_id;
 -      u8      src;
 -      u8      rsvd[3];
 -} __packed;
 -
 -/* Transmit context mailbox outbox registers
 - * @host_prod: host producer index
 - * @ctx_id: transmit context id
 - * @state: state of the transmit context
 - */
 -struct qlcnic_tx_mbx_out {
 -      u32     host_prod;
 -      u16     ctx_id;
 -      u8      state;
 -      u8      rsvd;
 -} __packed;
 +#define QLC_83XX_FW_MBX_CMD           0
  
  static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
        {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
        {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
        {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
        {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
 +      {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
 +      {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
  };
  
 -static const u32 qlcnic_83xx_ext_reg_tbl[] = {
 +const u32 qlcnic_83xx_ext_reg_tbl[] = {
        0x38CC,         /* Global Reset */
        0x38F0,         /* Wildcard */
        0x38FC,         /* Informant */
        0x34A4,         /* QLC_83XX_ASIC_TEMP */
  };
  
 -static const u32 qlcnic_83xx_reg_tbl[] = {
 +const u32 qlcnic_83xx_reg_tbl[] = {
        0x34A8,         /* PEG_HALT_STAT1 */
        0x34AC,         /* PEG_HALT_STAT2 */
        0x34B0,         /* FW_HEARTBEAT */
@@@ -157,8 -247,6 +157,8 @@@ static struct qlcnic_hardware_ops qlcni
        .process_lb_rcv_ring_diag       = qlcnic_83xx_process_rcv_ring_diag,
        .create_rx_ctx                  = qlcnic_83xx_create_rx_ctx,
        .create_tx_ctx                  = qlcnic_83xx_create_tx_ctx,
 +      .del_rx_ctx                     = qlcnic_83xx_del_rx_ctx,
 +      .del_tx_ctx                     = qlcnic_83xx_del_tx_ctx,
        .setup_link_event               = qlcnic_83xx_setup_link_event,
        .get_nic_info                   = qlcnic_83xx_get_nic_info,
        .get_pci_info                   = qlcnic_83xx_get_pci_info,
        .config_promisc_mode            = qlcnic_83xx_nic_set_promisc,
        .change_l2_filter               = qlcnic_83xx_change_l2_filter,
        .get_board_info                 = qlcnic_83xx_get_port_info,
 +      .free_mac_list                  = qlcnic_82xx_free_mac_list,
  };
  
  static struct qlcnic_nic_template qlcnic_83xx_ops = {
@@@ -268,20 -355,14 +268,20 @@@ int qlcnic_83xx_setup_intr(struct qlcni
                                              num_intr));
        /* account for AEN interrupt MSI-X based interrupts */
        num_msix += 1;
 -      num_msix += adapter->max_drv_tx_rings;
 +
 +      if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
 +              num_msix += adapter->max_drv_tx_rings;
 +
        err = qlcnic_enable_msix(adapter, num_msix);
        if (err == -ENOMEM)
                return err;
        if (adapter->flags & QLCNIC_MSIX_ENABLED)
                num_msix = adapter->ahw->num_msix;
 -      else
 +      else {
 +              if (qlcnic_sriov_vf_check(adapter))
 +                      return -EINVAL;
                num_msix = 1;
 +      }
        /* setup interrupt mapping table for fw */
        ahw->intr_tbl = vzalloc(num_msix *
                                sizeof(struct qlcnic_intrpt_config));
@@@ -340,13 -421,12 +340,13 @@@ inline void qlcnic_83xx_enable_legacy_m
        writel(0, adapter->ahw->pci_base0 + mask);
  }
  
 -inline void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
 +void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
  {
        u32 mask;
  
        mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
        writel(1, adapter->ahw->pci_base0 + mask);
 +      QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0);
  }
  
  static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
@@@ -402,8 -482,7 +402,8 @@@ static void qlcnic_83xx_poll_process_ae
  
        event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
        if (event &  QLCNIC_MBX_ASYNC_EVENT)
 -              qlcnic_83xx_process_aen(adapter);
 +              __qlcnic_83xx_process_aen(adapter);
 +
  out:
        qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
        spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
@@@ -456,15 -535,17 +456,15 @@@ done
  
  void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
  {
 -      u32 val = 0, num_msix = adapter->ahw->num_msix - 1;
 +      u32 num_msix;
 +
 +      qlcnic_83xx_disable_mbx_intr(adapter);
  
        if (adapter->flags & QLCNIC_MSIX_ENABLED)
                num_msix = adapter->ahw->num_msix - 1;
        else
                num_msix = 0;
  
 -      QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
 -
 -      qlcnic_83xx_disable_mbx_intr(adapter);
 -
        msleep(20);
        synchronize_irq(adapter->msix_entries[num_msix].vector);
        free_irq(adapter->msix_entries[num_msix].vector, adapter);
@@@ -514,7 -595,7 +514,7 @@@ int qlcnic_83xx_setup_mbx_intr(struct q
  void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
  {
        u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
 -      adapter->ahw->pci_func = val & 0xf;
 +      adapter->ahw->pci_func = (val >> 24) & 0xff;
  }
  
  int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
@@@ -626,11 -707,6 +626,11 @@@ void qlcnic_83xx_check_vf(struct qlcnic
        ahw->fw_hal_version = 2;
        qlcnic_get_func_no(adapter);
  
 +      if (qlcnic_sriov_vf_check(adapter)) {
 +              qlcnic_sriov_vf_set_ops(adapter);
 +              return;
 +      }
 +
        /* Determine function privilege level */
        op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
        if (op_mode == QLC_83XX_DEFAULT_OPMODE)
                         ahw->fw_hal_version);
                adapter->nic_ops = &qlcnic_vf_ops;
        } else {
 +              if (pci_find_ext_capability(adapter->pdev,
 +                                          PCI_EXT_CAP_ID_SRIOV))
 +                      set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state);
                adapter->nic_ops = &qlcnic_83xx_ops;
        }
  }
@@@ -682,7 -755,7 +682,7 @@@ static void qlcnic_dump_mbx(struct qlcn
  }
  
  /* Mailbox response for mac rcode */
 -static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
 +u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
  {
        u32 fw_data;
        u8 mac_cmd_rcode;
        return 1;
  }
  
 -static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
 +u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
  {
        u32 data;
        unsigned long wait_time = 0;
@@@ -759,7 -832,7 +759,7 @@@ poll
                /* Get the FW response data */
                fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
                if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
 -                      qlcnic_83xx_process_aen(adapter);
 +                      __qlcnic_83xx_process_aen(adapter);
                        mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
                        if (mbx_val)
                                goto poll;
@@@ -811,7 -884,6 +811,7 @@@ int qlcnic_83xx_alloc_mbx_args(struct q
        size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
        for (i = 0; i < size; i++) {
                if (type == mbx_tbl[i].cmd) {
 +                      mbx->op_type = QLC_83XX_FW_MBX_CMD;
                        mbx->req.num = mbx_tbl[i].in_args;
                        mbx->rsp.num = mbx_tbl[i].out_args;
                        mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
                        memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
                        temp = adapter->ahw->fw_hal_version << 29;
                        mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
 -                      break;
 +                      return 0;
                }
        }
 -      return 0;
 +      return -EINVAL;
  }
  
  void qlcnic_83xx_idc_aen_work(struct work_struct *work)
@@@ -863,7 -935,7 +863,7 @@@ static void qlcnic_83xx_handle_idc_comp
        return;
  }
  
 -void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
 +void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
  {
        u32 event[QLC_83XX_MBX_AEN_CNT];
        int i;
                break;
        case QLCNIC_MBX_TIME_EXTEND_EVENT:
                break;
 +      case QLCNIC_MBX_BC_EVENT:
 +              qlcnic_sriov_handle_bc_event(adapter, event[1]);
 +              break;
        case QLCNIC_MBX_SFP_INSERT_EVENT:
                dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
                         QLCNIC_MBX_RSP(event[0]));
        QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
  }
  
 +static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
 +{
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      u32 resp, event;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ahw->mbx_lock, flags);
 +
 +      resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
 +      if (resp & QLCNIC_SET_OWNER) {
 +              event = readl(QLCNIC_MBX_FW(ahw, 0));
 +              if (event &  QLCNIC_MBX_ASYNC_EVENT)
 +                      __qlcnic_83xx_process_aen(adapter);
 +      }
 +
 +      spin_unlock_irqrestore(&ahw->mbx_lock, flags);
 +}
 +
 +static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
 +{
 +      struct qlcnic_adapter *adapter;
 +
 +      adapter = container_of(work, struct qlcnic_adapter, mbx_poll_work.work);
 +
 +      if (!test_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
 +              return;
 +
 +      qlcnic_83xx_process_aen(adapter);
 +      queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work,
 +                         (HZ / 10));
 +}
 +
 +void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
 +{
 +      if (test_and_set_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
 +              return;
 +
 +      INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
 +}
 +
 +void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
 +{
 +      if (!test_and_clear_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
 +              return;
 +      cancel_delayed_work_sync(&adapter->mbx_poll_work);
 +}
 +
  static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
  {
        int index, i, err, sds_mbx_size;
                sds = &recv_ctx->sds_rings[i];
                sds->consumer = 0;
                memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
 -              sds_mbx.phy_addr = sds->phys_addr;
 +              sds_mbx.phy_addr_low = LSD(sds->phys_addr);
 +              sds_mbx.phy_addr_high = MSD(sds->phys_addr);
                sds_mbx.sds_ring_size = sds->num_desc;
  
                if (adapter->flags & QLCNIC_MSIX_ENABLED)
        return err;
  }
  
 +void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter)
 +{
 +      int err;
 +      u32 temp = 0;
 +      struct qlcnic_cmd_args cmd;
 +      struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 +
 +      if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
 +              return;
 +
 +      if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
 +              cmd.req.arg[0] |= (0x3 << 29);
 +
 +      if (qlcnic_sriov_pf_check(adapter))
 +              qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp);
 +
 +      cmd.req.arg[1] = recv_ctx->context_id | temp;
 +      err = qlcnic_issue_cmd(adapter, &cmd);
 +      if (err)
 +              dev_err(&adapter->pdev->dev,
 +                      "Failed to destroy rx ctx in firmware\n");
 +
 +      recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
 +      qlcnic_free_mbx_args(&cmd);
 +}
 +
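
The phy_addr_low/phy_addr_high mailbox fields above carry a 64-bit DMA address as two 32-bit words. A standalone sketch of that split; LSD()/MSD() here are illustrative stand-ins for the driver's macros of the same names:

    #include <stdint.h>
    #include <stdio.h>

    #define LSD(x) ((uint32_t)((uint64_t)(x)))         /* low 32 bits */
    #define MSD(x) ((uint32_t)(((uint64_t)(x)) >> 32)) /* high 32 bits */

    int main(void)
    {
            uint64_t dma = 0x0000001234abcd00ULL; /* example bus address */

            printf("low = 0x%08x, high = 0x%08x\n", LSD(dma), MSD(dma));
            return 0;
    }
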
  int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
  {
        int i, err, index, sds_mbx_size, rds_mbx_size;
        /* set mailbox hdr and capabilities */
        qlcnic_alloc_mbx_args(&cmd, adapter,
                              QLCNIC_CMD_CREATE_RX_CTX);
 +
 +      if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
 +              cmd.req.arg[0] |= (0x3 << 29);
 +
        cmd.req.arg[1] = cap;
        cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
                         (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
 +
 +      if (qlcnic_sriov_pf_check(adapter))
 +              qlcnic_pf_set_interface_id_create_rx_ctx(adapter,
 +                                                       &cmd.req.arg[6]);
        /* set up status rings, mbx 8-57/87 */
        index = QLC_83XX_HOST_SDS_MBX_IDX;
        for (i = 0; i < num_sds; i++) {
                sds = &recv_ctx->sds_rings[i];
                sds->consumer = 0;
                memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
 -              sds_mbx.phy_addr = sds->phys_addr;
 +              sds_mbx.phy_addr_low = LSD(sds->phys_addr);
 +              sds_mbx.phy_addr_high = MSD(sds->phys_addr);
                sds_mbx.sds_ring_size = sds->num_desc;
                if (adapter->flags & QLCNIC_MSIX_ENABLED)
                        intrpt_id = ahw->intr_tbl[i].id;
        rds = &recv_ctx->rds_rings[0];
        rds->producer = 0;
        memset(&rds_mbx, 0, rds_mbx_size);
 -      rds_mbx.phy_addr_reg = rds->phys_addr;
 +      rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr);
 +      rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr);
        rds_mbx.reg_ring_sz = rds->dma_size;
        rds_mbx.reg_ring_len = rds->num_desc;
        /* Jumbo ring */
        rds = &recv_ctx->rds_rings[1];
        rds->producer = 0;
 -      rds_mbx.phy_addr_jmb = rds->phys_addr;
 +      rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr);
 +      rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr);
        rds_mbx.jmb_ring_sz = rds->dma_size;
        rds_mbx.jmb_ring_len = rds->num_desc;
        buf = &cmd.req.arg[index];
        return err;
  }
  
 +void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter,
 +                          struct qlcnic_host_tx_ring *tx_ring)
 +{
 +      struct qlcnic_cmd_args cmd;
 +      u32 temp = 0;
 +
 +      if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
 +              return;
 +
 +      if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
 +              cmd.req.arg[0] |= (0x3 << 29);
 +
 +      if (qlcnic_sriov_pf_check(adapter))
 +              qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp);
 +
 +      cmd.req.arg[1] = tx_ring->ctx_id | temp;
 +      if (qlcnic_issue_cmd(adapter, &cmd))
 +              dev_err(&adapter->pdev->dev,
 +                      "Failed to destroy tx ctx in firmware\n");
 +      qlcnic_free_mbx_args(&cmd);
 +}
 +
  int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
                              struct qlcnic_host_tx_ring *tx, int ring)
  {
        int err;
        u16 msix_id;
 -      u32 *buf, intr_mask;
 +      u32 *buf, intr_mask, temp = 0;
        struct qlcnic_cmd_args cmd;
        struct qlcnic_tx_mbx mbx;
        struct qlcnic_tx_mbx_out *mbx_out;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      u32 msix_vector;
  
        /* Reset host resources */
        tx->producer = 0;
        memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
  
        /* setup mailbox inbox registers */
 -      mbx.phys_addr = tx->phys_addr;
 -      mbx.cnsmr_index = tx->hw_cons_phys_addr;
 +      mbx.phys_addr_low = LSD(tx->phys_addr);
 +      mbx.phys_addr_high = MSD(tx->phys_addr);
 +      mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr);
 +      mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr);
        mbx.size = tx->num_desc;
 -      if (adapter->flags & QLCNIC_MSIX_ENABLED)
 -              msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id;
 -      else
 +      if (adapter->flags & QLCNIC_MSIX_ENABLED) {
 +              if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
 +                      msix_vector = adapter->max_sds_rings + ring;
 +              else
 +                      msix_vector = adapter->max_sds_rings - 1;
 +              msix_id = ahw->intr_tbl[msix_vector].id;
 +      } else {
                msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
 +      }
 +
        if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
                mbx.intr_id = msix_id;
        else
        mbx.src = 0;
  
        qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
 +
 +      if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
 +              cmd.req.arg[0] |= (0x3 << 29);
 +
 +      if (qlcnic_sriov_pf_check(adapter))
 +              qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
 +
        cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
 -      cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES;
 +      cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp;
        buf = &cmd.req.arg[6];
        memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
        /* send the mailbox command */
        mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
        tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
        tx->ctx_id = mbx_out->ctx_id;
 -      if (adapter->flags & QLCNIC_MSIX_ENABLED) {
 +      if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 +          !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
                intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
                tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
        }
@@@ -1322,8 -1267,7 +1322,8 @@@ static int qlcnic_83xx_diag_alloc_res(s
  
        if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
                /* disable and free mailbox interrupt */
 -              qlcnic_83xx_free_mbx_intr(adapter);
 +              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 +                      qlcnic_83xx_free_mbx_intr(adapter);
                adapter->ahw->loopback_state = 0;
                adapter->ahw->hw_ops->setup_link_event(adapter, 1);
        }
@@@ -1351,14 -1295,12 +1351,14 @@@ static void qlcnic_83xx_diag_free_res(s
        qlcnic_detach(adapter);
  
        if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
 -              err = qlcnic_83xx_setup_mbx_intr(adapter);
 -              if (err) {
 -                      dev_err(&adapter->pdev->dev,
 -                              "%s: failed to setup mbx interrupt\n",
 -                              __func__);
 -                      goto out;
 +              if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
 +                      err = qlcnic_83xx_setup_mbx_intr(adapter);
 +                      if (err) {
 +                              dev_err(&adapter->pdev->dev,
 +                                      "%s: failed to setup mbx interrupt\n",
 +                                      __func__);
 +                              goto out;
 +                      }
                }
        }
        adapter->ahw->diag_test = 0;
@@@ -1431,60 -1373,12 +1431,60 @@@ mbx_err
        }
  }
  
 +int  qlcnic_83xx_set_led(struct net_device *netdev,
 +                       enum ethtool_phys_id_state state)
 +{
 +      struct qlcnic_adapter *adapter = netdev_priv(netdev);
 +      int err = -EIO, active = 1;
 +
 +      if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
 +              netdev_warn(netdev,
 +                          "LED test is not supported in non-privileged mode\n");
 +              return -EOPNOTSUPP;
 +      }
 +
 +      switch (state) {
 +      case ETHTOOL_ID_ACTIVE:
 +              if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
 +                      return -EBUSY;
 +
 +              if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 +                      break;
 +
 +              err = qlcnic_83xx_config_led(adapter, active, 0);
 +              if (err)
 +                      netdev_err(netdev, "Failed to set LED blink state\n");
 +              break;
 +      case ETHTOOL_ID_INACTIVE:
 +              active = 0;
 +
 +              if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 +                      break;
 +
 +              err = qlcnic_83xx_config_led(adapter, active, 0);
 +              if (err)
 +                      netdev_err(netdev, "Failed to reset LED blink state\n");
 +              break;
 +
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      if (!active || err)
 +              clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
 +
 +      return err;
 +}
 +
  void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
                                       int enable)
  {
        struct qlcnic_cmd_args cmd;
        int status;
  
 +      if (qlcnic_sriov_vf_check(adapter))
 +              return;
 +
        if (enable) {
                qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC);
                cmd.req.arg[1] = BIT_0 | BIT_31;
@@@ -1547,35 -1441,24 +1547,35 @@@ int qlcnic_83xx_setup_link_event(struc
        return err;
  }
  
 +static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
 +                                               u32 *interface_id)
 +{
 +      if (qlcnic_sriov_pf_check(adapter)) {
 +              qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
 +      } else {
 +              if (!qlcnic_sriov_vf_check(adapter))
 +                      *interface_id = adapter->recv_ctx->context_id << 16;
 +      }
 +}
 +
  int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
  {
        int err;
 -      u32 temp;
 +      u32 temp = 0;
        struct qlcnic_cmd_args cmd;
  
        if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
                return -EIO;
  
        qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
 -      temp = adapter->recv_ctx->context_id << 16;
 +      qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
        cmd.req.arg[1] = (mode ? 1 : 0) | temp;
        err = qlcnic_issue_cmd(adapter, &cmd);
        if (err)
                dev_info(&adapter->pdev->dev,
                         "Promiscous mode config failed\n");
 -      qlcnic_free_mbx_args(&cmd);
  
 +      qlcnic_free_mbx_args(&cmd);
        return err;
  }
  
@@@ -1607,9 -1490,7 +1607,9 @@@ int qlcnic_83xx_loopback_test(struct ne
        /* Poll for link up event before running traffic */
        do {
                msleep(500);
 -              qlcnic_83xx_process_aen(adapter);
 +              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 +                      qlcnic_83xx_process_aen(adapter);
 +
                if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
                        dev_info(&adapter->pdev->dev,
                                 "Firmware didn't sent link up event to loopback request\n");
                }
        } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
  
+       /* Make sure carrier is off and queue is stopped during loopback */
+       if (netif_running(netdev)) {
+               netif_carrier_off(netdev);
+               netif_stop_queue(netdev);
+       }
        ret = qlcnic_do_lb_test(adapter, mode);
  
        qlcnic_83xx_clear_lb_mode(adapter, mode);
@@@ -1663,9 -1550,7 +1669,9 @@@ int qlcnic_83xx_set_lb_mode(struct qlcn
        /* Wait for Link and IDC Completion AEN */
        do {
                msleep(300);
 -              qlcnic_83xx_process_aen(adapter);
 +              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 +                      qlcnic_83xx_process_aen(adapter);
 +
                if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
                        dev_err(&adapter->pdev->dev,
                                "FW did not generate IDC completion AEN\n");
@@@ -1705,9 -1590,7 +1711,9 @@@ int qlcnic_83xx_clear_lb_mode(struct ql
        /* Wait for Link and IDC Completion AEN */
        do {
                msleep(300);
 -              qlcnic_83xx_process_aen(adapter);
 +              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 +                      qlcnic_83xx_process_aen(adapter);
 +
                if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
                        dev_err(&adapter->pdev->dev,
                                "Firmware didn't sent IDC completion AEN\n");
        return status;
  }
  
 +static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
 +                                              u32 *interface_id)
 +{
 +      if (qlcnic_sriov_pf_check(adapter)) {
 +              qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id);
 +      } else {
 +              if (!qlcnic_sriov_vf_check(adapter))
 +                      *interface_id = adapter->recv_ctx->context_id << 16;
 +      }
 +}
 +
  void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
                               int mode)
  {
        int err;
 -      u32 temp, temp_ip;
 +      u32 temp = 0, temp_ip;
        struct qlcnic_cmd_args cmd;
  
        qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR);
 -      if (mode == QLCNIC_IP_UP) {
 -              temp = adapter->recv_ctx->context_id << 16;
 +      qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp);
 +
 +      if (mode == QLCNIC_IP_UP)
                cmd.req.arg[1] = 1 | temp;
 -      } else {
 -              temp = adapter->recv_ctx->context_id << 16;
 +      else
                cmd.req.arg[1] = 2 | temp;
 -      }
  
        /*
         * Adapter needs IP address in network byte order.
                dev_err(&adapter->netdev->dev,
                        "could not notify %s IP 0x%x request\n",
                        (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
 +
        qlcnic_free_mbx_args(&cmd);
  }
  
@@@ -1829,22 -1701,11 +1835,22 @@@ int qlcnic_83xx_config_rss(struct qlcni
  
  }
  
 +static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
 +                                               u32 *interface_id)
 +{
 +      if (qlcnic_sriov_pf_check(adapter)) {
 +              qlcnic_pf_set_interface_id_macaddr(adapter, interface_id);
 +      } else {
 +              if (!qlcnic_sriov_vf_check(adapter))
 +                      *interface_id = adapter->recv_ctx->context_id << 16;
 +      }
 +}
 +
  int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
 -                                 __le16 vlan_id, u8 op)
 +                                 u16 vlan_id, u8 op)
  {
        int err;
 -      u32 *buf;
 +      u32 *buf, temp = 0;
        struct qlcnic_cmd_args cmd;
        struct qlcnic_macvlan_mbx mv;
  
        err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
        if (err)
                return err;
 -      cmd.req.arg[1] = op | (1 << 8) |
 -                      (adapter->recv_ctx->context_id << 16);
  
 -      mv.vlan = le16_to_cpu(vlan_id);
 -      memcpy(&mv.mac, addr, ETH_ALEN);
 +      if (vlan_id)
 +              op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
 +                   QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
 +
 +      cmd.req.arg[1] = op | (1 << 8);
 +      qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
 +      cmd.req.arg[1] |= temp;
 +      mv.vlan = vlan_id;
 +      mv.mac_addr0 = addr[0];
 +      mv.mac_addr1 = addr[1];
 +      mv.mac_addr2 = addr[2];
 +      mv.mac_addr3 = addr[3];
 +      mv.mac_addr4 = addr[4];
 +      mv.mac_addr5 = addr[5];
        buf = &cmd.req.arg[2];
        memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
        err = qlcnic_issue_cmd(adapter, &cmd);
  }
  
  void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
 -                                __le16 vlan_id)
 +                                u16 vlan_id)
  {
        u8 mac[ETH_ALEN];
        memcpy(&mac, addr, ETH_ALEN);
@@@ -1981,7 -1832,7 +1987,7 @@@ irqreturn_t qlcnic_83xx_handle_aen(int 
  
        event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
        if (event &  QLCNIC_MBX_ASYNC_EVENT)
 -              qlcnic_83xx_process_aen(adapter);
 +              __qlcnic_83xx_process_aen(adapter);
  out:
        mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
        writel(0, adapter->ahw->pci_base0 + mask);
@@@ -2157,17 -2008,14 +2163,17 @@@ int qlcnic_83xx_get_pci_info(struct qlc
  int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
  {
        int i, index, err;
 -      bool type;
        u8 max_ints;
 -      u32 val, temp;
 +      u32 val, temp, type;
        struct qlcnic_cmd_args cmd;
  
        max_ints = adapter->ahw->num_msix - 1;
        qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
        cmd.req.arg[1] = max_ints;
 +
 +      if (qlcnic_sriov_vf_check(adapter))
 +              cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16;
 +
        for (i = 0, index = 2; i < max_ints; i++) {
                type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
                val = type | (adapter->ahw->intr_tbl[i].type << 4);
@@@ -2321,7 -2169,7 +2327,7 @@@ static int qlcnic_83xx_poll_flash_statu
        return 0;
  }
  
 -static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
 +int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter)
  {
        int ret;
        u32 cmd;
        return 0;
  }
  
 -static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter)
 +int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
  {
        int ret;
  
@@@ -2413,7 -2261,7 +2419,7 @@@ int qlcnic_83xx_erase_flash_sector(stru
                return -EIO;
  
        if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
 -              ret = qlcnic_83xx_enable_flash_write_op(adapter);
 +              ret = qlcnic_83xx_enable_flash_write(adapter);
                if (ret) {
                        qlcnic_83xx_unlock_flash(adapter);
                        dev_err(&adapter->pdev->dev,
        }
  
        if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
 -              ret = qlcnic_83xx_disable_flash_write_op(adapter);
 +              ret = qlcnic_83xx_disable_flash_write(adapter);
                if (ret) {
                        qlcnic_83xx_unlock_flash(adapter);
                        dev_err(&adapter->pdev->dev,
@@@ -2495,8 -2343,8 +2501,8 @@@ int qlcnic_83xx_flash_bulk_write(struc
        u32 temp;
        int ret = -EIO;
  
 -      if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) ||
 -          (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) {
 +      if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
 +          (count > QLC_83XX_FLASH_WRITE_MAX)) {
                dev_err(&adapter->pdev->dev,
                        "%s: Invalid word count\n", __func__);
                return -EIO;
@@@ -2774,19 -2622,13 +2780,19 @@@ int qlcnic_83xx_flash_read32(struct qlc
  
  int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
  {
 +      u8 pci_func;
        int err;
        u32 config = 0, state;
        struct qlcnic_cmd_args cmd;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
  
 -      state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(ahw->pci_func));
 -      if (!QLC_83xx_FUNC_VAL(state, ahw->pci_func)) {
 +      if (qlcnic_sriov_vf_check(adapter))
 +              pci_func = adapter->portnum;
 +      else
 +              pci_func = ahw->pci_func;
 +
 +      state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func));
 +      if (!QLC_83xx_FUNC_VAL(state, pci_func)) {
                dev_info(&adapter->pdev->dev, "link state down\n");
                return config;
        }
@@@ -2944,6 -2786,7 +2950,7 @@@ static u64 *qlcnic_83xx_fill_stats(stru
  void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
  {
        struct qlcnic_cmd_args cmd;
+       struct net_device *netdev = adapter->netdev;
        int ret = 0;
  
        qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
        data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                      QLC_83XX_STAT_TX, &ret);
        if (ret) {
-               dev_info(&adapter->pdev->dev, "Error getting MAC stats\n");
+               netdev_err(netdev, "Error getting Tx stats\n");
                goto out;
        }
        /* Get MAC stats */
        data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                      QLC_83XX_STAT_MAC, &ret);
        if (ret) {
-               dev_info(&adapter->pdev->dev,
-                        "Error getting Rx stats\n");
+               netdev_err(netdev, "Error getting MAC stats\n");
                goto out;
        }
        /* Get Rx stats */
        data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
                                      QLC_83XX_STAT_RX, &ret);
        if (ret)
-               dev_info(&adapter->pdev->dev,
-                        "Error getting Tx stats\n");
+               netdev_err(netdev, "Error getting Rx stats\n");
  out:
        qlcnic_free_mbx_args(&cmd);
  }
index 356859b9f21cb9c53ac7d72542cc4b4f32892a84,5fa847fe388a9df3b7f78b44b63f72bffaaeeced..d3f8797efcc3d7a6012ea56ddb4feda210727f22
@@@ -9,7 -9,6 +9,7 @@@
  #include <linux/if_vlan.h>
  #include <net/ip.h>
  #include <linux/ipv6.h>
 +#include <net/checksum.h>
  
  #include "qlcnic.h"
  
@@@ -147,10 -146,7 +147,10 @@@ static inline u8 qlcnic_mac_hash(u64 ma
  static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
                                        u16 handle, u8 ring_id)
  {
 -      if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X)
 +      unsigned short device = adapter->pdev->device;
 +
 +      if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
 +          (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
                return handle | (ring_id << 15);
        else
                return handle;
@@@ -162,7 -158,7 +162,7 @@@ static inline int qlcnic_82xx_is_lb_pkt
  }
  
  void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
 -                        int loopback_pkt, __le16 vlan_id)
 +                        int loopback_pkt, u16 vlan_id)
  {
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
        struct qlcnic_filter *fil, *tmp_fil;
  }
  
  void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
 -                             __le16 vlan_id)
 +                             u16 vlan_id)
  {
        struct cmd_desc_type0 *hwdesc;
        struct qlcnic_nic_req *req;
        memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
  
        vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
 -      vlan_req->vlan_id = vlan_id;
 +      vlan_req->vlan_id = cpu_to_le16(vlan_id);
  
        tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
        smp_mb();
@@@ -281,7 -277,7 +281,7 @@@ static void qlcnic_send_filter(struct q
        struct net_device *netdev = adapter->netdev;
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
        u64 src_addr = 0;
 -      __le16 vlan_id = 0;
 +      u16 vlan_id = 0;
        u8 hindex;
  
        if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
@@@ -344,14 -340,14 +344,14 @@@ static int qlcnic_tx_pkt(struct qlcnic_
                flags = FLAGS_VLAN_OOB;
                vlan_tci = vlan_tx_tag_get(skb);
        }
 -      if (unlikely(adapter->pvid)) {
 +      if (unlikely(adapter->tx_pvid)) {
                if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
                        return -EIO;
                if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
                        goto set_flags;
  
                flags = FLAGS_VLAN_OOB;
 -              vlan_tci = adapter->pvid;
 +              vlan_tci = adapter->tx_pvid;
        }
  set_flags:
        qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
                memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
        }
        opcode = TX_ETHER_PKT;
-       if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
-           skb_shinfo(skb)->gso_size > 0) {
+       if (skb_is_gso(skb)) {
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
                first_desc->total_hdr_length = hdr_len;
@@@ -980,10 -975,10 +979,10 @@@ static inline int qlcnic_check_rx_taggi
                memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
                skb_pull(skb, VLAN_HLEN);
        }
 -      if (!adapter->pvid)
 +      if (!adapter->rx_pvid)
                return 0;
  
 -      if (*vlan_tag == adapter->pvid) {
 +      if (*vlan_tag == adapter->rx_pvid) {
                /* Outer vlan tag. Packet should follow non-vlan path */
                *vlan_tag = 0xffff;
                return 0;
@@@ -1029,7 -1024,8 +1028,7 @@@ qlcnic_process_rcv(struct qlcnic_adapte
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
 -              qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
 -                                   cpu_to_le16(t_vid));
 +              qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
        }
  
        if (length > rds_ring->skb_size)
        skb->protocol = eth_type_trans(skb, netdev);
  
        if (vid != 0xffff)
 -              __vlan_hwaccel_put_tag(skb, vid);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
  
        napi_gro_receive(&sds_ring->napi, skb);
  
@@@ -1106,7 -1102,8 +1105,7 @@@ qlcnic_process_lro(struct qlcnic_adapte
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
 -              qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
 -                                   cpu_to_le16(t_vid));
 +              qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
        }
  
        if (timestamp)
                iph = (struct iphdr *)skb->data;
                th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
                length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
 +              csum_replace2(&iph->check, iph->tot_len, htons(length));
                iph->tot_len = htons(length);
 -              iph->check = 0;
 -              iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
        }
  
        th->psh = push;
        }
  
        if (vid != 0xffff)
 -              __vlan_hwaccel_put_tag(skb, vid);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
        netif_receive_skb(skb);
  
        adapter->stats.lro_pkts++;
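
The csum_replace2() conversion above avoids recomputing the whole IP header checksum when only tot_len changes: the existing checksum is patched incrementally (RFC 1624). A standalone model of that update; csum_update16 is a hypothetical helper, not the kernel API:

    #include <stdint.h>
    #include <stdio.h>

    /* HC' = ~(~HC + ~m + m') per RFC 1624, for one 16-bit field */
    static uint16_t csum_update16(uint16_t check, uint16_t old_val,
                                  uint16_t new_val)
    {
            uint32_t sum = (uint16_t)~check;

            sum += (uint16_t)~old_val; /* fold the old field out */
            sum += new_val;            /* fold the new field in */
            while (sum >> 16)          /* wrap carries back around */
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            /* toy values: checksum 0xb1e6, tot_len 0x0054 -> 0x0040 */
            printf("patched check = 0x%04x\n",
                   csum_update16(0xb1e6, 0x0054, 0x0040)); /* 0xb1fa */
            return 0;
    }
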
@@@ -1498,7 -1496,8 +1497,7 @@@ qlcnic_83xx_process_rcv(struct qlcnic_a
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
 -              qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
 -                                   cpu_to_le16(t_vid));
 +              qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
        }
  
        if (length > rds_ring->skb_size)
        skb->protocol = eth_type_trans(skb, netdev);
  
        if (vid != 0xffff)
 -              __vlan_hwaccel_put_tag(skb, vid);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
  
        napi_gro_receive(&sds_ring->napi, skb);
  
@@@ -1567,7 -1566,8 +1566,7 @@@ qlcnic_83xx_process_lro(struct qlcnic_a
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
 -              qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
 -                                   cpu_to_le16(t_vid));
 +              qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
        }
        if (qlcnic_83xx_is_tstamp(sts_data[1]))
                data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
                iph = (struct iphdr *)skb->data;
                th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
                length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
 +              csum_replace2(&iph->check, iph->tot_len, htons(length));
                iph->tot_len = htons(length);
 -              iph->check = 0;
 -              iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
        }
  
        th->psh = push;
        }
  
        if (vid != 0xffff)
 -              __vlan_hwaccel_put_tag(skb, vid);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
  
        netif_receive_skb(skb);
  
@@@ -1690,29 -1691,6 +1689,29 @@@ skip
        return count;
  }
  
 +static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
 +{
 +      int tx_complete;
 +      int work_done;
 +      struct qlcnic_host_sds_ring *sds_ring;
 +      struct qlcnic_adapter *adapter;
 +      struct qlcnic_host_tx_ring *tx_ring;
 +
 +      sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
 +      adapter = sds_ring->adapter;
 +      /* tx ring count = 1 */
 +      tx_ring = adapter->tx_ring;
 +
 +      tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
 +      work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
 +      if ((work_done < budget) && tx_complete) {
 +              napi_complete(&sds_ring->napi);
 +              qlcnic_83xx_enable_intr(adapter, sds_ring);
 +      }
 +
 +      return work_done;
 +}
 +
  static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
  {
        int tx_complete;
@@@ -1790,8 -1768,7 +1789,8 @@@ void qlcnic_83xx_napi_enable(struct qlc
                        qlcnic_83xx_enable_intr(adapter, sds_ring);
        }
  
 -      if (adapter->flags & QLCNIC_MSIX_ENABLED) {
 +      if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 +          !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
                for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
                        tx_ring = &adapter->tx_ring[ring];
                        napi_enable(&tx_ring->napi);
@@@ -1818,8 -1795,7 +1817,8 @@@ void qlcnic_83xx_napi_disable(struct ql
                napi_disable(&sds_ring->napi);
        }
  
 -      if (adapter->flags & QLCNIC_MSIX_ENABLED) {
 +      if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 +          !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
                for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
                        tx_ring = &adapter->tx_ring[ring];
                        qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
  int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
                         struct net_device *netdev)
  {
 -      int ring, max_sds_rings;
 +      int ring, max_sds_rings, temp;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        max_sds_rings = adapter->max_sds_rings;
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
 -              if (adapter->flags & QLCNIC_MSIX_ENABLED)
 -                      netif_napi_add(netdev, &sds_ring->napi,
 -                                     qlcnic_83xx_rx_poll,
 -                                     QLCNIC_NETDEV_WEIGHT * 2);
 -              else
 +              if (adapter->flags & QLCNIC_MSIX_ENABLED) {
 +                      if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
 +                              netif_napi_add(netdev, &sds_ring->napi,
 +                                             qlcnic_83xx_rx_poll,
 +                                             QLCNIC_NETDEV_WEIGHT * 2);
 +                      } else {
 +                              temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
 +                              netif_napi_add(netdev, &sds_ring->napi,
 +                                             qlcnic_83xx_msix_sriov_vf_poll,
 +                                             temp);
 +                      }
 +
 +              } else {
                        netif_napi_add(netdev, &sds_ring->napi,
                                       qlcnic_83xx_poll,
                                       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
 +              }
        }
  
        if (qlcnic_alloc_tx_rings(adapter, netdev)) {
                return -ENOMEM;
        }
  
 -      if (adapter->flags & QLCNIC_MSIX_ENABLED) {
 +      if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 +          !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
                for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
                        tx_ring = &adapter->tx_ring[ring];
                        netif_napi_add(netdev, &tx_ring->napi,
@@@ -1894,8 -1860,7 +1893,8 @@@ void qlcnic_83xx_napi_del(struct qlcnic
  
        qlcnic_free_sds_rings(adapter->recv_ctx);
  
 -      if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
 +      if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 +          !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
                for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
                        tx_ring = &adapter->tx_ring[ring];
                        netif_napi_del(&tx_ring->napi);
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index c77675da671f546921c4932632eeddff8fd22173,5ef328af61d0ab9fcbc1c098b323f444fbd63854..4e22e794a1863ea1abec985d91abe154afcf8b49
@@@ -21,6 -21,8 +21,6 @@@
  #include <linux/aer.h>
  #include <linux/log2.h>
  
 -#include <linux/sysfs.h>
 -
  #define QLC_STATUS_UNSUPPORTED_CMD    -2
  
  int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
@@@ -198,10 -200,10 +198,10 @@@ beacon_err
        }
  
        err = qlcnic_config_led(adapter, b_state, b_rate);
-       if (!err)
+       if (!err) {
                err = len;
-       else
                ahw->beacon_state = b_state;
+       }
  
        if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
                qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
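[Note: the beacon fix above restores the usual sysfs store contract — commit the new state only when the hardware call succeeded, then return the number of bytes consumed; otherwise return a negative errno. Generic shape, with hypothetical my_*() helpers:

	static ssize_t my_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t len)
	{
		int err = my_apply(dev, buf);	/* hypothetical hardware call */

		if (err)
			return err;		/* negative errno on failure */
		my_cache(dev, buf);		/* commit state after success */
		return len;			/* bytes consumed */
	}
]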
@@@ -884,244 -886,6 +884,244 @@@ static ssize_t qlcnic_sysfs_read_pci_co
        return size;
  }
  
 +static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
 +                                                  struct kobject *kobj,
 +                                                  struct bin_attribute *attr,
 +                                                  char *buf, loff_t offset,
 +                                                  size_t size)
 +{
 +      unsigned char *p_read_buf;
 +      int  ret, count;
 +      struct device *dev = container_of(kobj, struct device, kobj);
 +      struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 +
 +      if (!size)
 +              return QL_STATUS_INVALID_PARAM;
 +      if (!buf)
 +              return QL_STATUS_INVALID_PARAM;
 +
 +      count = size / sizeof(u32);
 +
 +      if (size % sizeof(u32))
 +              count++;
 +
 +      p_read_buf = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
 +      if (!p_read_buf)
 +              return -ENOMEM;
 +      if (qlcnic_83xx_lock_flash(adapter) != 0) {
 +              kfree(p_read_buf);
 +              return -EIO;
 +      }
 +
 +      ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf,
 +                                              count);
 +
 +      if (ret) {
 +              qlcnic_83xx_unlock_flash(adapter);
 +              kfree(p_read_buf);
 +              return ret;
 +      }
 +
 +      qlcnic_83xx_unlock_flash(adapter);
 +      memcpy(buf, p_read_buf, size);
 +      kfree(p_read_buf);
 +
 +      return size;
 +}
 +
 +static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
 +                                            char *buf, loff_t offset,
 +                                            size_t size)
 +{
 +      int  i, ret, count;
 +      unsigned char *p_cache, *p_src;
 +
 +      p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
 +      if (!p_cache)
 +              return -ENOMEM;
 +
 +      memcpy(p_cache, buf, size);
 +      p_src = p_cache;
 +      count = size / sizeof(u32);
 +
 +      if (qlcnic_83xx_lock_flash(adapter) != 0) {
 +              kfree(p_cache);
 +              return -EIO;
 +      }
 +
 +      if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
 +              ret = qlcnic_83xx_enable_flash_write(adapter);
 +              if (ret) {
 +                      kfree(p_cache);
 +                      qlcnic_83xx_unlock_flash(adapter);
 +                      return -EIO;
 +              }
 +      }
 +
 +      for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
 +              ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
 +                                                 (u32 *)p_src,
 +                                                 QLC_83XX_FLASH_WRITE_MAX);
 +
 +              if (ret) {
 +                      if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
 +                              ret = qlcnic_83xx_disable_flash_write(adapter);
 +                              if (ret) {
 +                                      kfree(p_cache);
 +                                      qlcnic_83xx_unlock_flash(adapter);
 +                                      return -EIO;
 +                              }
 +                      }
 +
 +                      kfree(p_cache);
 +                      qlcnic_83xx_unlock_flash(adapter);
 +                      return -EIO;
 +              }
 +
 +              p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
 +              offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
 +      }
 +
 +      if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
 +              ret = qlcnic_83xx_disable_flash_write(adapter);
 +              if (ret) {
 +                      kfree(p_cache);
 +                      qlcnic_83xx_unlock_flash(adapter);
 +                      return -EIO;
 +              }
 +      }
 +
 +      kfree(p_cache);
 +      qlcnic_83xx_unlock_flash(adapter);
 +
 +      return 0;
 +}
 +
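[Note: the bulk-write path above repeats the kfree()/unlock pair at every failure site. For comparison, a sketch of the same flow in the kernel's single-unwind goto idiom — helper names taken from the code above, success-path behavior unchanged:

	static int flash_bulk_write_sketch(struct qlcnic_adapter *adapter,
					   char *buf, loff_t offset, size_t size)
	{
		int i, count = size / sizeof(u32);
		unsigned char *p_cache, *p_src;
		int ret = -EIO;

		p_cache = kmemdup(buf, size, GFP_KERNEL);
		if (!p_cache)
			return -ENOMEM;
		p_src = p_cache;

		if (qlcnic_83xx_lock_flash(adapter))
			goto free_buf;

		if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id &&
		    qlcnic_83xx_enable_flash_write(adapter))
			goto unlock;

		ret = 0;
		for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
			if (qlcnic_83xx_flash_bulk_write(adapter, offset,
							 (u32 *)p_src,
							 QLC_83XX_FLASH_WRITE_MAX)) {
				ret = -EIO;
				break;
			}
			p_src += sizeof(u32) * QLC_83XX_FLASH_WRITE_MAX;
			offset += sizeof(u32) * QLC_83XX_FLASH_WRITE_MAX;
		}

		if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id &&
		    qlcnic_83xx_disable_flash_write(adapter))
			ret = -EIO;
	unlock:
		qlcnic_83xx_unlock_flash(adapter);
	free_buf:
		kfree(p_cache);
		return ret;
	}
]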
 +static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
 +                                       char *buf, loff_t offset, size_t size)
 +{
 +      int  i, ret, count;
 +      unsigned char *p_cache, *p_src;
 +
 +      p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
 +      if (!p_cache)
 +              return -ENOMEM;
 +
 +      memcpy(p_cache, buf, size);
 +      p_src = p_cache;
 +      count = size / sizeof(u32);
 +
 +      if (qlcnic_83xx_lock_flash(adapter) != 0) {
 +              kfree(p_cache);
 +              return -EIO;
 +      }
 +
 +      if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
 +              ret = qlcnic_83xx_enable_flash_write(adapter);
 +              if (ret) {
 +                      kfree(p_cache);
 +                      qlcnic_83xx_unlock_flash(adapter);
 +                      return -EIO;
 +              }
 +      }
 +
 +      for (i = 0; i < count; i++) {
 +              ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src);
 +              if (ret) {
 +                      if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
 +                              ret = qlcnic_83xx_disable_flash_write(adapter);
 +                              if (ret) {
 +                                      kfree(p_cache);
 +                                      qlcnic_83xx_unlock_flash(adapter);
 +                                      return -EIO;
 +                              }
 +                      }
 +                      kfree(p_cache);
 +                      qlcnic_83xx_unlock_flash(adapter);
 +                      return -EIO;
 +              }
 +
 +              p_src = p_src + sizeof(u32);
 +              offset = offset + sizeof(u32);
 +      }
 +
 +      if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
 +              ret = qlcnic_83xx_disable_flash_write(adapter);
 +              if (ret) {
 +                      kfree(p_cache);
 +                      qlcnic_83xx_unlock_flash(adapter);
 +                      return -EIO;
 +              }
 +      }
 +
 +      kfree(p_cache);
 +      qlcnic_83xx_unlock_flash(adapter);
 +
 +      return 0;
 +}
 +
 +static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
 +                                                   struct kobject *kobj,
 +                                                   struct bin_attribute *attr,
 +                                                   char *buf, loff_t offset,
 +                                                   size_t size)
 +{
 +      int  ret;
 +      static int flash_mode;
 +      unsigned long data;
 +      struct device *dev = container_of(kobj, struct device, kobj);
 +      struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 +
 +      if (!buf)
 +              return QL_STATUS_INVALID_PARAM;
 +
 +      ret = kstrtoul(buf, 16, &data);
 +
 +      switch (data) {
 +      case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
 +              flash_mode = QLC_83XX_ERASE_MODE;
 +              ret = qlcnic_83xx_erase_flash_sector(adapter, offset);
 +              if (ret) {
 +                      dev_err(&adapter->pdev->dev,
 +                              "%s failed at %d\n", __func__, __LINE__);
 +                      return -EIO;
 +              }
 +              break;
 +
 +      case QLC_83XX_FLASH_BULK_WRITE_CMD:
 +              flash_mode = QLC_83XX_BULK_WRITE_MODE;
 +              break;
 +
 +      case QLC_83XX_FLASH_WRITE_CMD:
 +              flash_mode = QLC_83XX_WRITE_MODE;
 +              break;
 +      default:
 +              if (flash_mode == QLC_83XX_BULK_WRITE_MODE) {
 +                      ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf,
 +                                                               offset, size);
 +                      if (ret) {
 +                              dev_err(&adapter->pdev->dev,
 +                                      "%s failed at %d\n",
 +                                      __func__, __LINE__);
 +                              return -EIO;
 +                      }
 +              }
 +
 +              if (flash_mode == QLC_83XX_WRITE_MODE) {
 +                      ret = qlcnic_83xx_sysfs_flash_write(adapter, buf,
 +                                                          offset, size);
 +                      if (ret) {
 +                              dev_err(&adapter->pdev->dev,
 +                                      "%s failed at %d\n", __func__,
 +                                      __LINE__);
 +                              return -EIO;
 +                      }
 +              }
 +      }
 +
 +      return size;
 +}
 +
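[Note: together the read and write handlers expose a small stateful protocol on the new "flash" bin attribute — a write whose payload parses as one of the QLC_83XX_FLASH_*_CMD hex strings selects erase, bulk-write, or word-write mode (erase acts immediately at the given offset), and any other payload is treated as data for the mode selected earlier. A rough user-space sketch; the device path and the command value are placeholders, and the real encodings live in the qlcnic headers:

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical sysfs path of one qlcnic PCI function */
		int fd = open("/sys/bus/pci/devices/0000:03:00.0/flash", O_WRONLY);
		uint32_t word = 0xdeadbeef;

		if (fd < 0)
			return 1;
		/* 1. select word-write mode: a hex command string
		 *    ("0x1234" stands in for QLC_83XX_FLASH_WRITE_CMD) */
		pwrite(fd, "0x1234", 6, 0);
		/* 2. later writes carry data; offset = flash offset */
		pwrite(fd, &word, sizeof(word), 0x10000);
		close(fd);
		return 0;
	}
]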
  static struct device_attribute dev_attr_bridged_mode = {
         .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
         .show = qlcnic_show_bridged_mode,
@@@ -1196,13 -960,6 +1196,13 @@@ static struct bin_attribute bin_attr_pm
        .write = qlcnic_sysfs_write_pm_config,
  };
  
 +static struct bin_attribute bin_attr_flash = {
 +      .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)},
 +      .size = 0,
 +      .read = qlcnic_83xx_sysfs_flash_read_handler,
 +      .write = qlcnic_83xx_sysfs_flash_write_handler,
 +};
 +
  void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
  {
        struct device *dev = &adapter->pdev->dev;
@@@ -1291,18 -1048,10 +1291,18 @@@ void qlcnic_82xx_remove_sysfs(struct ql
  
  void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
  {
 +      struct device *dev = &adapter->pdev->dev;
 +
        qlcnic_create_diag_entries(adapter);
 +
 +      if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash))
 +              dev_info(dev, "failed to create flash sysfs entry\n");
  }
  
  void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
  {
 +      struct device *dev = &adapter->pdev->dev;
 +
        qlcnic_remove_diag_entries(adapter);
 +      sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash);
  }
diff --combined drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 44cf72ac248947328fd4654ac254ca97c354a9ad,8033555e53c2f6f524211b4d848dcc13a0e14f78..87463bc701a653781371feb05599aad2b45d224a
@@@ -409,7 -409,7 +409,7 @@@ static int ql_set_mac_addr_reg(struct q
                                      (qdev->
                                       func << CAM_OUT_FUNC_SHIFT) |
                                        (0 << CAM_OUT_CQ_ID_SHIFT));
 -                      if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
 +                      if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
                                cam_output |= CAM_OUT_RV;
                        /* route to NIC core */
                        ql_write32(qdev, MAC_ADDR_DATA, cam_output);
@@@ -1211,6 -1211,8 +1211,6 @@@ static void ql_update_sbq(struct ql_ada
                                    netdev_alloc_skb(qdev->ndev,
                                                     SMALL_BUFFER_SIZE);
                                if (sbq_desc->p.skb == NULL) {
 -                                      netif_err(qdev, probe, qdev->ndev,
 -                                                "Couldn't get an skb.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        return;
                                }
@@@ -1432,11 -1434,13 +1432,13 @@@ map_error
  }
  
  /* Categorizing receive firmware frame errors */
- static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
+ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
+                                struct rx_ring *rx_ring)
  {
        struct nic_stats *stats = &qdev->nic_stats;
  
        stats->rx_err_count++;
+       rx_ring->rx_errors++;
  
        switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
        case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
@@@ -1472,6 -1476,12 +1474,12 @@@ static void ql_process_mac_rx_gro_page(
        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
        struct napi_struct *napi = &rx_ring->napi;
  
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               put_page(lbq_desc->p.pg_chunk.page);
+               return;
+       }
        napi->dev = qdev->ndev;
  
        skb = napi_get_frags(napi);
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rx_ring->cq_id);
        if (vlan_id != 0xffff)
 -              __vlan_hwaccel_put_tag(skb, vlan_id);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
        napi_gro_frags(napi);
  }
  
@@@ -1517,6 -1527,8 +1525,6 @@@ static void ql_process_mac_rx_page(stru
  
        skb = netdev_alloc_skb(ndev, length);
        if (!skb) {
 -              netif_err(qdev, drv, qdev->ndev,
 -                        "Couldn't get an skb, need to unwind!.\n");
                rx_ring->rx_dropped++;
                put_page(lbq_desc->p.pg_chunk.page);
                return;
        addr = lbq_desc->p.pg_chunk.va;
        prefetch(addr);
  
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               goto err_out;
+       }
        /* The max framesize filter on this chip is set higher than
         * MTU since FCoE uses 2k frames.
         */
  
        skb_record_rx_queue(skb, rx_ring->cq_id);
        if (vlan_id != 0xffff)
 -              __vlan_hwaccel_put_tag(skb, vlan_id);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                napi_gro_receive(napi, skb);
        else
@@@ -1601,6 -1619,8 +1615,6 @@@ static void ql_process_mac_rx_skb(struc
        /* Allocate new_skb and copy */
        new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
        if (new_skb == NULL) {
 -              netif_err(qdev, probe, qdev->ndev,
 -                        "No skb available, drop the packet.\n");
                rx_ring->rx_dropped++;
                return;
        }
        memcpy(skb_put(new_skb, length), skb->data, length);
        skb = new_skb;
  
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               dev_kfree_skb_any(skb);
+               return;
+       }
        /* loopback self test for ethtool */
        if (test_bit(QL_SELFTEST, &qdev->flags)) {
                ql_check_lb_frame(qdev, skb);
  
        skb_record_rx_queue(skb, rx_ring->cq_id);
        if (vlan_id != 0xffff)
 -              __vlan_hwaccel_put_tag(skb, vlan_id);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                napi_gro_receive(&rx_ring->napi, skb);
        else
@@@ -1913,6 -1940,13 +1934,13 @@@ static void ql_process_mac_split_rx_int
                return;
        }
  
+       /* Frame error, so drop the packet. */
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+               dev_kfree_skb_any(skb);
+               return;
+       }
        /* The max framesize filter on this chip is set higher than
         * MTU since FCoE uses 2k frames.
         */
        rx_ring->rx_bytes += skb->len;
        skb_record_rx_queue(skb, rx_ring->cq_id);
        if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
 -              __vlan_hwaccel_put_tag(skb, vlan_id);
 +              __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                napi_gro_receive(&rx_ring->napi, skb);
        else
@@@ -1994,12 -2028,6 +2022,6 @@@ static unsigned long ql_process_mac_rx_
  
        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
  
-       /* Frame error, so drop the packet. */
-       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
-               ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
-               return (unsigned long)length;
-       }
        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
                /* The data and headers are split into
                 * separate buffers.
@@@ -2279,7 -2307,7 +2301,7 @@@ static void qlge_vlan_mode(struct net_d
  {
        struct ql_adapter *qdev = netdev_priv(ndev);
  
 -      if (features & NETIF_F_HW_VLAN_RX) {
 +      if (features & NETIF_F_HW_VLAN_CTAG_RX) {
                ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
                                 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
        } else {
@@@ -2294,10 -2322,10 +2316,10 @@@ static netdev_features_t qlge_fix_featu
         * Since there is no support for separate rx/tx vlan accel
         * enable/disable make sure tx flag is always in same state as rx.
         */
 -      if (features & NETIF_F_HW_VLAN_RX)
 -              features |= NETIF_F_HW_VLAN_TX;
 +      if (features & NETIF_F_HW_VLAN_CTAG_RX)
 +              features |= NETIF_F_HW_VLAN_CTAG_TX;
        else
 -              features &= ~NETIF_F_HW_VLAN_TX;
 +              features &= ~NETIF_F_HW_VLAN_CTAG_TX;
  
        return features;
  }
@@@ -2307,7 -2335,7 +2329,7 @@@ static int qlge_set_features(struct net
  {
        netdev_features_t changed = ndev->features ^ features;
  
 -      if (changed & NETIF_F_HW_VLAN_RX)
 +      if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                qlge_vlan_mode(ndev, features);
  
        return 0;
@@@ -2326,7 -2354,7 +2348,7 @@@ static int __qlge_vlan_rx_add_vid(struc
        return err;
  }
  
 -static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 +static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
  {
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status;
@@@ -2357,7 -2385,7 +2379,7 @@@ static int __qlge_vlan_rx_kill_vid(stru
        return err;
  }
  
 -static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 +static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
  {
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status;
@@@ -4665,9 -4693,9 +4687,9 @@@ static int qlge_probe(struct pci_dev *p
        SET_NETDEV_DEV(ndev, &pdev->dev);
        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN |
 -              NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
 +              NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
        ndev->features = ndev->hw_features |
 -              NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
 +              NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
        ndev->vlan_features = ndev->hw_features;
  
        if (test_bit(QL_DMA64, &qdev->flags))
diff --combined drivers/net/ethernet/ti/cpsw.c
index 5cf8d03b8cae429a73a89faf1b96db36383d0390,4781d3d8e18204ad7fc11d06d102fd2eb6829da4..25c364209a2179a48312ff0c3a2045681b1be58c
@@@ -126,13 -126,6 +126,13 @@@ do {                                                             
  #define CPSW_FIFO_DUAL_MAC_MODE               (1 << 15)
  #define CPSW_FIFO_RATE_LIMIT_MODE     (2 << 15)
  
 +#define CPSW_INTPACEEN                (0x3f << 16)
 +#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
 +#define CPSW_CMINTMAX_CNT     63
 +#define CPSW_CMINTMIN_CNT     2
 +#define CPSW_CMINTMAX_INTVL   (1000 / CPSW_CMINTMIN_CNT)
 +#define CPSW_CMINTMIN_INTVL   ((1000 / CPSW_CMINTMAX_CNT) + 1)
 +
  #define cpsw_enable_irq(priv) \
        do {                    \
                u32 i;          \
                        disable_irq_nosync(priv->irqs_table[i]); \
        } while (0);
  
 +#define cpsw_slave_index(priv)                                \
 +              ((priv->data.dual_emac) ? priv->emac_port :     \
 +              priv->data.active_slave)
 +
  static int debug_level;
  module_param(debug_level, int, 0);
  MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
@@@ -171,15 -160,6 +171,15 @@@ struct cpsw_wr_regs 
        u32     rx_en;
        u32     tx_en;
        u32     misc_en;
 +      u32     mem_allign1[8];
 +      u32     rx_thresh_stat;
 +      u32     rx_stat;
 +      u32     tx_stat;
 +      u32     misc_stat;
 +      u32     mem_allign2[8];
 +      u32     rx_imax;
 +      u32     tx_imax;
 +
  };
  
  struct cpsw_ss_regs {
@@@ -334,8 -314,6 +334,8 @@@ struct cpsw_priv 
        struct cpsw_host_regs __iomem   *host_port_regs;
        u32                             msg_enable;
        u32                             version;
 +      u32                             coal_intvl;
 +      u32                             bus_freq_mhz;
        struct net_device_stats         stats;
        int                             rx_packet_max;
        int                             host_port;
@@@ -634,77 -612,6 +634,77 @@@ static void cpsw_adjust_link(struct net
        }
  }
  
 +static int cpsw_get_coalesce(struct net_device *ndev,
 +                              struct ethtool_coalesce *coal)
 +{
 +      struct cpsw_priv *priv = netdev_priv(ndev);
 +
 +      coal->rx_coalesce_usecs = priv->coal_intvl;
 +      return 0;
 +}
 +
 +static int cpsw_set_coalesce(struct net_device *ndev,
 +                              struct ethtool_coalesce *coal)
 +{
 +      struct cpsw_priv *priv = netdev_priv(ndev);
 +      u32 int_ctrl;
 +      u32 num_interrupts = 0;
 +      u32 prescale = 0;
 +      u32 addnl_dvdr = 1;
 +      u32 coal_intvl = 0;
 +
 +      if (!coal->rx_coalesce_usecs)
 +              return -EINVAL;
 +
 +      coal_intvl = coal->rx_coalesce_usecs;
 +
 +      int_ctrl =  readl(&priv->wr_regs->int_control);
 +      prescale = priv->bus_freq_mhz * 4;
 +
 +      if (coal_intvl < CPSW_CMINTMIN_INTVL)
 +              coal_intvl = CPSW_CMINTMIN_INTVL;
 +
 +      if (coal_intvl > CPSW_CMINTMAX_INTVL) {
 +              /* Interrupt pacer works with 4us Pulse, we can
 +               * throttle further by dilating the 4us pulse.
 +               */
 +              addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
 +
 +              if (addnl_dvdr > 1) {
 +                      prescale *= addnl_dvdr;
 +                      if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
 +                              coal_intvl = (CPSW_CMINTMAX_INTVL
 +                                              * addnl_dvdr);
 +              } else {
 +                      addnl_dvdr = 1;
 +                      coal_intvl = CPSW_CMINTMAX_INTVL;
 +              }
 +      }
 +
 +      num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
 +      writel(num_interrupts, &priv->wr_regs->rx_imax);
 +      writel(num_interrupts, &priv->wr_regs->tx_imax);
 +
 +      int_ctrl |= CPSW_INTPACEEN;
 +      int_ctrl &= (~CPSW_INTPRESCALE_MASK);
 +      int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
 +      writel(int_ctrl, &priv->wr_regs->int_control);
 +
 +      cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
 +      if (priv->data.dual_emac) {
 +              int i;
 +
 +              for (i = 0; i < priv->data.slaves; i++) {
 +                      priv = netdev_priv(priv->slaves[i].ndev);
 +                      priv->coal_intvl = coal_intvl;
 +              }
 +      } else {
 +              priv->coal_intvl = coal_intvl;
 +      }
 +
 +      return 0;
 +}
 +
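[Note: numerically, the pacer counts 4 us pulses — prescale = bus_freq_mhz * 4 is the bus clocks per pulse, and rx_imax/tx_imax cap interrupts per millisecond. A worked example under assumed values (250 MHz bus clock, 100 us requested interval, which needs no pulse dilation):

	#include <stdio.h>

	int main(void)
	{
		unsigned int bus_freq_mhz = 250;          /* assumed clock */
		unsigned int coal_intvl = 100;            /* requested usecs */
		unsigned int prescale = bus_freq_mhz * 4; /* clocks per 4us pulse */
		unsigned int addnl_dvdr = 1;              /* in range: no dilation */
		unsigned int imax = (1000 * addnl_dvdr) / coal_intvl;

		/* prints prescale=1000 imax=10 (interrupts per millisecond) */
		printf("prescale=%u imax=%u\n", prescale, imax);
		return 0;
	}
]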
  static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
  {
        static char *leader = "........................................";
@@@ -927,14 -834,6 +927,14 @@@ static int cpsw_ndo_open(struct net_dev
                cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
        }
  
 +      /* Enable Interrupt pacing if configured */
 +      if (priv->coal_intvl != 0) {
 +              struct ethtool_coalesce coal;
 +
 +              coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
 +              cpsw_set_coalesce(ndev, &coal);
 +      }
 +
        cpdma_ctlr_start(priv->dma);
        cpsw_intr_enable(priv);
        napi_enable(&priv->napi);
@@@ -1043,7 -942,7 +1043,7 @@@ static void cpsw_ndo_change_rx_flags(st
  
  static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
  {
 -      struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
 +      struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
        u32 ts_en, seq_id;
  
        if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
@@@ -1072,7 -971,7 +1072,7 @@@ static void cpsw_hwtstamp_v2(struct cps
        if (priv->data.dual_emac)
                slave = &priv->slaves[priv->emac_port];
        else
 -              slave = &priv->slaves[priv->data.cpts_active_slave];
 +              slave = &priv->slaves[priv->data.active_slave];
  
        ctrl = slave_read(slave, CPSW2_CONTROL);
        ctrl &= ~CTRL_ALL_TS_MASK;
@@@ -1157,26 -1056,14 +1157,26 @@@ static int cpsw_hwtstamp_ioctl(struct n
  
  static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
  {
 +      struct cpsw_priv *priv = netdev_priv(dev);
 +      struct mii_ioctl_data *data = if_mii(req);
 +      int slave_no = cpsw_slave_index(priv);
 +
        if (!netif_running(dev))
                return -EINVAL;
  
 +      switch (cmd) {
  #ifdef CONFIG_TI_CPTS
 -      if (cmd == SIOCSHWTSTAMP)
 +      case SIOCSHWTSTAMP:
                return cpsw_hwtstamp_ioctl(dev, req);
  #endif
 -      return -ENOTSUPP;
 +      case SIOCGMIIPHY:
 +              data->phy_id = priv->slaves[slave_no].phy->addr;
 +              break;
 +      default:
 +              return -ENOTSUPP;
 +      }
 +
 +      return 0;
  }
  
  static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@@ -1251,7 -1138,7 +1251,7 @@@ clean_vid
  }
  
  static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
 -              unsigned short vid)
 +                                  __be16 proto, u16 vid)
  {
        struct cpsw_priv *priv = netdev_priv(ndev);
  
  }
  
  static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 -              unsigned short vid)
 +                                   __be16 proto, u16 vid)
  {
        struct cpsw_priv *priv = netdev_priv(ndev);
        int ret;
@@@ -1357,39 -1244,12 +1357,39 @@@ static int cpsw_get_ts_info(struct net_
        return 0;
  }
  
 +static int cpsw_get_settings(struct net_device *ndev,
 +                           struct ethtool_cmd *ecmd)
 +{
 +      struct cpsw_priv *priv = netdev_priv(ndev);
 +      int slave_no = cpsw_slave_index(priv);
 +
 +      if (priv->slaves[slave_no].phy)
 +              return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
 +      else
 +              return -EOPNOTSUPP;
 +}
 +
 +static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
 +{
 +      struct cpsw_priv *priv = netdev_priv(ndev);
 +      int slave_no = cpsw_slave_index(priv);
 +
 +      if (priv->slaves[slave_no].phy)
 +              return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
 +      else
 +              return -EOPNOTSUPP;
 +}
 +
  static const struct ethtool_ops cpsw_ethtool_ops = {
        .get_drvinfo    = cpsw_get_drvinfo,
        .get_msglevel   = cpsw_get_msglevel,
        .set_msglevel   = cpsw_set_msglevel,
        .get_link       = ethtool_op_get_link,
        .get_ts_info    = cpsw_get_ts_info,
 +      .get_settings   = cpsw_get_settings,
 +      .set_settings   = cpsw_set_settings,
 +      .get_coalesce   = cpsw_get_coalesce,
 +      .set_coalesce   = cpsw_set_coalesce,
  };
  
  static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@@ -1422,12 -1282,12 +1422,12 @@@ static int cpsw_probe_dt(struct cpsw_pl
        }
        data->slaves = prop;
  
 -      if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
 -              pr_err("Missing cpts_active_slave property in the DT.\n");
 +      if (of_property_read_u32(node, "active_slave", &prop)) {
 +              pr_err("Missing active_slave property in the DT.\n");
                ret = -EINVAL;
                goto error_ret;
        }
 -      data->cpts_active_slave = prop;
 +      data->active_slave = prop;
  
        if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
                pr_err("Missing cpts_clock_mult property in the DT.\n");
                        memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
  
                if (data->dual_emac) {
-                       if (of_property_read_u32(node, "dual_emac_res_vlan",
+                       if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
                                                 &prop)) {
                                pr_err("Missing dual_emac_res_vlan in DT.\n");
                                slave_data->dual_emac_res_vlan = i+1;
@@@ -1577,9 -1437,6 +1577,9 @@@ static int cpsw_probe_dual_emac(struct 
        priv_sl2->slaves = priv->slaves;
        priv_sl2->clk = priv->clk;
  
 +      priv_sl2->coal_intvl = 0;
 +      priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
 +
        priv_sl2->cpsw_res = priv->cpsw_res;
        priv_sl2->regs = priv->regs;
        priv_sl2->host_port = priv->host_port;
                priv_sl2->num_irqs = priv->num_irqs;
        }
  
 -      ndev->features |= NETIF_F_HW_VLAN_FILTER;
 +      ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
  
        ndev->netdev_ops = &cpsw_netdev_ops;
        SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
@@@ -1689,8 -1546,6 +1689,8 @@@ static int cpsw_probe(struct platform_d
                ret = -ENODEV;
                goto clean_slave_ret;
        }
 +      priv->coal_intvl = 0;
 +      priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
  
        priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!priv->cpsw_res) {
                k++;
        }
  
 -      ndev->features |= NETIF_F_HW_VLAN_FILTER;
 +      ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
  
        ndev->netdev_ops = &cpsw_netdev_ops;
        SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
diff --combined drivers/net/hyperv/netvsc_drv.c
index 4559bb8115bf7ebceae90caa51d2a1a64bb1f68f,8341b62e552166a4f796f7977b6f31a2b945bcdd..088c554961918beeec45dd9c6d22d2508409e92c
@@@ -241,13 -241,11 +241,11 @@@ void netvsc_linkstatus_callback(struct 
  
        if (status == 1) {
                netif_carrier_on(net);
-               netif_wake_queue(net);
                ndev_ctx = netdev_priv(net);
                schedule_delayed_work(&ndev_ctx->dwork, 0);
                schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
        } else {
                netif_carrier_off(net);
-               netif_tx_disable(net);
        }
  }
  
@@@ -431,7 -429,7 +429,7 @@@ static int netvsc_probe(struct hv_devic
  
        /* TODO: Add GSO and Checksum offload */
        net->hw_features = NETIF_F_SG;
 -      net->features = NETIF_F_SG | NETIF_F_HW_VLAN_TX;
 +      net->features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX;
  
        SET_ETHTOOL_OPS(net, &ethtool_ops);
        SET_NETDEV_DEV(net, &dev->device);
diff --combined drivers/net/tun.c
index 316c759bd988e6d602e7ab7dae37022f46be9ffa,729ed533bb33834fac05825be4b344a755e0e859..66109a2ad886270c0b726e51710d74fbf57aa8f1
@@@ -409,12 -409,14 +409,12 @@@ static void __tun_detach(struct tun_fil
  {
        struct tun_file *ntfile;
        struct tun_struct *tun;
 -      struct net_device *dev;
  
        tun = rtnl_dereference(tfile->tun);
  
        if (tun && !tfile->detached) {
                u16 index = tfile->queue_index;
                BUG_ON(index >= tun->numqueues);
 -              dev = tun->dev;
  
                rcu_assign_pointer(tun->tfiles[index],
                                   tun->tfiles[tun->numqueues - 1]);
@@@ -1203,8 -1205,6 +1203,8 @@@ static ssize_t tun_get_user(struct tun_
        }
  
        skb_reset_network_header(skb);
 +      skb_probe_transport_header(skb, 0);
 +
        rxhash = skb_get_rxhash(skb);
        netif_rx_ni(skb);
  
@@@ -1594,7 -1594,7 +1594,7 @@@ static int tun_set_iff(struct net *net
  
                if (tun->flags & TUN_TAP_MQ &&
                    (tun->numqueues + tun->numdisabled > 1))
-                       return err;
+                       return -EBUSY;
        }
        else {
                char *name;
                dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
                        TUN_USER_FEATURES;
                dev->features = dev->hw_features;
 +              dev->vlan_features = dev->features;
  
                INIT_LIST_HEAD(&tun->disabled);
                err = tun_attach(tun, file);
diff --combined drivers/net/usb/cdc_mbim.c
index cc6dfe4102fd4f21ed2df0927318003c09e4f66d,6bd91676d2cbb9a46b43108ae1fa994d4fca84f9..c96454434f7b1bbebc4fb2f1914399af1e4da3be
@@@ -101,7 -101,7 +101,7 @@@ static int cdc_mbim_bind(struct usbnet 
        dev->net->flags |= IFF_NOARP;
  
        /* no need to put the VLAN tci in the packet headers */
 -      dev->net->features |= NETIF_F_HW_VLAN_TX;
 +      dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX;
  err:
        return ret;
  }
@@@ -134,7 -134,7 +134,7 @@@ static struct sk_buff *cdc_mbim_tx_fixu
                goto error;
  
        if (skb) {
-               if (skb->len <= sizeof(ETH_HLEN))
+               if (skb->len <= ETH_HLEN)
                        goto error;
  
                /* mapping VLANs to MBIM sessions:
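[Note: the one-line fix above is a classic pitfall — sizeof(ETH_HLEN) is the size of the integer constant (usually 4), not the 14-byte Ethernet header length, so runt frames of 5..14 bytes passed the old check. A two-line demonstration:

	#include <stdio.h>

	#define ETH_HLEN 14	/* as in <linux/if_ether.h> */

	int main(void)
	{
		/* prints 14 vs. 4 on common ABIs */
		printf("ETH_HLEN=%d, sizeof(ETH_HLEN)=%zu\n",
		       ETH_HLEN, sizeof(ETH_HLEN));
		return 0;
	}
]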
@@@ -221,7 -221,7 +221,7 @@@ static struct sk_buff *cdc_mbim_process
  
        /* map MBIM session to VLAN */
        if (tci)
 -              vlan_put_tag(skb, tci);
 +              vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
  err:
        return skb;
  }
diff --combined drivers/net/wireless/b43/phy_n.c
index f9339e7ea6af082a8c7360b877b4e00b48c710cb,b70f220bc4b378e626a59752fca5bce965456340..63cca9c2bf970b8efe4114ac488689087b46325b
@@@ -2789,6 -2789,10 +2789,6 @@@ static void b43_nphy_iq_cal_gain_params
   * Tx and Rx
   **************************************************/
  
 -void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
 -{//TODO
 -}
 -
  static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
  {//TODO
  }
@@@ -4888,7 -4892,7 +4888,7 @@@ static void b43_nphy_superswitch_init(s
  }
  
  /* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */
 -int b43_phy_initn(struct b43_wldev *dev)
 +static int b43_phy_initn(struct b43_wldev *dev)
  {
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
        struct b43_phy *phy = &dev->phy;
@@@ -5161,7 -5165,8 +5161,8 @@@ static void b43_nphy_pmu_spur_avoid(str
  #endif
  #ifdef CONFIG_B43_SSB
        case B43_BUS_SSB:
-               /* FIXME */
+               ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco,
+                                           avoid);
                break;
  #endif
        }
diff --combined drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 62699203869d6ec142e07931a3575b6e6373ec9a,78da3eff75e8a1c5c126a60aab7bc90d7663a314..e4f1f3c9575aae6374b7844fb6281e19b02a862a
@@@ -26,7 -26,6 +26,7 @@@
  #include <brcmu_wifi.h>
  #include "dhd.h"
  #include "dhd_dbg.h"
 +#include "tracepoint.h"
  #include "fwil_types.h"
  #include "p2p.h"
  #include "wl_cfg80211.h"
@@@ -183,6 -182,64 +183,6 @@@ static struct ieee80211_channel __wl_5g
        CHAN5G(216, 0),
  };
  
 -static struct ieee80211_channel __wl_5ghz_n_channels[] = {
 -      CHAN5G(32, 0), CHAN5G(34, 0),
 -      CHAN5G(36, 0), CHAN5G(38, 0),
 -      CHAN5G(40, 0), CHAN5G(42, 0),
 -      CHAN5G(44, 0), CHAN5G(46, 0),
 -      CHAN5G(48, 0), CHAN5G(50, 0),
 -      CHAN5G(52, 0), CHAN5G(54, 0),
 -      CHAN5G(56, 0), CHAN5G(58, 0),
 -      CHAN5G(60, 0), CHAN5G(62, 0),
 -      CHAN5G(64, 0), CHAN5G(66, 0),
 -      CHAN5G(68, 0), CHAN5G(70, 0),
 -      CHAN5G(72, 0), CHAN5G(74, 0),
 -      CHAN5G(76, 0), CHAN5G(78, 0),
 -      CHAN5G(80, 0), CHAN5G(82, 0),
 -      CHAN5G(84, 0), CHAN5G(86, 0),
 -      CHAN5G(88, 0), CHAN5G(90, 0),
 -      CHAN5G(92, 0), CHAN5G(94, 0),
 -      CHAN5G(96, 0), CHAN5G(98, 0),
 -      CHAN5G(100, 0), CHAN5G(102, 0),
 -      CHAN5G(104, 0), CHAN5G(106, 0),
 -      CHAN5G(108, 0), CHAN5G(110, 0),
 -      CHAN5G(112, 0), CHAN5G(114, 0),
 -      CHAN5G(116, 0), CHAN5G(118, 0),
 -      CHAN5G(120, 0), CHAN5G(122, 0),
 -      CHAN5G(124, 0), CHAN5G(126, 0),
 -      CHAN5G(128, 0), CHAN5G(130, 0),
 -      CHAN5G(132, 0), CHAN5G(134, 0),
 -      CHAN5G(136, 0), CHAN5G(138, 0),
 -      CHAN5G(140, 0), CHAN5G(142, 0),
 -      CHAN5G(144, 0), CHAN5G(145, 0),
 -      CHAN5G(146, 0), CHAN5G(147, 0),
 -      CHAN5G(148, 0), CHAN5G(149, 0),
 -      CHAN5G(150, 0), CHAN5G(151, 0),
 -      CHAN5G(152, 0), CHAN5G(153, 0),
 -      CHAN5G(154, 0), CHAN5G(155, 0),
 -      CHAN5G(156, 0), CHAN5G(157, 0),
 -      CHAN5G(158, 0), CHAN5G(159, 0),
 -      CHAN5G(160, 0), CHAN5G(161, 0),
 -      CHAN5G(162, 0), CHAN5G(163, 0),
 -      CHAN5G(164, 0), CHAN5G(165, 0),
 -      CHAN5G(166, 0), CHAN5G(168, 0),
 -      CHAN5G(170, 0), CHAN5G(172, 0),
 -      CHAN5G(174, 0), CHAN5G(176, 0),
 -      CHAN5G(178, 0), CHAN5G(180, 0),
 -      CHAN5G(182, 0), CHAN5G(184, 0),
 -      CHAN5G(186, 0), CHAN5G(188, 0),
 -      CHAN5G(190, 0), CHAN5G(192, 0),
 -      CHAN5G(194, 0), CHAN5G(196, 0),
 -      CHAN5G(198, 0), CHAN5G(200, 0),
 -      CHAN5G(202, 0), CHAN5G(204, 0),
 -      CHAN5G(206, 0), CHAN5G(208, 0),
 -      CHAN5G(210, 0), CHAN5G(212, 0),
 -      CHAN5G(214, 0), CHAN5G(216, 0),
 -      CHAN5G(218, 0), CHAN5G(220, 0),
 -      CHAN5G(222, 0), CHAN5G(224, 0),
 -      CHAN5G(226, 0), CHAN5G(228, 0),
 -};
 -
  static struct ieee80211_supported_band __wl_band_2ghz = {
        .band = IEEE80211_BAND_2GHZ,
        .channels = __wl_2ghz_channels,
@@@ -199,28 -256,12 +199,28 @@@ static struct ieee80211_supported_band 
        .n_bitrates = wl_a_rates_size,
  };
  
 -static struct ieee80211_supported_band __wl_band_5ghz_n = {
 -      .band = IEEE80211_BAND_5GHZ,
 -      .channels = __wl_5ghz_n_channels,
 -      .n_channels = ARRAY_SIZE(__wl_5ghz_n_channels),
 -      .bitrates = wl_a_rates,
 -      .n_bitrates = wl_a_rates_size,
 +/* This is to override regulatory domains defined in cfg80211 module (reg.c)
 + * By default world regulatory domain defined in reg.c puts the flags
 + * NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS for 5GHz channels (for
 + * 36..48 and 149..165). With respect to these flags, wpa_supplicant doesn't
 + * start p2p operations on 5GHz channels. All the changes in world regulatory
 + * domain are to be done here.
 + */
 +static const struct ieee80211_regdomain brcmf_regdom = {
 +      .n_reg_rules = 4,
 +      .alpha2 =  "99",
 +      .reg_rules = {
 +              /* IEEE 802.11b/g, channels 1..11 */
 +              REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
 +              /* If any */
 +              /* IEEE 802.11 channel 14 - Only JP enables
 +               * this and for 802.11b only
 +               */
 +              REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
 +              /* IEEE 802.11a, channel 36..64 */
 +              REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
 +              /* IEEE 802.11a, channel 100..165 */
 +              REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
  };
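[Note: for reference, the REG_RULE() fields above are, in order: frequency span in MHz, maximum bandwidth in MHz, maximum antenna gain in dBi, maximum EIRP in dBm, and NL80211_RRF_* flags (per include/net/regulatory.h in this era). A minimal hypothetical single-rule domain:

	#include <net/cfg80211.h>

	/* illustrative only: one passive-scan 5 GHz rule */
	static const struct ieee80211_regdomain example_regdom = {
		.n_reg_rules = 1,
		.alpha2 = "99",
		.reg_rules = {
			REG_RULE(5470 - 10, 5725 + 10, 40, 6, 20,
				 NL80211_RRF_PASSIVE_SCAN),
		},
	};
]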
  
  static const u32 __wl_cipher_suites[] = {
@@@ -482,16 -523,17 +482,16 @@@ static struct wireless_dev *brcmf_cfg80
                return ERR_PTR(-EOPNOTSUPP);
        case NL80211_IFTYPE_P2P_CLIENT:
        case NL80211_IFTYPE_P2P_GO:
 +      case NL80211_IFTYPE_P2P_DEVICE:
                return brcmf_p2p_add_vif(wiphy, name, type, flags, params);
        case NL80211_IFTYPE_UNSPECIFIED:
 -      case NL80211_IFTYPE_P2P_DEVICE:
        default:
                return ERR_PTR(-EINVAL);
        }
  }
  
 -void brcmf_set_mpc(struct net_device *ndev, int mpc)
 +void brcmf_set_mpc(struct brcmf_if *ifp, int mpc)
  {
 -      struct brcmf_if *ifp = netdev_priv(ndev);
        s32 err = 0;
  
        if (check_vif_up(ifp->vif)) {
        }
  }
  
 -s32
 -brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
 -                          struct net_device *ndev,
 -                          bool aborted, bool fw_abort)
 +s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
 +                              struct brcmf_if *ifp, bool aborted,
 +                              bool fw_abort)
  {
        struct brcmf_scan_params_le params_le;
        struct cfg80211_scan_request *scan_request;
                /* Scan is aborted by setting channel_list[0] to -1 */
                params_le.channel_list[0] = cpu_to_le16(-1);
                /* E-Scan (or anyother type) can be aborted by SCAN */
 -              err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
 +              err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
                                             &params_le, sizeof(params_le));
                if (err)
                        brcmf_err("Scan abort  failed\n");
                cfg->sched_escan = false;
                if (!aborted)
                        cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
 -              brcmf_set_mpc(ndev, 1);
 +              brcmf_set_mpc(ifp, 1);
        } else if (scan_request) {
                brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
                          aborted ? "Aborted" : "Done");
                cfg80211_scan_done(scan_request, aborted);
 -              brcmf_set_mpc(ndev, 1);
 +              brcmf_set_mpc(ifp, 1);
        }
        if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
                brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
@@@ -576,9 -619,9 +576,9 @@@ int brcmf_cfg80211_del_iface(struct wip
  
        if (ndev) {
                if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status) &&
 -                  cfg->escan_info.ndev == ndev)
 -                      brcmf_notify_escan_complete(cfg, ndev, true,
 -                                                  true);
 +                  cfg->escan_info.ifp == netdev_priv(ndev))
 +                      brcmf_notify_escan_complete(cfg, netdev_priv(ndev),
 +                                                  true, true);
  
                brcmf_fil_iovar_int_set(netdev_priv(ndev), "mpc", 1);
        }
                return -EOPNOTSUPP;
        case NL80211_IFTYPE_P2P_CLIENT:
        case NL80211_IFTYPE_P2P_GO:
 +      case NL80211_IFTYPE_P2P_DEVICE:
                return brcmf_p2p_del_vif(wiphy, wdev);
        case NL80211_IFTYPE_UNSPECIFIED:
 -      case NL80211_IFTYPE_P2P_DEVICE:
        default:
                return -EINVAL;
        }
@@@ -760,7 -803,7 +760,7 @@@ static void brcmf_escan_prep(struct brc
  }
  
  static s32
 -brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
 +brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
                struct cfg80211_scan_request *request, u16 action)
  {
        s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
        params->action = cpu_to_le16(action);
        params->sync_id = cpu_to_le16(0x1234);
  
 -      err = brcmf_fil_iovar_data_set(netdev_priv(ndev), "escan",
 -                                     params, params_size);
 +      err = brcmf_fil_iovar_data_set(ifp, "escan", params, params_size);
        if (err) {
                if (err == -EBUSY)
                        brcmf_dbg(INFO, "system busy : escan canceled\n");
@@@ -804,7 -848,7 +804,7 @@@ exit
  
  static s32
  brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
 -             struct net_device *ndev, struct cfg80211_scan_request *request)
 +             struct brcmf_if *ifp, struct cfg80211_scan_request *request)
  {
        s32 err;
        u32 passive_scan;
        struct escan_info *escan = &cfg->escan_info;
  
        brcmf_dbg(SCAN, "Enter\n");
 -      escan->ndev = ndev;
 +      escan->ifp = ifp;
        escan->wiphy = wiphy;
        escan->escan_state = WL_ESCAN_STATE_SCANNING;
        passive_scan = cfg->active_scan ? 0 : 1;
 -      err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN,
 +      err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
                                    passive_scan);
        if (err) {
                brcmf_err("error (%d)\n", err);
                return err;
        }
 -      brcmf_set_mpc(ndev, 0);
 +      brcmf_set_mpc(ifp, 0);
        results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
        results->version = 0;
        results->count = 0;
        results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
  
 -      err = escan->run(cfg, ndev, request, WL_ESCAN_ACTION_START);
 +      err = escan->run(cfg, ifp, request, WL_ESCAN_ACTION_START);
        if (err)
 -              brcmf_set_mpc(ndev, 1);
 +              brcmf_set_mpc(ifp, 1);
        return err;
  }
  
  static s32
 -brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
 +brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
                     struct cfg80211_scan_request *request,
                     struct cfg80211_ssid *this_ssid)
  {
 -      struct brcmf_if *ifp = netdev_priv(ndev);
 -      struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
 +      struct brcmf_if *ifp = vif->ifp;
 +      struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct cfg80211_ssid *ssids;
        struct brcmf_cfg80211_scan_req *sr = &cfg->scan_req_int;
        u32 passive_scan;
        }
  
        /* If scan req comes for p2p0, send it over primary I/F */
 -      if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) {
 -              ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
 -              ndev = ifp->ndev;
 -      }
 +      if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
 +              vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
  
        /* Arm scan timeout timer */
        mod_timer(&cfg->escan_timeout, jiffies +
        set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
        if (escan_req) {
                cfg->escan_info.run = brcmf_run_escan;
 -              err = brcmf_p2p_scan_prep(wiphy, request, ifp->vif);
 +              err = brcmf_p2p_scan_prep(wiphy, request, vif);
                if (err)
                        goto scan_out;
  
 -              err = brcmf_do_escan(cfg, wiphy, ndev, request);
 +              err = brcmf_do_escan(cfg, wiphy, vif->ifp, request);
                if (err)
                        goto scan_out;
        } else {
                        brcmf_err("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
                        goto scan_out;
                }
 -              brcmf_set_mpc(ndev, 0);
 +              brcmf_set_mpc(ifp, 0);
                err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
                                             &sr->ssid_le, sizeof(sr->ssid_le));
                if (err) {
                        else
                                brcmf_err("WLC_SCAN error (%d)\n", err);
  
 -                      brcmf_set_mpc(ndev, 1);
 +                      brcmf_set_mpc(ifp, 1);
                        goto scan_out;
                }
        }
@@@ -944,15 -990,16 +944,15 @@@ scan_out
  static s32
  brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
  {
 -      struct net_device *ndev = request->wdev->netdev;
 +      struct brcmf_cfg80211_vif *vif;
        s32 err = 0;
  
        brcmf_dbg(TRACE, "Enter\n");
 -
 -      if (!check_vif_up(container_of(request->wdev,
 -                                     struct brcmf_cfg80211_vif, wdev)))
 +      vif = container_of(request->wdev, struct brcmf_cfg80211_vif, wdev);
 +      if (!check_vif_up(vif))
                return -EIO;
  
 -      err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
 +      err = brcmf_cfg80211_escan(wiphy, vif, request, NULL);
  
        if (err)
                brcmf_err("scan error (%d)\n", err);
@@@ -2463,7 -2510,7 +2463,7 @@@ void brcmf_abort_scanning(struct brcmf_
        set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
        if (cfg->scan_request) {
                escan->escan_state = WL_ESCAN_STATE_IDLE;
 -              brcmf_notify_escan_complete(cfg, escan->ndev, true, true);
 +              brcmf_notify_escan_complete(cfg, escan->ifp, true, true);
        }
        clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
        clear_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
@@@ -2475,7 -2522,7 +2475,7 @@@ static void brcmf_cfg80211_escan_timeou
                        container_of(work, struct brcmf_cfg80211_info,
                                     escan_timeout_work);
  
 -      brcmf_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
 +      brcmf_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true);
  }
  
  static void brcmf_escan_timeout(unsigned long data)
@@@ -2526,6 -2573,7 +2526,6 @@@ brcmf_cfg80211_escan_handler(struct brc
                             const struct brcmf_event_msg *e, void *data)
  {
        struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
 -      struct net_device *ndev = ifp->ndev;
        s32 status;
        s32 err = 0;
        struct brcmf_escan_result_le *escan_result_le;
  
        status = e->status;
  
 -      if (!ndev || !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
 -              brcmf_err("scan not ready ndev %p drv_status %x\n", ndev,
 -                        !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status));
 +      if (!test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
 +              brcmf_err("scan not ready, bssidx=%d\n", ifp->bssidx);
                return -EPERM;
        }
  
                                cfg->escan_info.escan_buf;
                        brcmf_inform_bss(cfg);
                        aborted = status != BRCMF_E_STATUS_SUCCESS;
 -                      brcmf_notify_escan_complete(cfg, ndev, aborted,
 +                      brcmf_notify_escan_complete(cfg, ifp, aborted,
                                                    false);
                } else
                        brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n",
@@@ -2688,7 -2737,7 +2688,7 @@@ static s32 brcmf_cfg80211_suspend(struc
                brcmf_abort_scanning(cfg);
  
        /* Turn off watchdog timer */
 -      brcmf_set_mpc(ndev, 1);
 +      brcmf_set_mpc(netdev_priv(ndev), 1);
  
  exit:
        brcmf_dbg(TRACE, "Exit\n");
@@@ -2846,6 -2895,7 +2846,6 @@@ brcmf_notify_sched_scan_results(struct 
                                const struct brcmf_event_msg *e, void *data)
  {
        struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
 -      struct net_device *ndev = ifp->ndev;
        struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
        struct cfg80211_scan_request *request = NULL;
        struct cfg80211_ssid *ssid = NULL;
                }
  
                set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
 -              err = brcmf_do_escan(cfg, wiphy, ndev, request);
 +              err = brcmf_do_escan(cfg, wiphy, ifp, request);
                if (err) {
                        clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
                        goto out_err;
@@@ -3001,16 -3051,16 +3001,16 @@@ brcmf_cfg80211_sched_scan_start(struct 
        int i;
        int ret = 0;
  
 -      brcmf_dbg(SCAN, "Enter n_match_sets:%d   n_ssids:%d\n",
 +      brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
                  request->n_match_sets, request->n_ssids);
        if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
                brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
                return -EAGAIN;
        }
  
 -      if (!request || !request->n_ssids || !request->n_match_sets) {
 +      if (!request->n_ssids || !request->n_match_sets) {
                brcmf_err("Invalid sched scan req!! n_ssids:%d\n",
 -                        request ? request->n_ssids : 0);
 +                        request->n_ssids);
                return -EINVAL;
        }
  
@@@ -3086,7 -3136,7 +3086,7 @@@ static int brcmf_cfg80211_sched_scan_st
        brcmf_dbg(SCAN, "enter\n");
        brcmf_dev_pno_clean(ndev);
        if (cfg->sched_escan)
 -              brcmf_notify_escan_complete(cfg, ndev, true, true);
 +              brcmf_notify_escan_complete(cfg, netdev_priv(ndev), true, true);
        return 0;
  }
  
@@@ -3658,7 -3708,7 +3658,7 @@@ brcmf_cfg80211_start_ap(struct wiphy *w
                ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
        }
  
 -      brcmf_set_mpc(ndev, 0);
 +      brcmf_set_mpc(ifp, 0);
  
        /* find the RSN_IE */
        rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
  
  exit:
        if (err)
 -              brcmf_set_mpc(ndev, 1);
 +              brcmf_set_mpc(ifp, 1);
        return err;
  }
  
@@@ -3806,7 -3856,7 +3806,7 @@@ static int brcmf_cfg80211_stop_ap(struc
                if (err < 0)
                        brcmf_err("bss_enable config failed %d\n", err);
        }
 -      brcmf_set_mpc(ndev, 1);
 +      brcmf_set_mpc(ifp, 1);
        set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
        clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
  
@@@ -3863,13 -3913,13 +3863,13 @@@ brcmf_cfg80211_mgmt_frame_register(stru
                                   struct wireless_dev *wdev,
                                   u16 frame_type, bool reg)
  {
 -      struct brcmf_if *ifp = netdev_priv(wdev->netdev);
 -      struct brcmf_cfg80211_vif *vif = ifp->vif;
 +      struct brcmf_cfg80211_vif *vif;
        u16 mgmt_type;
  
        brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg);
  
        mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
 +      vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
        if (reg)
                vif->mgmt_rx_reg |= BIT(mgmt_type);
        else
@@@ -3885,6 -3935,7 +3885,6 @@@ brcmf_cfg80211_mgmt_tx(struct wiphy *wi
  {
        struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        const struct ieee80211_mgmt *mgmt;
 -      struct brcmf_if *ifp;
        struct brcmf_cfg80211_vif *vif;
        s32 err = 0;
        s32 ie_offset;
                ie_offset =  DOT11_MGMT_HDR_LEN +
                             DOT11_BCN_PRB_FIXED_LEN;
                ie_len = len - ie_offset;
 -              ifp = netdev_priv(wdev->netdev);
 -              vif = ifp->vif;
 +              vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
                if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif)
                        vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
                err = brcmf_vif_set_mgmt_ie(vif,
                          *cookie, le16_to_cpu(action_frame->len),
                          chan->center_freq);
  
 -              ack = brcmf_p2p_send_action_frame(cfg, wdev->netdev,
 +              ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
                                                  af_params);
  
                cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
@@@ -4027,8 -4079,6 +4027,8 @@@ static struct cfg80211_ops wl_cfg80211_
        .mgmt_tx = brcmf_cfg80211_mgmt_tx,
        .remain_on_channel = brcmf_p2p_remain_on_channel,
        .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
 +      .start_p2p_device = brcmf_p2p_start_device,
 +      .stop_p2p_device = brcmf_p2p_stop_device,
  #ifdef CONFIG_NL80211_TESTMODE
        .testmode_cmd = brcmf_cfg80211_testmode
  #endif
@@@ -4076,10 -4126,6 +4076,6 @@@ static const struct ieee80211_iface_lim
                         BIT(NL80211_IFTYPE_ADHOC) |
                         BIT(NL80211_IFTYPE_AP)
        },
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
-       },
        {
                .max = 1,
                .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@@ -4116,11 -4162,6 +4112,11 @@@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] 
                      BIT(IEEE80211_STYPE_AUTH >> 4) |
                      BIT(IEEE80211_STYPE_DEAUTH >> 4) |
                      BIT(IEEE80211_STYPE_ACTION >> 4)
 +      },
 +      [NL80211_IFTYPE_P2P_DEVICE] = {
 +              .tx = 0xffff,
 +              .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
 +                    BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
        }
  };
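
A note on the netdev_priv() to container_of() switch in the mgmt_frame_register and mgmt_tx hunks above: a P2P device wdev has no net_device attached, so the vif can no longer be reached through netdev_priv(); it is recovered from the wdev member embedded inside it instead. A minimal userspace sketch of that pointer arithmetic (the struct names are stand-ins, not the driver's types):

#include <stddef.h>
#include <stdio.h>

#define container_of_model(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct wdev_model { int dummy; };

struct vif_model {
        int id;
        struct wdev_model wdev; /* embedded by value, as in the driver */
};

int main(void)
{
        struct vif_model vif = { .id = 7 };
        struct wdev_model *wdev = &vif.wdev;
        struct vif_model *back =
                container_of_model(wdev, struct vif_model, wdev);

        printf("recovered vif id: %d\n", back->id); /* prints 7 */
        return 0;
}

The cast is only valid because wdev is embedded by value in the vif, which is exactly the invariant container_of() relies on.
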
  
@@@ -4142,11 -4183,17 +4138,10 @@@ static struct wiphy *brcmf_setup_wiphy(
                                 BIT(NL80211_IFTYPE_ADHOC) |
                                 BIT(NL80211_IFTYPE_AP) |
                                 BIT(NL80211_IFTYPE_P2P_CLIENT) |
-                                BIT(NL80211_IFTYPE_P2P_GO) |
-                                BIT(NL80211_IFTYPE_P2P_DEVICE);
+                                BIT(NL80211_IFTYPE_P2P_GO);
        wiphy->iface_combinations = brcmf_iface_combos;
        wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
        wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
 -      wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a;  /* Set
 -                                              * it as 11a by default.
 -                                              * This will be updated with
 -                                              * 11n phy tables in
 -                                              * "ifconfig up"
 -                                              * if phy has 11n capability
 -                                              */
        wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
        wiphy->cipher_suites = __wl_cipher_suites;
        wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
        wiphy->mgmt_stypes = brcmf_txrx_stypes;
        wiphy->max_remain_on_channel_duration = 5000;
        brcmf_wiphy_pno_params(wiphy);
 +      brcmf_dbg(INFO, "Registering custom regulatory\n");
 +      wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
 +      wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
        err = wiphy_register(wiphy);
        if (err < 0) {
                brcmf_err("Could not register wiphy device (%d)\n", err);
@@@ -4577,11 -4621,9 +4572,11 @@@ static s32 brcmf_notify_vif_event(struc
  
                ifp->vif = vif;
                vif->ifp = ifp;
 -              vif->wdev.netdev = ifp->ndev;
 -              ifp->ndev->ieee80211_ptr = &vif->wdev;
 -              SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
 +              if (ifp->ndev) {
 +                      vif->wdev.netdev = ifp->ndev;
 +                      ifp->ndev->ieee80211_ptr = &vif->wdev;
 +                      SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
 +              }
                mutex_unlock(&event->vif_event_lock);
                wake_up(&event->vif_wq);
                return 0;
@@@ -4884,248 -4926,34 +4879,248 @@@ dongle_scantime_out
        return err;
  }
  
 -static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg)
 +
 +static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
 +{
 +      struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
 +      struct ieee80211_channel *band_chan_arr;
 +      struct brcmf_chanspec_list *list;
 +      s32 err;
 +      u8 *pbuf;
 +      u32 i, j;
 +      u32 total;
 +      u16 chanspec;
 +      enum ieee80211_band band;
 +      u32 channel;
 +      u32 *n_cnt;
 +      bool ht40_allowed;
 +      u32 index;
 +      u32 ht40_flag;
 +      bool update;
 +      u32 array_size;
 +
 +      pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
 +
 +      if (pbuf == NULL)
 +              return -ENOMEM;
 +
 +      list = (struct brcmf_chanspec_list *)pbuf;
 +
 +      err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf,
 +                                     BRCMF_DCMD_MEDLEN);
 +      if (err) {
 +              brcmf_err("get chanspecs error (%d)\n", err);
 +              goto exit;
 +      }
 +
 +      __wl_band_2ghz.n_channels = 0;
 +      __wl_band_5ghz_a.n_channels = 0;
 +
 +      total = le32_to_cpu(list->count);
 +      for (i = 0; i < total; i++) {
 +              chanspec = (u16)le32_to_cpu(list->element[i]);
 +              channel = CHSPEC_CHANNEL(chanspec);
 +
 +              if (CHSPEC_IS40(chanspec)) {
 +                      if (CHSPEC_SB_UPPER(chanspec))
 +                              channel += CH_10MHZ_APART;
 +                      else
 +                              channel -= CH_10MHZ_APART;
 +              } else if (CHSPEC_IS80(chanspec)) {
 +                      brcmf_dbg(INFO, "HT80 center channel : %d\n",
 +                                channel);
 +                      continue;
 +              }
 +              if (CHSPEC_IS2G(chanspec) && (channel >= CH_MIN_2G_CHANNEL) &&
 +                  (channel <= CH_MAX_2G_CHANNEL)) {
 +                      band_chan_arr = __wl_2ghz_channels;
 +                      array_size = ARRAY_SIZE(__wl_2ghz_channels);
 +                      n_cnt = &__wl_band_2ghz.n_channels;
 +                      band = IEEE80211_BAND_2GHZ;
 +                      ht40_allowed = (bw_cap == WLC_N_BW_40ALL);
 +              } else if (CHSPEC_IS5G(chanspec) &&
 +                         channel >= CH_MIN_5G_CHANNEL) {
 +                      band_chan_arr = __wl_5ghz_a_channels;
 +                      array_size = ARRAY_SIZE(__wl_5ghz_a_channels);
 +                      n_cnt = &__wl_band_5ghz_a.n_channels;
 +                      band = IEEE80211_BAND_5GHZ;
 +                      ht40_allowed = !(bw_cap == WLC_N_BW_20ALL);
 +              } else {
 +              brcmf_err("Invalid channel Spec. 0x%x.\n", chanspec);
 +                      continue;
 +              }
 +              if (!ht40_allowed && CHSPEC_IS40(chanspec))
 +                      continue;
 +              update = false;
 +              for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
 +                      if (band_chan_arr[j].hw_value == channel) {
 +                              update = true;
 +                              break;
 +                      }
 +              }
 +              if (update)
 +                      index = j;
 +              else
 +                      index = *n_cnt;
 +              if (index <  array_size) {
 +                      band_chan_arr[index].center_freq =
 +                              ieee80211_channel_to_frequency(channel, band);
 +                      band_chan_arr[index].hw_value = channel;
 +
 +                      if (CHSPEC_IS40(chanspec) && ht40_allowed) {
 +                              /* assuming the order is HT20, HT40 Upper,
 +                               * HT40 lower from chanspecs
 +                               */
 +                              ht40_flag = band_chan_arr[index].flags &
 +                                          IEEE80211_CHAN_NO_HT40;
 +                              if (CHSPEC_SB_UPPER(chanspec)) {
 +                                      if (ht40_flag == IEEE80211_CHAN_NO_HT40)
 +                                              band_chan_arr[index].flags &=
 +                                                      ~IEEE80211_CHAN_NO_HT40;
 +                                      band_chan_arr[index].flags |=
 +                                              IEEE80211_CHAN_NO_HT40PLUS;
 +                              } else {
 +                                      /* It should be one of
 +                                       * IEEE80211_CHAN_NO_HT40 or
 +                                       * IEEE80211_CHAN_NO_HT40PLUS
 +                                       */
 +                                      band_chan_arr[index].flags &=
 +                                                      ~IEEE80211_CHAN_NO_HT40;
 +                                      if (ht40_flag == IEEE80211_CHAN_NO_HT40)
 +                                              band_chan_arr[index].flags |=
 +                                                  IEEE80211_CHAN_NO_HT40MINUS;
 +                              }
 +                      } else {
 +                              band_chan_arr[index].flags =
 +                                                      IEEE80211_CHAN_NO_HT40;
 +                              if (band == IEEE80211_BAND_2GHZ)
 +                                      channel |= WL_CHANSPEC_BAND_2G;
 +                              else
 +                                      channel |= WL_CHANSPEC_BAND_5G;
 +                              channel |= WL_CHANSPEC_BW_20;
 +                              err = brcmf_fil_bsscfg_int_get(ifp,
 +                                                             "per_chan_info",
 +                                                             &channel);
 +                              if (!err) {
 +                                      if (channel & WL_CHAN_RADAR)
 +                                              band_chan_arr[index].flags |=
 +                                                      (IEEE80211_CHAN_RADAR |
 +                                                      IEEE80211_CHAN_NO_IBSS);
 +                                      if (channel & WL_CHAN_PASSIVE)
 +                                              band_chan_arr[index].flags |=
 +                                                  IEEE80211_CHAN_PASSIVE_SCAN;
 +                              }
 +                      }
 +                      if (!update)
 +                              (*n_cnt)++;
 +              }
 +      }
 +exit:
 +      kfree(pbuf);
 +      return err;
 +}
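
For the 40 MHz branch of the chanspec loop above: the chanspec carries the pair's center channel, and the 20 MHz control channel is derived by stepping two channel numbers (10 MHz) up or down according to the sideband bit. A standalone sketch of the arithmetic; CH_10MHZ_APART = 2 is an assumption taken from the brcmu headers, which are not part of this diff:

#include <stdio.h>

#define CH_10MHZ_APART 2        /* assumed value, see note above */

/* Mirrors the sideband adjustment in brcmf_construct_reginfo(). */
static int ctl_channel(int center, int sb_upper)
{
        return sb_upper ? center + CH_10MHZ_APART
                        : center - CH_10MHZ_APART;
}

int main(void)
{
        /* the 36/40 HT40 pair has center channel 38 */
        printf("upper sideband -> %d\n", ctl_channel(38, 1)); /* 40 */
        printf("lower sideband -> %d\n", ctl_channel(38, 0)); /* 36 */
        return 0;
}
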
 +
 +
 +static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
  {
        struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
        struct wiphy *wiphy;
        s32 phy_list;
 +      u32 band_list[3];
 +      u32 nmode;
 +      u32 bw_cap = 0;
        s8 phy;
 -      s32 err = 0;
 +      s32 err;
 +      u32 nband;
 +      s32 i;
 +      struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
 +      s32 index;
  
        err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_PHYLIST,
                                     &phy_list, sizeof(phy_list));
        if (err) {
 -              brcmf_err("error (%d)\n", err);
 +              brcmf_err("BRCMF_C_GET_PHYLIST error (%d)\n", err);
                return err;
        }
  
        phy = ((char *)&phy_list)[0];
 -      brcmf_dbg(INFO, "%c phy\n", phy);
 -      if (phy == 'n' || phy == 'a') {
 -              wiphy = cfg_to_wiphy(cfg);
 -              wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
 +      brcmf_dbg(INFO, "BRCMF_C_GET_PHYLIST reported: %c phy\n", phy);
 +
 +
 +      err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST,
 +                                   &band_list, sizeof(band_list));
 +      if (err) {
 +              brcmf_err("BRCMF_C_GET_BANDLIST error (%d)\n", err);
 +              return err;
        }
 +      brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n",
 +                band_list[0], band_list[1], band_list[2]);
 +
 +      err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
 +      if (err) {
 +              brcmf_err("nmode error (%d)\n", err);
 +      } else {
 +              err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &bw_cap);
 +              if (err)
 +                      brcmf_err("mimo_bw_cap error (%d)\n", err);
 +      }
 +      brcmf_dbg(INFO, "nmode=%d, mimo_bw_cap=%d\n", nmode, bw_cap);
 +
 +      err = brcmf_construct_reginfo(cfg, bw_cap);
 +      if (err) {
 +              brcmf_err("brcmf_construct_reginfo failed (%d)\n", err);
 +              return err;
 +      }
 +
 +      nband = band_list[0];
 +      memset(bands, 0, sizeof(bands));
 +
 +      for (i = 1; i <= nband && i < ARRAY_SIZE(band_list); i++) {
 +              index = -1;
 +              if ((band_list[i] == WLC_BAND_5G) &&
 +                  (__wl_band_5ghz_a.n_channels > 0)) {
 +                      index = IEEE80211_BAND_5GHZ;
 +                      bands[index] = &__wl_band_5ghz_a;
 +                      if ((bw_cap == WLC_N_BW_40ALL) ||
 +                          (bw_cap == WLC_N_BW_20IN2G_40IN5G))
 +                              bands[index]->ht_cap.cap |=
 +                                                      IEEE80211_HT_CAP_SGI_40;
 +              } else if ((band_list[i] == WLC_BAND_2G) &&
 +                         (__wl_band_2ghz.n_channels > 0)) {
 +                      index = IEEE80211_BAND_2GHZ;
 +                      bands[index] = &__wl_band_2ghz;
 +                      if (bw_cap == WLC_N_BW_40ALL)
 +                              bands[index]->ht_cap.cap |=
 +                                                      IEEE80211_HT_CAP_SGI_40;
 +              }
 +
 +              if ((index >= 0) && nmode) {
 +                      bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
 +                      bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
 +                      bands[index]->ht_cap.ht_supported = true;
 +                      bands[index]->ht_cap.ampdu_factor =
 +                                              IEEE80211_HT_MAX_AMPDU_64K;
 +                      bands[index]->ht_cap.ampdu_density =
 +                                              IEEE80211_HT_MPDU_DENSITY_16;
 +                      /* An HT shall support all EQM rates for one spatial
 +                       * stream
 +                       */
 +                      bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
 +              }
 +      }
 +
 +      wiphy = cfg_to_wiphy(cfg);
 +      wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ];
 +      wiphy->bands[IEEE80211_BAND_5GHZ] = bands[IEEE80211_BAND_5GHZ];
 +      wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
  
        return err;
  }
  
 +
  static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_info *cfg)
  {
 -      return wl_update_wiphybands(cfg);
 +      return brcmf_update_wiphybands(cfg);
  }
  
  static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
index cd837860cd42cd1d1667ba0960c31efb9592ab04,e2340b231aa163cfba03f8b8f90c0822003a385d..c837be242cba6a90775365710e10f25f6dec675c
@@@ -1,6 -1,5 +1,6 @@@
  /*
   * Copyright (c) 2010 Broadcom Corporation
 + * Copyright (c) 2013 Hauke Mehrtens <hauke@hauke-m.de>
   *
   * Permission to use, copy, modify, and/or distribute this software for any
   * purpose with or without fee is hereby granted, provided that the above
@@@ -35,7 -34,6 +35,7 @@@
  #include "mac80211_if.h"
  #include "main.h"
  #include "debug.h"
 +#include "led.h"
  
  #define N_TX_QUEUES   4 /* #tx queues on mac80211<->driver interface */
  #define BRCMS_FLUSH_TIMEOUT   500 /* msec */
@@@ -276,6 -274,130 +276,130 @@@ static void brcms_set_basic_rate(struc
        }
  }
  
+ /**
+  * This function frees the WL per-device resources.
+  *
+  * This function frees resources owned by the WL device pointed to
+  * by the wl parameter.
+  *
+  * precondition: can both be called locked and unlocked
+  *
+  */
+ static void brcms_free(struct brcms_info *wl)
+ {
+       struct brcms_timer *t, *next;
+       /* free ucode data */
+       if (wl->fw.fw_cnt)
+               brcms_ucode_data_free(&wl->ucode);
+       if (wl->irq)
+               free_irq(wl->irq, wl);
+       /* kill dpc */
+       tasklet_kill(&wl->tasklet);
+       if (wl->pub) {
+               brcms_debugfs_detach(wl->pub);
+               brcms_c_module_unregister(wl->pub, "linux", wl);
+       }
+       /* free common resources */
+       if (wl->wlc) {
+               brcms_c_detach(wl->wlc);
+               wl->wlc = NULL;
+               wl->pub = NULL;
+       }
+       /* virtual interface deletion is deferred so we cannot spinwait */
+       /* wait for all pending callbacks to complete */
+       while (atomic_read(&wl->callbacks) > 0)
+               schedule();
+       /* free timers */
+       for (t = wl->timers; t; t = next) {
+               next = t->next;
+ #ifdef DEBUG
+               kfree(t->name);
+ #endif
+               kfree(t);
+       }
+ }
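
The comment in brcms_free() above explains why the callback drain cannot spinwait: virtual interface deletion is deferred, so the function repeatedly calls schedule() until the pending-callback count reaches zero. A rough userspace model of that drain loop, with a pthread standing in for the deferred work (compile with -pthread):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int callbacks = 3;        /* pending deferred callbacks */

static void *worker(void *arg)
{
        int i;

        (void)arg;
        for (i = 0; i < 3; i++)
                atomic_fetch_sub(&callbacks, 1); /* retire one callback */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        while (atomic_load(&callbacks) > 0)
                sched_yield();          /* stand-in for schedule() */
        pthread_join(t, NULL);
        puts("all callbacks completed");
        return 0;
}
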
+ /*
+ * called both from the kernel and from this kernel module (error flow on attach)
+ * precondition: perimeter lock is not acquired.
+ */
+ static void brcms_remove(struct bcma_device *pdev)
+ {
+       struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
+       struct brcms_info *wl = hw->priv;
+       if (wl->wlc) {
+               wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
+               wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
+               ieee80211_unregister_hw(hw);
+       }
+       brcms_free(wl);
+       bcma_set_drvdata(pdev, NULL);
+       ieee80211_free_hw(hw);
+ }
+ /*
+  * Precondition: Since this function is called in brcms_pci_probe() context,
+  * no locking is required.
+  */
+ static void brcms_release_fw(struct brcms_info *wl)
+ {
+       int i;
+       for (i = 0; i < MAX_FW_IMAGES; i++) {
+               release_firmware(wl->fw.fw_bin[i]);
+               release_firmware(wl->fw.fw_hdr[i]);
+       }
+ }
+ /*
+  * Precondition: Since this function is called in brcms_pci_probe() context,
+  * no locking is required.
+  */
+ static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
+ {
+       int status;
+       struct device *device = &pdev->dev;
+       char fw_name[100];
+       int i;
+       memset(&wl->fw, 0, sizeof(struct brcms_firmware));
+       for (i = 0; i < MAX_FW_IMAGES; i++) {
+               if (brcms_firmwares[i] == NULL)
+                       break;
+               sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
+                       UCODE_LOADER_API_VER);
+               status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
+               if (status) {
+                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+                                 KBUILD_MODNAME, fw_name);
+                       return status;
+               }
+               sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
+                       UCODE_LOADER_API_VER);
+               status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
+               if (status) {
+                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+                                 KBUILD_MODNAME, fw_name);
+                       return status;
+               }
+               wl->fw.hdr_num_entries[i] =
+                   wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
+       }
+       wl->fw.fw_cnt = i;
+       status = brcms_ucode_data_init(wl, &wl->ucode);
+       brcms_release_fw(wl);
+       return status;
+ }
  static void brcms_ops_tx(struct ieee80211_hw *hw,
                         struct ieee80211_tx_control *control,
                         struct sk_buff *skb)
@@@ -308,6 -430,14 +432,14 @@@ static int brcms_ops_start(struct ieee8
        if (!blocked)
                wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
  
+       if (!wl->ucode.bcm43xx_bomminor) {
+               err = brcms_request_fw(wl, wl->wlc->hw->d11core);
+               if (err) {
+                       brcms_remove(wl->wlc->hw->d11core);
+                       return -ENOENT;
+               }
+       }
        spin_lock_bh(&wl->lock);
        /* avoid acknowledging frames before a non-monitor device is added */
        wl->mute_tx = true;
@@@ -357,26 -487,18 +489,26 @@@ brcms_ops_add_interface(struct ieee8021
  {
        struct brcms_info *wl = hw->priv;
  
 -      /* Just STA for now */
 -      if (vif->type != NL80211_IFTYPE_STATION) {
 +      /* Just STA, AP and ADHOC for now */
 +      if (vif->type != NL80211_IFTYPE_STATION &&
 +          vif->type != NL80211_IFTYPE_AP &&
 +          vif->type != NL80211_IFTYPE_ADHOC) {
                brcms_err(wl->wlc->hw->d11core,
 -                        "%s: Attempt to add type %d, only STA for now\n",
 +                        "%s: Attempt to add type %d, only STA, AP and AdHoc for now\n",
                          __func__, vif->type);
                return -EOPNOTSUPP;
        }
  
        spin_lock_bh(&wl->lock);
 -      memcpy(wl->pub->cur_etheraddr, vif->addr, sizeof(vif->addr));
        wl->mute_tx = false;
        brcms_c_mute(wl->wlc, false);
 +      if (vif->type == NL80211_IFTYPE_STATION)
 +              brcms_c_start_station(wl->wlc, vif->addr);
 +      else if (vif->type == NL80211_IFTYPE_AP)
 +              brcms_c_start_ap(wl->wlc, vif->addr, vif->bss_conf.bssid,
 +                               vif->bss_conf.ssid, vif->bss_conf.ssid_len);
 +      else if (vif->type == NL80211_IFTYPE_ADHOC)
 +              brcms_c_start_adhoc(wl->wlc, vif->addr);
        spin_unlock_bh(&wl->lock);
  
        return 0;
@@@ -528,43 -650,14 +660,43 @@@ brcms_ops_bss_info_changed(struct ieee8
                brcms_c_set_addrmatch(wl->wlc, RCM_BSSID_OFFSET, info->bssid);
                spin_unlock_bh(&wl->lock);
        }
 -      if (changed & BSS_CHANGED_BEACON)
 +      if (changed & BSS_CHANGED_SSID) {
 +              /* BSSID changed, for whatever reason (IBSS and managed mode) */
 +              spin_lock_bh(&wl->lock);
 +              brcms_c_set_ssid(wl->wlc, info->ssid, info->ssid_len);
 +              spin_unlock_bh(&wl->lock);
 +      }
 +      if (changed & BSS_CHANGED_BEACON) {
                /* Beacon data changed, retrieve new beacon (beaconing modes) */
 -              brcms_err(core, "%s: beacon changed\n", __func__);
 +              struct sk_buff *beacon;
 +              u16 tim_offset = 0;
 +
 +              spin_lock_bh(&wl->lock);
 +              beacon = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL);
 +              brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
 +                                     info->dtim_period);
 +              spin_unlock_bh(&wl->lock);
 +      }
 +
 +      if (changed & BSS_CHANGED_AP_PROBE_RESP) {
 +              struct sk_buff *probe_resp;
 +
 +              spin_lock_bh(&wl->lock);
 +              probe_resp = ieee80211_proberesp_get(hw, vif);
 +              brcms_c_set_new_probe_resp(wl->wlc, probe_resp);
 +              spin_unlock_bh(&wl->lock);
 +      }
  
        if (changed & BSS_CHANGED_BEACON_ENABLED) {
                /* Beaconing should be enabled/disabled (beaconing modes) */
                brcms_err(core, "%s: Beacon enabled: %s\n", __func__,
                          info->enable_beacon ? "true" : "false");
 +              if (info->enable_beacon &&
 +                  hw->wiphy->flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) {
 +                      brcms_c_enable_probe_resp(wl->wlc, true);
 +              } else {
 +                      brcms_c_enable_probe_resp(wl->wlc, false);
 +              }
        }
  
        if (changed & BSS_CHANGED_CQM) {
@@@ -762,7 -855,7 +894,7 @@@ static bool brcms_tx_flush_completed(st
        return result;
  }
  
 -static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
 +static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
  {
        struct brcms_info *wl = hw->priv;
        int ret;
                           "ret=%d\n", jiffies_to_msecs(ret));
  }
  
 +static u64 brcms_ops_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 +{
 +      struct brcms_info *wl = hw->priv;
 +      u64 tsf;
 +
 +      spin_lock_bh(&wl->lock);
 +      tsf = brcms_c_tsf_get(wl->wlc);
 +      spin_unlock_bh(&wl->lock);
 +
 +      return tsf;
 +}
 +
 +static void brcms_ops_set_tsf(struct ieee80211_hw *hw,
 +                         struct ieee80211_vif *vif, u64 tsf)
 +{
 +      struct brcms_info *wl = hw->priv;
 +
 +      spin_lock_bh(&wl->lock);
 +      brcms_c_tsf_set(wl->wlc, tsf);
 +      spin_unlock_bh(&wl->lock);
 +}
 +
  static const struct ieee80211_ops brcms_ops = {
        .tx = brcms_ops_tx,
        .start = brcms_ops_start,
        .ampdu_action = brcms_ops_ampdu_action,
        .rfkill_poll = brcms_ops_rfkill_poll,
        .flush = brcms_ops_flush,
 +      .get_tsf = brcms_ops_get_tsf,
 +      .set_tsf = brcms_ops_set_tsf,
  };
  
  void brcms_dpc(unsigned long data)
        wake_up(&wl->tx_flush_wq);
  }
  
- /*
-  * Precondition: Since this function is called in brcms_pci_probe() context,
-  * no locking is required.
-  */
- static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
- {
-       int status;
-       struct device *device = &pdev->dev;
-       char fw_name[100];
-       int i;
-       memset(&wl->fw, 0, sizeof(struct brcms_firmware));
-       for (i = 0; i < MAX_FW_IMAGES; i++) {
-               if (brcms_firmwares[i] == NULL)
-                       break;
-               sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
-                       UCODE_LOADER_API_VER);
-               status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
-               if (status) {
-                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
-                                 KBUILD_MODNAME, fw_name);
-                       return status;
-               }
-               sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
-                       UCODE_LOADER_API_VER);
-               status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
-               if (status) {
-                       wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
-                                 KBUILD_MODNAME, fw_name);
-                       return status;
-               }
-               wl->fw.hdr_num_entries[i] =
-                   wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
-       }
-       wl->fw.fw_cnt = i;
-       return brcms_ucode_data_init(wl, &wl->ucode);
- }
- /*
-  * Precondition: Since this function is called in brcms_pci_probe() context,
-  * no locking is required.
-  */
- static void brcms_release_fw(struct brcms_info *wl)
- {
-       int i;
-       for (i = 0; i < MAX_FW_IMAGES; i++) {
-               release_firmware(wl->fw.fw_bin[i]);
-               release_firmware(wl->fw.fw_hdr[i]);
-       }
- }
- /**
-  * This function frees the WL per-device resources.
-  *
-  * This function frees resources owned by the WL device pointed to
-  * by the wl parameter.
-  *
-  * precondition: can both be called locked and unlocked
-  *
-  */
- static void brcms_free(struct brcms_info *wl)
- {
-       struct brcms_timer *t, *next;
-       /* free ucode data */
-       if (wl->fw.fw_cnt)
-               brcms_ucode_data_free(&wl->ucode);
-       if (wl->irq)
-               free_irq(wl->irq, wl);
-       /* kill dpc */
-       tasklet_kill(&wl->tasklet);
-       if (wl->pub) {
-               brcms_debugfs_detach(wl->pub);
-               brcms_c_module_unregister(wl->pub, "linux", wl);
-       }
-       /* free common resources */
-       if (wl->wlc) {
-               brcms_c_detach(wl->wlc);
-               wl->wlc = NULL;
-               wl->pub = NULL;
-       }
-       /* virtual interface deletion is deferred so we cannot spinwait */
-       /* wait for all pending callbacks to complete */
-       while (atomic_read(&wl->callbacks) > 0)
-               schedule();
-       /* free timers */
-       for (t = wl->timers; t; t = next) {
-               next = t->next;
- #ifdef DEBUG
-               kfree(t->name);
- #endif
-               kfree(t);
-       }
- }
- /*
- * called from both kernel as from this kernel module (error flow on attach)
- * precondition: perimeter lock is not acquired.
- */
- static void brcms_remove(struct bcma_device *pdev)
- {
-       struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
-       struct brcms_info *wl = hw->priv;
-       if (wl->wlc) {
-               brcms_led_unregister(wl);
-               wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
-               wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
-               ieee80211_unregister_hw(hw);
-       }
-       brcms_free(wl);
-       bcma_set_drvdata(pdev, NULL);
-       ieee80211_free_hw(hw);
- }
  static irqreturn_t brcms_isr(int irq, void *dev_id)
  {
        struct brcms_info *wl;
@@@ -1058,16 -1004,7 +1067,16 @@@ static int ieee_hw_init(struct ieee8021
  
        /* channel change time is dependent on chip and band  */
        hw->channel_change_time = 7 * 1000;
 -      hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 +      hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 +                                   BIT(NL80211_IFTYPE_AP) |
 +                                   BIT(NL80211_IFTYPE_ADHOC);
 +
 +      /*
 +       * deactivate sending probe responses by ucode, because this will
 +       * cause problems when WPS is used.
 +       *
 +       * hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
 +       */
  
        hw->rate_control_algorithm = "minstrel_ht";
  
@@@ -1120,18 -1057,8 +1129,8 @@@ static struct brcms_info *brcms_attach(
        spin_lock_init(&wl->lock);
        spin_lock_init(&wl->isr_lock);
  
-       /* prepare ucode */
-       if (brcms_request_fw(wl, pdev) < 0) {
-               wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
-                         "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
-               brcms_release_fw(wl);
-               brcms_remove(pdev);
-               return NULL;
-       }
        /* common load-time initialization */
        wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
-       brcms_release_fw(wl);
        if (!wl->wlc) {
                wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
                          KBUILD_MODNAME, err);
@@@ -1224,8 -1151,6 +1223,8 @@@ static int brcms_bcma_probe(struct bcma
                pr_err("%s: brcms_attach failed!\n", __func__);
                return -ENODEV;
        }
 +      brcms_led_register(wl);
 +
        return 0;
  }
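
Note how brcms_ops_start() (earlier in this file) now requests firmware on the first start, keyed off wl->ucode.bcm43xx_bomminor, instead of during probe as the removed brcms_attach() code did; presumably this lets the module probe before the firmware files are reachable. A minimal sketch of that lazy one-shot pattern (names are stand-ins):

#include <stdbool.h>
#include <stdio.h>

static bool ucode_loaded;       /* stands in for wl->ucode.bcm43xx_bomminor */

static int request_fw_model(void)
{
        puts("loading firmware...");
        ucode_loaded = true;
        return 0;
}

/* Lazy one-shot load on the first start. */
static int start_model(void)
{
        if (!ucode_loaded) {
                int err = request_fw_model();

                if (err)
                        return err;
        }
        puts("device started");
        return 0;
}

int main(void)
{
        start_model();  /* loads firmware */
        start_model();  /* already loaded, skips the request */
        return 0;
}
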
  
index ab4d2861ef3c1fe5b5e4cbf454cb22f51fbe0b73,6ccb7457746b29e4e6ac0f53731bba725709f6a8..c4f392d5db4cd595dfea3211fbcc8f99a81f48c2
@@@ -769,6 -769,7 +769,7 @@@ struct qeth_card 
        unsigned long thread_start_mask;
        unsigned long thread_allowed_mask;
        unsigned long thread_running_mask;
+       struct task_struct *recovery_task;
        spinlock_t ip_lock;
        struct list_head ip_list;
        struct list_head *ip_tbd_list;
@@@ -862,6 -863,8 +863,8 @@@ extern struct qeth_card_list_struct qet
  extern struct kmem_cache *qeth_core_header_cache;
  extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
  
+ void qeth_set_recovery_task(struct qeth_card *);
+ void qeth_clear_recovery_task(struct qeth_card *);
  void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
  int qeth_threads_running(struct qeth_card *, unsigned long);
  int qeth_wait_for_threads(struct qeth_card *, unsigned long);
@@@ -915,7 -918,7 +918,7 @@@ int qeth_send_control_data(struct qeth_
        int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
        void *reply_param);
  int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
 -int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
 +int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
  int qeth_get_elements_for_frags(struct sk_buff *);
  int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
                        struct sk_buff *, struct qeth_hdr *, int, int, int);
@@@ -929,7 -932,7 +932,7 @@@ void qeth_core_get_drvinfo(struct net_d
  void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
  int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
  int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
 -int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
 +int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
  int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
  int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
  int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
index a86ce07736ef4d620ca8280c6d71d5697aa11d3f,451f92020599e68add4a6b8a52888f672ad69084..6cd0fc1b203a2c6e8147dc37d980933ff9b7e89f
@@@ -177,6 -177,23 +177,23 @@@ const char *qeth_get_cardname_short(str
        return "n/a";
  }
  
+ void qeth_set_recovery_task(struct qeth_card *card)
+ {
+       card->recovery_task = current;
+ }
+ EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
+ void qeth_clear_recovery_task(struct qeth_card *card)
+ {
+       card->recovery_task = NULL;
+ }
+ EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
+ static bool qeth_is_recovery_task(const struct qeth_card *card)
+ {
+       return card->recovery_task == current;
+ }
  void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
                         int clear_start_mask)
  {
@@@ -205,6 -222,8 +222,8 @@@ EXPORT_SYMBOL_GPL(qeth_threads_running)
  
  int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
  {
+       if (qeth_is_recovery_task(card))
+               return 0;
        return wait_event_interruptible(card->wait_q,
                        qeth_threads_running(card, threads) == 0);
  }
@@@ -316,7 -335,7 +335,7 @@@ static inline int qeth_alloc_cq(struct 
  
                card->qdio.no_in_queues = 2;
  
 -              card->qdio.out_bufstates = (struct qdio_outbuf_state *)
 +              card->qdio.out_bufstates =
                        kzalloc(card->qdio.no_out_queues *
                                QDIO_MAX_BUFFERS_PER_Q *
                                sizeof(struct qdio_outbuf_state), GFP_KERNEL);
@@@ -3698,7 -3717,7 +3717,7 @@@ int qeth_get_elements_for_frags(struct 
  }
  EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
  
 -int qeth_get_elements_no(struct qeth_card *card, void *hdr,
 +int qeth_get_elements_no(struct qeth_card *card,
                     struct sk_buff *skb, int elems)
  {
        int dlen = skb->len - skb->data_len;
  }
  EXPORT_SYMBOL_GPL(qeth_get_elements_no);
  
 -int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len)
 +int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
  {
        int hroom, inpage, rest;
  
                        return 1;
                memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
                skb->data -= rest;
 +              skb->tail -= rest;
 +              *hdr = (struct qeth_hdr *)skb->data;
                QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
        }
        return 0;
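
The qeth_hdr_chk_and_bounce() change above is a stale-pointer fix: after the payload is memmove()'d down to gain headroom, skb->data and skb->tail shift, so the caller's cached header pointer must be rewritten through the new struct qeth_hdr ** argument. A simplified standalone model:

#include <stdio.h>
#include <string.h>

struct hdr_model { char magic; };

/* Slide the payload down to gain headroom and refresh the caller's
 * cached header pointer, as the real function now does via "hdr". */
static void bounce(char **data, struct hdr_model **hdr, int rest)
{
        memmove(*data - rest, *data, 8);
        *data -= rest;
        *hdr = (struct hdr_model *)*data;
}

int main(void)
{
        char buf[32];
        char *data = buf + 8;
        struct hdr_model *hdr = (struct hdr_model *)data;

        memcpy(data, "QETHDATA", 8);
        bounce(&data, &hdr, 4);
        printf("hdr still tracks data: %d\n", (char *)hdr == data);
        return 0;
}
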
index 2d425416b0a1106a5d62eef7e3e2e825efe9ab52,155b101bd7309432d32ce9ed915c1a3a1b889a60..ec8ccdae7aba3d5b2685f4c8e7954912259b7870
@@@ -302,8 -302,7 +302,8 @@@ static void qeth_l2_process_vlans(struc
        spin_unlock_bh(&card->vlanlock);
  }
  
 -static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 +static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
 +                                 __be16 proto, u16 vid)
  {
        struct qeth_card *card = dev->ml_priv;
        struct qeth_vlan_vid *id;
        return 0;
  }
  
 -static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 +static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
 +                                  __be16 proto, u16 vid)
  {
        struct qeth_vlan_vid *id, *tmpid = NULL;
        struct qeth_card *card = dev->ml_priv;
@@@ -773,7 -771,8 +773,7 @@@ static int qeth_l2_hard_start_xmit(stru
                }
        }
  
 -      elements = qeth_get_elements_no(card, (void *)hdr, new_skb,
 -                                              elements_needed);
 +      elements = qeth_get_elements_no(card, new_skb, elements_needed);
        if (!elements) {
                if (data_offset >= 0)
                        kmem_cache_free(qeth_core_header_cache, hdr);
        }
  
        if (card->info.type != QETH_CARD_TYPE_IQD) {
 -              if (qeth_hdr_chk_and_bounce(new_skb,
 +              if (qeth_hdr_chk_and_bounce(new_skb, &hdr,
                    sizeof(struct qeth_hdr_layer2)))
                        goto tx_drop;
                rc = qeth_do_send_packet(card, queue, new_skb, hdr,
@@@ -960,7 -959,7 +960,7 @@@ static int qeth_l2_setup_netdev(struct 
                SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
        else
                SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
 -      card->dev->features |= NETIF_F_HW_VLAN_FILTER;
 +      card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
        card->info.broadcast_capable = 1;
        qeth_l2_request_initial_mac(card);
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
@@@ -1144,6 -1143,7 +1144,7 @@@ static int qeth_l2_recover(void *ptr
        QETH_CARD_TEXT(card, 2, "recover2");
        dev_warn(&card->gdev->dev,
                "A recovery process has been started for the device\n");
+       qeth_set_recovery_task(card);
        __qeth_l2_set_offline(card->gdev, 1);
        rc = __qeth_l2_set_online(card->gdev, 1);
        if (!rc)
                dev_warn(&card->gdev->dev, "The qeth device driver "
                                "failed to recover an error on the device\n");
        }
+       qeth_clear_recovery_task(card);
        qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
        qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
        return 0;
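
The recovery-task bookkeeping threaded through this file (and qeth_l3_main.c below) lets qeth_wait_for_threads() return immediately when the caller is the recovery thread itself, which would otherwise wait forever on thread bits it is holding. A userspace model of the short-circuit, with pthread identities standing in for task_struct pointers:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct card_model {
        pthread_t recovery_task;
        bool have_recovery_task;
};

static void set_recovery_task(struct card_model *c)
{
        c->recovery_task = pthread_self();
        c->have_recovery_task = true;
}

static bool is_recovery_task(const struct card_model *c)
{
        return c->have_recovery_task &&
               pthread_equal(c->recovery_task, pthread_self());
}

static int wait_for_threads(struct card_model *c)
{
        if (is_recovery_task(c))
                return 0;       /* never wait on ourselves */
        /* ... otherwise block until the thread mask clears ... */
        return 0;
}

int main(void)
{
        struct card_model c = { 0 };

        set_recovery_task(&c);
        printf("short-circuit taken: rc=%d\n", wait_for_threads(&c));
        return 0;
}
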
index 449676e48fd0f65c65794dc6b56c5e75fca5a8d1,1f7edf1b26c33bff2b6a9baa6b163311a79fad6f..c1b0b2761f8dce6e0d3ad709902eeb19a925beba
@@@ -1659,8 -1659,7 +1659,8 @@@ static void qeth_l3_add_vlan_mc(struct 
        for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
                struct net_device *netdev;
  
 -              netdev = __vlan_find_dev_deep(card->dev, vid);
 +              netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
 +                                            vid);
                if (netdev == NULL ||
                    !(netdev->flags & IFF_UP))
                        continue;
@@@ -1721,8 -1720,7 +1721,8 @@@ static void qeth_l3_add_vlan_mc6(struc
        for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
                struct net_device *netdev;
  
 -              netdev = __vlan_find_dev_deep(card->dev, vid);
 +              netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
 +                                            vid);
                if (netdev == NULL ||
                    !(netdev->flags & IFF_UP))
                        continue;
@@@ -1766,7 -1764,7 +1766,7 @@@ static void qeth_l3_free_vlan_addresses
  
        QETH_CARD_TEXT(card, 4, "frvaddr4");
  
 -      netdev = __vlan_find_dev_deep(card->dev, vid);
 +      netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
        if (!netdev)
                return;
        in_dev = in_dev_get(netdev);
@@@ -1796,7 -1794,7 +1796,7 @@@ static void qeth_l3_free_vlan_addresses
  
        QETH_CARD_TEXT(card, 4, "frvaddr6");
  
 -      netdev = __vlan_find_dev_deep(card->dev, vid);
 +      netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
        if (!netdev)
                return;
        in6_dev = in6_dev_get(netdev);
@@@ -1826,8 -1824,7 +1826,8 @@@ static void qeth_l3_free_vlan_addresses
        rcu_read_unlock();
  }
  
 -static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 +static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
 +                                 __be16 proto, u16 vid)
  {
        struct qeth_card *card = dev->ml_priv;
  
        return 0;
  }
  
 -static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 +static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
 +                                  __be16 proto, u16 vid)
  {
        struct qeth_card *card = dev->ml_priv;
        unsigned long flags;
@@@ -1979,8 -1975,7 +1979,8 @@@ static int qeth_l3_process_inbound_buff
                                                      &vlan_tag);
                                len = skb->len;
                                if (is_vlan && !card->options.sniffer)
 -                                      __vlan_hwaccel_put_tag(skb, vlan_tag);
 +                                      __vlan_hwaccel_put_tag(skb,
 +                                              htons(ETH_P_8021Q), vlan_tag);
                                napi_gro_receive(&card->napi, skb);
                        }
                        break;
@@@ -2089,8 -2084,7 +2089,8 @@@ static int qeth_l3_verify_vlan_dev(stru
                struct net_device *netdev;
  
                rcu_read_lock();
 -              netdev = __vlan_find_dev_deep(card->dev, vid);
 +              netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
 +                                            vid);
                rcu_read_unlock();
                if (netdev == dev) {
                        rc = QETH_VLAN_CARD;
@@@ -3037,7 -3031,8 +3037,7 @@@ static int qeth_l3_hard_start_xmit(stru
                        qeth_l3_hdr_csum(card, hdr, new_skb);
        }
  
 -      elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
 -                                               elements_needed);
 +      elems = qeth_get_elements_no(card, new_skb, elements_needed);
        if (!elems) {
                if (data_offset >= 0)
                        kmem_cache_free(qeth_core_header_cache, hdr);
                else
                        len = sizeof(struct qeth_hdr_layer3);
  
 -              if (qeth_hdr_chk_and_bounce(new_skb, len))
 +              if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
                        goto tx_drop;
                rc = qeth_do_send_packet(card, queue, new_skb, hdr,
                                         elements_needed);
@@@ -3299,9 -3294,9 +3299,9 @@@ static int qeth_l3_setup_netdev(struct 
        card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
        card->dev->mtu = card->info.initial_mtu;
        SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
 -      card->dev->features |=  NETIF_F_HW_VLAN_TX |
 -                              NETIF_F_HW_VLAN_RX |
 -                              NETIF_F_HW_VLAN_FILTER;
 +      card->dev->features |=  NETIF_F_HW_VLAN_CTAG_TX |
 +                              NETIF_F_HW_VLAN_CTAG_RX |
 +                              NETIF_F_HW_VLAN_CTAG_FILTER;
        card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        card->dev->gso_max_size = 15 * PAGE_SIZE;
  
@@@ -3520,6 -3515,7 +3520,7 @@@ static int qeth_l3_recover(void *ptr
        QETH_CARD_TEXT(card, 2, "recover2");
        dev_warn(&card->gdev->dev,
                "A recovery process has been started for the device\n");
+       qeth_set_recovery_task(card);
        __qeth_l3_set_offline(card->gdev, 1);
        rc = __qeth_l3_set_online(card->gdev, 1);
        if (!rc)
                dev_warn(&card->gdev->dev, "The qeth device driver "
                                "failed to recover an error on the device\n");
        }
+       qeth_clear_recovery_task(card);
        qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
        qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
        return 0;
index 791da2c0d8f640df64178c458bf05f0f89cb6920,7b0bce9367626ad987eb900528a7b329a2f32261..23c5dbfea115acdf9a2272c365fcc0ecbf89d14c
@@@ -110,8 -110,8 +110,8 @@@ static void ssb_pmu0_pllinit_r0(struct 
                return;
        }
  
 -      ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n",
 -                 (crystalfreq / 1000), (crystalfreq % 1000));
 +      ssb_info("Programming PLL to %u.%03u MHz\n",
 +               crystalfreq / 1000, crystalfreq % 1000);
  
        /* First turn the PLL off. */
        switch (bus->chip_id) {
        }
        tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
        if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
 -              ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n");
 +              ssb_emerg("Failed to turn the PLL off!\n");
  
        /* Set PDIV in PLL control 0. */
        pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0);
@@@ -249,8 -249,8 +249,8 @@@ static void ssb_pmu1_pllinit_r0(struct 
                return;
        }
  
 -      ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n",
 -                 (crystalfreq / 1000), (crystalfreq % 1000));
 +      ssb_info("Programming PLL to %u.%03u MHz\n",
 +               crystalfreq / 1000, crystalfreq % 1000);
  
        /* First turn the PLL off. */
        switch (bus->chip_id) {
        }
        tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
        if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
 -              ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n");
 +              ssb_emerg("Failed to turn the PLL off!\n");
  
        /* Set p1div and p2div. */
        pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0);
@@@ -349,8 -349,9 +349,8 @@@ static void ssb_pmu_pll_init(struct ssb
        case 43222:
                break;
        default:
 -              ssb_printk(KERN_ERR PFX
 -                         "ERROR: PLL init unknown for device %04X\n",
 -                         bus->chip_id);
 +              ssb_err("ERROR: PLL init unknown for device %04X\n",
 +                      bus->chip_id);
        }
  }
  
@@@ -471,8 -472,9 +471,8 @@@ static void ssb_pmu_resources_init(stru
                max_msk = 0xFFFFF;
                break;
        default:
 -              ssb_printk(KERN_ERR PFX
 -                         "ERROR: PMU resource config unknown for device %04X\n",
 -                         bus->chip_id);
 +              ssb_err("ERROR: PMU resource config unknown for device %04X\n",
 +                      bus->chip_id);
        }
  
        if (updown_tab) {
@@@ -524,8 -526,8 +524,8 @@@ void ssb_pmu_init(struct ssb_chipcommo
        pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP);
        cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION);
  
 -      ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
 -                  cc->pmu.rev, pmucap);
 +      ssb_dbg("Found rev %u PMU (capabilities 0x%08X)\n",
 +              cc->pmu.rev, pmucap);
  
        if (cc->pmu.rev == 1)
                chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
@@@ -636,8 -638,9 +636,8 @@@ u32 ssb_pmu_get_alp_clock(struct ssb_ch
        case 0x5354:
                ssb_pmu_get_alp_clock_clk0(cc);
        default:
 -              ssb_printk(KERN_ERR PFX
 -                         "ERROR: PMU alp clock unknown for device %04X\n",
 -                         bus->chip_id);
 +              ssb_err("ERROR: PMU alp clock unknown for device %04X\n",
 +                      bus->chip_id);
                return 0;
        }
  }
@@@ -651,8 -654,9 +651,8 @@@ u32 ssb_pmu_get_cpu_clock(struct ssb_ch
                /* 5354 chip uses a non programmable PLL of frequency 240MHz */
                return 240000000;
        default:
 -              ssb_printk(KERN_ERR PFX
 -                         "ERROR: PMU cpu clock unknown for device %04X\n",
 -                         bus->chip_id);
 +              ssb_err("ERROR: PMU cpu clock unknown for device %04X\n",
 +                      bus->chip_id);
                return 0;
        }
  }
@@@ -665,8 -669,38 +665,37 @@@ u32 ssb_pmu_get_controlclock(struct ssb
        case 0x5354:
                return 120000000;
        default:
 -              ssb_printk(KERN_ERR PFX
 -                         "ERROR: PMU controlclock unknown for device %04X\n",
 -                         bus->chip_id);
 +              ssb_err("ERROR: PMU controlclock unknown for device %04X\n",
 +                      bus->chip_id);
                return 0;
        }
  }
+ void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid)
+ {
+       u32 pmu_ctl = 0;
+       switch (cc->dev->bus->chip_id) {
+       case 0x4322:
+               ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070);
+               ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a);
+               ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854);
+               if (spuravoid == 1)
+                       ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828);
+               else
+                       ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828);
+               pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD;
+               break;
+       case 43222:
+               /* TODO: BCM43222 requires updating PLLs too */
+               return;
+       default:
+               ssb_printk(KERN_ERR PFX
+                          "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
+                          cc->dev->bus->chip_id);
+               return;
+       }
+       chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl);
+ }
+ EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate);
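
ssb_pmu_spuravoid_pllupdate() above follows a stage-then-commit sequence: the PLL control words are written first, and the hardware only loads them once the PLL_UPD bit is set in the PMU control register. A toy model of that sequencing; the register layout and bit position here are stand-ins, not the real chipcommon map:

#include <stdio.h>

#define PLL_UPD (1u << 10)      /* stand-in for SSB_CHIPCO_PMU_CTL_PLL_UPD */

static unsigned int pll_regs[8];
static unsigned int pmu_ctl;

/* Staged write: the hardware keeps running on its old settings. */
static void pll_write(int reg, unsigned int val)
{
        pll_regs[reg] = val;
}

/* Commit: one control bit makes the PMU load all staged words. */
static void pll_commit(void)
{
        pmu_ctl |= PLL_UPD;
}

int main(void)
{
        pll_write(0, 0x11100070);
        pll_write(2, 0x05201828);       /* the spuravoid == 1 variant */
        pll_commit();
        printf("pmu_ctl=0x%x pll0=0x%x\n", pmu_ctl, pll_regs[0]);
        return 0;
}
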
diff --combined include/net/scm.h
index 5a4c6a9eb12258449abad265eda33692ab00f908,b11708105681e04f5c24f7ed206f732e2c37dc02..8de2d37d2077f2df685baec887b1f836a8fa6514
@@@ -26,6 -26,7 +26,6 @@@ struct scm_fp_list 
  
  struct scm_cookie {
        struct pid              *pid;           /* Skb credentials */
 -      const struct cred       *cred;
        struct scm_fp_list      *fp;            /* Passed files         */
        struct scm_creds        creds;          /* Skb credentials      */
  #ifdef CONFIG_SECURITY_NETWORK
@@@ -50,18 -51,23 +50,18 @@@ static __inline__ void unix_get_peersec
  #endif /* CONFIG_SECURITY_NETWORK */
  
  static __inline__ void scm_set_cred(struct scm_cookie *scm,
 -                                  struct pid *pid, const struct cred *cred)
 +                                  struct pid *pid, kuid_t uid, kgid_t gid)
  {
        scm->pid  = get_pid(pid);
 -      scm->cred = cred ? get_cred(cred) : NULL;
        scm->creds.pid = pid_vnr(pid);
 -      scm->creds.uid = cred ? cred->uid : INVALID_UID;
 -      scm->creds.gid = cred ? cred->gid : INVALID_GID;
 +      scm->creds.uid = uid;
 +      scm->creds.gid = gid;
  }
  
  static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
  {
        put_pid(scm->pid);
        scm->pid  = NULL;
 -
 -      if (scm->cred)
 -              put_cred(scm->cred);
 -      scm->cred = NULL;
  }
  
  static __inline__ void scm_destroy(struct scm_cookie *scm)
@@@ -75,10 -81,8 +75,10 @@@ static __inline__ int scm_send(struct s
                               struct scm_cookie *scm, bool forcecreds)
  {
        memset(scm, 0, sizeof(*scm));
 +      scm->creds.uid = INVALID_UID;
 +      scm->creds.gid = INVALID_GID;
        if (forcecreds)
-               scm_set_cred(scm, task_tgid(current), current_euid(), current_egid());
 -              scm_set_cred(scm, task_tgid(current), current_cred());
++              scm_set_cred(scm, task_tgid(current), current_uid(), current_gid());
        unix_get_peersec_dgram(sock, scm);
        if (msg->msg_controllen <= 0)
                return 0;
diff --combined kernel/signal.c
index 497330ec2ae948d707553824f7bb3b9b780d9065,598dc06be4214c7a11fbf128baff984fa9c1c4b5..06ff7764ab7c748fd31634521122b1381af17d73
@@@ -32,7 -32,6 +32,7 @@@
  #include <linux/user_namespace.h>
  #include <linux/uprobes.h>
  #include <linux/compat.h>
 +#include <linux/cn_proc.h>
  #define CREATE_TRACE_POINTS
  #include <trace/events/signal.h>
  
@@@ -2351,7 -2350,6 +2351,7 @@@ relock
                if (sig_kernel_coredump(signr)) {
                        if (print_fatal_signals)
                                print_fatal_signal(info->si_signo);
 +                      proc_coredump_connector(current);
                        /*
                         * If it was able to dump core, this kills all
                         * other threads in the group and synchronizes with
@@@ -2950,7 -2948,7 +2950,7 @@@ do_send_specific(pid_t tgid, pid_t pid
  
  static int do_tkill(pid_t tgid, pid_t pid, int sig)
  {
-       struct siginfo info;
+       struct siginfo info = {};
  
        info.si_signo = sig;
        info.si_errno = 0;
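
The struct siginfo info = {}; change in do_tkill() above zero-fills the structure before only a handful of fields are assigned; without it, the unassigned bytes are stale kernel stack contents that can reach userspace when the signal is delivered. A userspace demonstration of the difference (memset() stands in for whatever happened to be on the stack):

#include <stdio.h>
#include <string.h>

struct info_model {
        int si_signo;
        int si_errno;
        int si_code;
        char payload[16];       /* stands in for the large siginfo union */
};

int main(void)
{
        struct info_model fixed = { 0 };        /* mirrors "= {}" */
        struct info_model old;

        memset(&old, 0xAA, sizeof(old));        /* simulate stale stack */

        fixed.si_signo = old.si_signo = 9;      /* only some fields set */

        printf("zeroed payload[0]=0x%02x, stale payload[0]=0x%02x\n",
               (unsigned char)fixed.payload[0],
               (unsigned char)old.payload[0]);
        return 0;
}
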
diff --combined net/batman-adv/main.c
index 6277735cd89e33797fdb10662c878490aa6cbb49,fa563e497c4861a3fd1656a91887eed47e81ec81..3e30a0f1b908b2872bcb6a13100306e0deda0c85
@@@ -35,7 -35,6 +35,7 @@@
  #include "vis.h"
  #include "hash.h"
  #include "bat_algo.h"
 +#include "network-coding.h"
  
  
  /* List manipulations on hardif_list have to be rtnl_lock()'ed,
@@@ -71,7 -70,6 +71,7 @@@ static int __init batadv_init(void
        batadv_debugfs_init();
  
        register_netdevice_notifier(&batadv_hard_if_notifier);
 +      rtnl_link_register(&batadv_link_ops);
  
        pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
                BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
@@@ -82,7 -80,6 +82,7 @@@
  static void __exit batadv_exit(void)
  {
        batadv_debugfs_destroy();
 +      rtnl_link_unregister(&batadv_link_ops);
        unregister_netdevice_notifier(&batadv_hard_if_notifier);
        batadv_hardif_remove_interfaces();
  
@@@ -138,10 -135,6 +138,10 @@@ int batadv_mesh_init(struct net_device 
        if (ret < 0)
                goto err;
  
 +      ret = batadv_nc_init(bat_priv);
 +      if (ret < 0)
 +              goto err;
 +
        atomic_set(&bat_priv->gw.reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
  
@@@ -164,7 -157,6 +164,7 @@@ void batadv_mesh_free(struct net_devic
  
        batadv_gw_node_purge(bat_priv);
        batadv_originator_free(bat_priv);
 +      batadv_nc_free(bat_priv);
  
        batadv_tt_free(bat_priv);
  
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
  }
  
- int batadv_is_my_mac(const uint8_t *addr)
++/**
++ * batadv_is_my_mac - check if the given mac address belongs to any of the real
++ * interfaces in the current mesh
++ * @bat_priv: the bat priv with all the soft interface information
++ * @addr: the address to check
++ */
+ int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
  {
        const struct batadv_hard_iface *hard_iface;
  
                if (hard_iface->if_status != BATADV_IF_ACTIVE)
                        continue;
  
+               if (hard_iface->soft_iface != bat_priv->soft_iface)
+                       continue;
                if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
                        rcu_read_unlock();
                        return 1;
@@@ -419,7 -414,7 +428,7 @@@ int batadv_algo_seq_print_text(struct s
  {
        struct batadv_algo_ops *bat_algo_ops;
  
 -      seq_printf(seq, "Available routing algorithms:\n");
 +      seq_puts(seq, "Available routing algorithms:\n");
  
        hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
                seq_printf(seq, "%s\n", bat_algo_ops->name);
diff --combined net/batman-adv/main.h
index f90f5bc8e4260751c8b74e20a67f6237d45c3070,d40910dfc8ea5a20fbe2c839f59bd65018bf1c28..59a0d6af15c88aca2f05c0883fc2fdfeadea66f4
@@@ -26,7 -26,7 +26,7 @@@
  #define BATADV_DRIVER_DEVICE "batman-adv"
  
  #ifndef BATADV_SOURCE_VERSION
 -#define BATADV_SOURCE_VERSION "2013.1.0"
 +#define BATADV_SOURCE_VERSION "2013.2.0"
  #endif
  
  /* B.A.T.M.A.N. parameters */
  #define BATADV_RESET_PROTECTION_MS 30000
  #define BATADV_EXPECTED_SEQNO_RANGE   65536
  
 +#define BATADV_NC_NODE_TIMEOUT 10000 /* Milliseconds */
 +
  enum batadv_mesh_state {
        BATADV_MESH_INACTIVE,
        BATADV_MESH_ACTIVE,
@@@ -152,7 -150,6 +152,7 @@@ enum batadv_uev_type 
  #include <linux/percpu.h>
  #include <linux/slab.h>
  #include <net/sock.h>         /* struct sock */
 +#include <net/rtnetlink.h>
  #include <linux/jiffies.h>
  #include <linux/seq_file.h>
  #include "types.h"
@@@ -165,7 -162,7 +165,7 @@@ extern struct workqueue_struct *batadv_
  
  int batadv_mesh_init(struct net_device *soft_iface);
  void batadv_mesh_free(struct net_device *soft_iface);
- int batadv_is_my_mac(const uint8_t *addr);
+ int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
  struct batadv_hard_iface *
  batadv_seq_print_text_primary_if_get(struct seq_file *seq);
  int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
@@@ -188,7 -185,6 +188,7 @@@ __be32 batadv_skb_crc32(struct sk_buff 
   * @BATADV_DBG_TT: translation table messages
   * @BATADV_DBG_BLA: bridge loop avoidance messages
   * @BATADV_DBG_DAT: ARP snooping and DAT related messages
 + * @BATADV_DBG_NC: network coding related messages
   * @BATADV_DBG_ALL: the union of all the above log levels
   */
  enum batadv_dbg_level {
        BATADV_DBG_TT     = BIT(2),
        BATADV_DBG_BLA    = BIT(3),
        BATADV_DBG_DAT    = BIT(4),
 -      BATADV_DBG_ALL    = 31,
 +      BATADV_DBG_NC     = BIT(5),
 +      BATADV_DBG_ALL    = 63,
  };
  
  #ifdef CONFIG_BATMAN_ADV_DEBUG
@@@ -303,10 -298,4 +303,10 @@@ static inline uint64_t batadv_sum_count
        return sum;
  }
  
 +/* Define a macro to reach the control buffer of the skb. The members of the
 + * control buffer are defined in struct batadv_skb_cb in types.h.
 + * The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
 + */
 +#define BATADV_SKB_CB(__skb)       ((struct batadv_skb_cb *)&((__skb)->cb[0]))
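 +
 +/* Illustrative use (hypothetical snippet, not taken from this change):
 + * mark an skb as decoded so later coding decisions can tell recoded
 + * packets from freshly coded ones:
 + *
 + *   BATADV_SKB_CB(skb)->decoded = true;
 + */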
 +
  #endif /* _NET_BATMAN_ADV_MAIN_H_ */
index 6b9a54485314be35fa76635acb6f2148b6d8b6c0,0000000000000000000000000000000000000000..f7c54305a9188f83dbd02af6eae287247854babf
mode 100644,000000..100644
--- /dev/null
@@@ -1,1821 -1,0 +1,1822 @@@
-       if (batadv_is_my_mac(ethhdr->h_dest))
 +/* Copyright (C) 2012-2013 B.A.T.M.A.N. contributors:
 + *
 + * Martin Hundebøll, Jeppe Ledet-Pedersen
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of version 2 of the GNU General Public
 + * License as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 + * 02110-1301, USA
 + */
 +
 +#include <linux/debugfs.h>
 +
 +#include "main.h"
 +#include "hash.h"
 +#include "network-coding.h"
 +#include "send.h"
 +#include "originator.h"
 +#include "hard-interface.h"
 +#include "routing.h"
 +
 +static struct lock_class_key batadv_nc_coding_hash_lock_class_key;
 +static struct lock_class_key batadv_nc_decoding_hash_lock_class_key;
 +
 +static void batadv_nc_worker(struct work_struct *work);
 +static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
 +                                     struct batadv_hard_iface *recv_if);
 +
 +/**
 + * batadv_nc_start_timer - initialise the nc periodic worker
 + * @bat_priv: the bat priv with all the soft interface information
 + */
 +static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
 +{
 +      queue_delayed_work(batadv_event_workqueue, &bat_priv->nc.work,
 +                         msecs_to_jiffies(10));
 +}
 +
 +/**
 + * batadv_nc_init - initialise coding hash table and start housekeeping
 + * @bat_priv: the bat priv with all the soft interface information
 + */
 +int batadv_nc_init(struct batadv_priv *bat_priv)
 +{
 +      bat_priv->nc.timestamp_fwd_flush = jiffies;
 +      bat_priv->nc.timestamp_sniffed_purge = jiffies;
 +
 +      if (bat_priv->nc.coding_hash || bat_priv->nc.decoding_hash)
 +              return 0;
 +
 +      bat_priv->nc.coding_hash = batadv_hash_new(128);
 +      if (!bat_priv->nc.coding_hash)
 +              goto err;
 +
 +      batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
 +                                 &batadv_nc_coding_hash_lock_class_key);
 +
 +      bat_priv->nc.decoding_hash = batadv_hash_new(128);
 +      if (!bat_priv->nc.decoding_hash)
 +              goto err;
 +
 +      batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
 +                                 &batadv_nc_decoding_hash_lock_class_key);
 +
 +      /* Register our packet type */
 +      if (batadv_recv_handler_register(BATADV_CODED,
 +                                       batadv_nc_recv_coded_packet) < 0)
 +              goto err;
 +
 +      INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
 +      batadv_nc_start_timer(bat_priv);
 +
 +      return 0;
 +
 +err:
 +      return -ENOMEM;
 +}
 +
 +/**
 + * batadv_nc_init_bat_priv - initialise the nc specific bat_priv variables
 + * @bat_priv: the bat priv with all the soft interface information
 + */
 +void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
 +{
 +      atomic_set(&bat_priv->network_coding, 1);
 +      bat_priv->nc.min_tq = 200;
 +      bat_priv->nc.max_fwd_delay = 10;
 +      bat_priv->nc.max_buffer_time = 200;
 +}
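 +
 +/* A note on units (inferred from how these fields are consumed): min_tq is
 + * on the 0-255 TQ scale, while max_fwd_delay and max_buffer_time are
 + * milliseconds, since both end up in batadv_has_timed_out(), which converts
 + * with msecs_to_jiffies().
 + */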
 +
 +/**
 + * batadv_nc_init_orig - initialise the nc fields of an orig_node
 + * @orig_node: the orig_node which is going to be initialised
 + */
 +void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
 +{
 +      INIT_LIST_HEAD(&orig_node->in_coding_list);
 +      INIT_LIST_HEAD(&orig_node->out_coding_list);
 +      spin_lock_init(&orig_node->in_coding_list_lock);
 +      spin_lock_init(&orig_node->out_coding_list_lock);
 +}
 +
 +/**
 + * batadv_nc_node_free_rcu - rcu callback to free an nc node and drop
 + *  its reference on the orig_node
 + * @rcu: rcu pointer of the nc node
 + */
 +static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
 +{
 +      struct batadv_nc_node *nc_node;
 +
 +      nc_node = container_of(rcu, struct batadv_nc_node, rcu);
 +      batadv_orig_node_free_ref(nc_node->orig_node);
 +      kfree(nc_node);
 +}
 +
 +/**
 + * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
 + * frees it
 + * @nc_node: the nc node to free
 + */
 +static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
 +{
 +      if (atomic_dec_and_test(&nc_node->refcount))
 +              call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
 +}
 +
 +/**
 + * batadv_nc_path_free_ref - decrements the nc path refcounter and possibly
 + * frees it
 + * @nc_path: the nc path to free
 + */
 +static void batadv_nc_path_free_ref(struct batadv_nc_path *nc_path)
 +{
 +      if (atomic_dec_and_test(&nc_path->refcount))
 +              kfree_rcu(nc_path, rcu);
 +}
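 +
 +/* Note the asymmetry with batadv_nc_node_free_ref() above: an nc node needs
 + * a dedicated RCU callback because it still holds a reference on its
 + * orig_node, whereas an nc path owns no other object and can go straight to
 + * kfree_rcu().
 + */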
 +
 +/**
 + * batadv_nc_packet_free - frees nc packet
 + * @nc_packet: the nc packet to free
 + */
 +static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet)
 +{
 +      if (nc_packet->skb)
 +              kfree_skb(nc_packet->skb);
 +
 +      batadv_nc_path_free_ref(nc_packet->nc_path);
 +      kfree(nc_packet);
 +}
 +
 +/**
 + * batadv_nc_to_purge_nc_node - checks whether an nc node has to be purged
 + * @bat_priv: the bat priv with all the soft interface information
 + * @nc_node: the nc node to check
 + *
 + * Returns true if the entry has to be purged now, false otherwise
 + */
 +static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
 +                                     struct batadv_nc_node *nc_node)
 +{
 +      if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 +              return true;
 +
 +      return batadv_has_timed_out(nc_node->last_seen, BATADV_NC_NODE_TIMEOUT);
 +}
 +
 +/**
 + * batadv_nc_to_purge_nc_path_coding - checks whether an nc path has timed out
 + * @bat_priv: the bat priv with all the soft interface information
 + * @nc_path: the nc path to check
 + *
 + * Returns true if the entry has to be purged now, false otherwise
 + */
 +static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
 +                                            struct batadv_nc_path *nc_path)
 +{
 +      if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 +              return true;
 +
 +      /* purge the path when no packets have been added for 10 times the
 +       * max_fwd_delay time
 +       */
 +      return batadv_has_timed_out(nc_path->last_valid,
 +                                  bat_priv->nc.max_fwd_delay * 10);
 +}
 +
 +/**
 + * batadv_nc_to_purge_nc_path_decoding - checks whether an nc path has timed out
 + * @bat_priv: the bat priv with all the soft interface information
 + * @nc_path: the nc path to check
 + *
 + * Returns true if the entry has to be purged now, false otherwise
 + */
 +static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
 +                                              struct batadv_nc_path *nc_path)
 +{
 +      if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 +              return true;
 +
 +      /* purge the path when no packets have been added for 10 times the
 +       * max_buffer time
 +       */
 +      return batadv_has_timed_out(nc_path->last_valid,
 +                                  bat_priv->nc.max_buffer_time * 10);
 +}
 +
 +/**
 + * batadv_nc_purge_orig_nc_nodes - go through list of nc nodes and purge stale
 + *  entries
 + * @bat_priv: the bat priv with all the soft interface information
 + * @list: list of nc nodes
 + * @lock: nc node list lock
 + * @to_purge: function in charge of deciding whether an entry has to be purged or
 + *          not. This function takes the nc node as argument and has to return
 + *          a boolean value: true if the entry has to be deleted, false
 + *          otherwise
 + */
 +static void
 +batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv,
 +                            struct list_head *list,
 +                            spinlock_t *lock,
 +                            bool (*to_purge)(struct batadv_priv *,
 +                                             struct batadv_nc_node *))
 +{
 +      struct batadv_nc_node *nc_node, *nc_node_tmp;
 +
 +      /* For each nc_node in list */
 +      spin_lock_bh(lock);
 +      list_for_each_entry_safe(nc_node, nc_node_tmp, list, list) {
 +              /* if a helper function has been passed as parameter,
 +               * ask it if the entry has to be purged or not
 +               */
 +              if (to_purge && !to_purge(bat_priv, nc_node))
 +                      continue;
 +
 +              batadv_dbg(BATADV_DBG_NC, bat_priv,
 +                         "Removing nc_node %pM -> %pM\n",
 +                         nc_node->addr, nc_node->orig_node->orig);
 +              list_del_rcu(&nc_node->list);
 +              batadv_nc_node_free_ref(nc_node);
 +      }
 +      spin_unlock_bh(lock);
 +}
 +
 +/**
 + * batadv_nc_purge_orig - purges all nc node data attached to the given
 + *  originator
 + * @bat_priv: the bat priv with all the soft interface information
 + * @orig_node: orig_node with the nc node entries to be purged
 + * @to_purge: function in charge of deciding whether an entry has to be purged or
 + *          not. This function takes the nc node as argument and has to return
 + *          a boolean value: true if the entry has to be deleted, false
 + *          otherwise
 + */
 +void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
 +                        struct batadv_orig_node *orig_node,
 +                        bool (*to_purge)(struct batadv_priv *,
 +                                         struct batadv_nc_node *))
 +{
 +      /* Check ingoing nc_node's of this orig_node */
 +      batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->in_coding_list,
 +                                    &orig_node->in_coding_list_lock,
 +                                    to_purge);
 +
 +      /* Check outgoing nc_node's of this orig_node */
 +      batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->out_coding_list,
 +                                    &orig_node->out_coding_list_lock,
 +                                    to_purge);
 +}
 +
 +/**
 + * batadv_nc_purge_orig_hash - traverse the entire originator hash and purge
 + *  timed-out nc nodes
 + * @bat_priv: the bat priv with all the soft interface information
 + */
 +static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
 +{
 +      struct batadv_hashtable *hash = bat_priv->orig_hash;
 +      struct hlist_head *head;
 +      struct batadv_orig_node *orig_node;
 +      uint32_t i;
 +
 +      if (!hash)
 +              return;
 +
 +      /* For each orig_node */
 +      for (i = 0; i < hash->size; i++) {
 +              head = &hash->table[i];
 +
 +              rcu_read_lock();
 +              hlist_for_each_entry_rcu(orig_node, head, hash_entry)
 +                      batadv_nc_purge_orig(bat_priv, orig_node,
 +                                           batadv_nc_to_purge_nc_node);
 +              rcu_read_unlock();
 +      }
 +}
 +
 +/**
 + * batadv_nc_purge_paths - traverse all nc paths in the hash and remove
 + *  unused ones
 + * @bat_priv: the bat priv with all the soft interface information
 + * @hash: hash table containing the nc paths to check
 + * @to_purge: function in charge of deciding whether an entry has to be purged or
 + *          not. This function takes the nc path as argument and has to return
 + *          a boolean value: true if the entry has to be deleted, false
 + *          otherwise
 + */
 +static void batadv_nc_purge_paths(struct batadv_priv *bat_priv,
 +                                struct batadv_hashtable *hash,
 +                                bool (*to_purge)(struct batadv_priv *,
 +                                                 struct batadv_nc_path *))
 +{
 +      struct hlist_head *head;
 +      struct hlist_node *node_tmp;
 +      struct batadv_nc_path *nc_path;
 +      spinlock_t *lock; /* Protects lists in hash */
 +      uint32_t i;
 +
 +      for (i = 0; i < hash->size; i++) {
 +              head = &hash->table[i];
 +              lock = &hash->list_locks[i];
 +
 +              /* For each nc_path in this bin */
 +              spin_lock_bh(lock);
 +              hlist_for_each_entry_safe(nc_path, node_tmp, head, hash_entry) {
 +                      /* if a helper function has been passed as parameter,
 +                       * ask it if the entry has to be purged or not
 +                       */
 +                      if (to_purge && !to_purge(bat_priv, nc_path))
 +                              continue;
 +
 +                      /* purging a non-empty nc_path should never happen, but
 +                       * is observed under high CPU load. Delay the purging
 +                       * until next iteration to allow the packet_list to be
 +                       * emptied first.
 +                       */
 +                      if (unlikely(!list_empty(&nc_path->packet_list))) {
 +                              net_ratelimited_function(printk,
 +                                                       KERN_WARNING
 +                                                       "Skipping free of non-empty nc_path (%pM -> %pM)!\n",
 +                                                       nc_path->prev_hop,
 +                                                       nc_path->next_hop);
 +                              continue;
 +                      }
 +
 +                      /* nc_path is unused, so remove it */
 +                      batadv_dbg(BATADV_DBG_NC, bat_priv,
 +                                 "Remove nc_path %pM -> %pM\n",
 +                                 nc_path->prev_hop, nc_path->next_hop);
 +                      hlist_del_rcu(&nc_path->hash_entry);
 +                      batadv_nc_path_free_ref(nc_path);
 +              }
 +              spin_unlock_bh(lock);
 +      }
 +}
 +
 +/**
 + * batadv_nc_hash_key_gen - computes the nc_path hash key
 + * @key: buffer to hold the final hash key
 + * @src: source ethernet mac address going into the hash key
 + * @dst: destination ethernet mac address going into the hash key
 + */
 +static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
 +                                 const char *dst)
 +{
 +      memcpy(key->prev_hop, src, sizeof(key->prev_hop));
 +      memcpy(key->next_hop, dst, sizeof(key->next_hop));
 +}
 +
 +/**
 + * batadv_nc_hash_choose - compute the hash value for an nc path
 + * @data: data to hash
 + * @size: size of the hash table
 + *
 + * Returns the selected index in the hash table for the given data.
 + */
 +static uint32_t batadv_nc_hash_choose(const void *data, uint32_t size)
 +{
 +      const struct batadv_nc_path *nc_path = data;
 +      uint32_t hash = 0;
 +
 +      hash = batadv_hash_bytes(hash, &nc_path->prev_hop,
 +                               sizeof(nc_path->prev_hop));
 +      hash = batadv_hash_bytes(hash, &nc_path->next_hop,
 +                               sizeof(nc_path->next_hop));
 +
 +      hash += (hash << 3);
 +      hash ^= (hash >> 11);
 +      hash += (hash << 15);
 +
 +      return hash % size;
 +}
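 +
 +/* The shifts and xors above are the classic finalisation of Bob Jenkins'
 + * one-at-a-time hash: batadv_hash_bytes() does the per-byte accumulation
 + * rounds, and this final avalanche spreads the accumulated bits before the
 + * modulo reduces the value to a bucket index.
 + */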
 +
 +/**
 + * batadv_nc_hash_compare - comparing function used in the network coding hash
 + *  tables
 + * @node: node in the local table
 + * @data2: second object to compare the node to
 + *
 + * Returns 1 if the two entries are the same, 0 otherwise
 + */
 +static int batadv_nc_hash_compare(const struct hlist_node *node,
 +                                const void *data2)
 +{
 +      const struct batadv_nc_path *nc_path1, *nc_path2;
 +
 +      nc_path1 = container_of(node, struct batadv_nc_path, hash_entry);
 +      nc_path2 = data2;
 +
 +      /* Return 1 if the two keys are identical */
 +      if (memcmp(nc_path1->prev_hop, nc_path2->prev_hop,
 +                 sizeof(nc_path1->prev_hop)) != 0)
 +              return 0;
 +
 +      if (memcmp(nc_path1->next_hop, nc_path2->next_hop,
 +                 sizeof(nc_path1->next_hop)) != 0)
 +              return 0;
 +
 +      return 1;
 +}
 +
 +/**
 + * batadv_nc_hash_find - search for an existing nc path and return it
 + * @hash: hash table containing the nc path
 + * @data: search key
 + *
 + * Returns the nc_path if found, NULL otherwise.
 + */
 +static struct batadv_nc_path *
 +batadv_nc_hash_find(struct batadv_hashtable *hash,
 +                  void *data)
 +{
 +      struct hlist_head *head;
 +      struct batadv_nc_path *nc_path, *nc_path_tmp = NULL;
 +      int index;
 +
 +      if (!hash)
 +              return NULL;
 +
 +      index = batadv_nc_hash_choose(data, hash->size);
 +      head = &hash->table[index];
 +
 +      rcu_read_lock();
 +      hlist_for_each_entry_rcu(nc_path, head, hash_entry) {
 +              if (!batadv_nc_hash_compare(&nc_path->hash_entry, data))
 +                      continue;
 +
 +              if (!atomic_inc_not_zero(&nc_path->refcount))
 +                      continue;
 +
 +              nc_path_tmp = nc_path;
 +              break;
 +      }
 +      rcu_read_unlock();
 +
 +      return nc_path_tmp;
 +}
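 +
 +/* atomic_inc_not_zero() above is the usual RCU lookup idiom: it refuses to
 + * take a reference on an entry whose refcount has already dropped to zero
 + * and which is only waiting out its RCU grace period before being freed.
 + */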
 +
 +/**
 + * batadv_nc_send_packet - send non-coded packet and free nc_packet struct
 + * @nc_packet: the nc packet to send
 + */
 +static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
 +{
 +      batadv_send_skb_packet(nc_packet->skb,
 +                             nc_packet->neigh_node->if_incoming,
 +                             nc_packet->nc_path->next_hop);
 +      nc_packet->skb = NULL;
 +      batadv_nc_packet_free(nc_packet);
 +}
 +
 +/**
 + * batadv_nc_sniffed_purge - Checks timestamp of given sniffed nc_packet.
 + * @bat_priv: the bat priv with all the soft interface information
 + * @nc_path: the nc path the packet belongs to
 + * @nc_packet: the nc packet to be checked
 + *
 + * Checks whether the given sniffed (overheard) nc_packet has hit its buffering
 + * timeout. If so, the packet is no longer kept and the entry deleted from the
 + * queue. Has to be called with the appropriate locks.
 + *
 + * Returns false if the entry in the fifo queue has not yet timed out,
 + * true otherwise.
 + */
 +static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv,
 +                                  struct batadv_nc_path *nc_path,
 +                                  struct batadv_nc_packet *nc_packet)
 +{
 +      unsigned long timeout = bat_priv->nc.max_buffer_time;
 +      bool res = false;
 +
 +      /* Packets are added to tail, so the remaining packets did not time
 +       * out and we can stop processing the current queue
 +       */
 +      if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE &&
 +          !batadv_has_timed_out(nc_packet->timestamp, timeout))
 +              goto out;
 +
 +      /* purge nc packet */
 +      list_del(&nc_packet->list);
 +      batadv_nc_packet_free(nc_packet);
 +
 +      res = true;
 +
 +out:
 +      return res;
 +}
 +
 +/**
 + * batadv_nc_fwd_flush - Checks the timestamp of the given nc packet.
 + * @bat_priv: the bat priv with all the soft interface information
 + * @nc_path: the nc path the packet belongs to
 + * @nc_packet: the nc packet to be checked
 + *
 + * Checks whether the given nc packet has hit its forward timeout. If so, the
 + * packet is no longer delayed, immediately sent and the entry deleted from the
 + * queue. Has to be called with the appropriate locks.
 + *
 + * Returns false if the entry in the fifo queue has not yet timed out,
 + * true otherwise.
 + */
 +static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
 +                              struct batadv_nc_path *nc_path,
 +                              struct batadv_nc_packet *nc_packet)
 +{
 +      unsigned long timeout = bat_priv->nc.max_fwd_delay;
 +
 +      /* Packets are added to tail, so the remaining packets did not time
 +       * out and we can stop processing the current queue
 +       */
 +      if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE &&
 +          !batadv_has_timed_out(nc_packet->timestamp, timeout))
 +              return false;
 +
 +      /* Send packet */
 +      batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
 +      batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
 +                         nc_packet->skb->len + ETH_HLEN);
 +      list_del(&nc_packet->list);
 +      batadv_nc_send_packet(nc_packet);
 +
 +      return true;
 +}
 +
 +/**
 + * batadv_nc_process_nc_paths - traverse the given nc packet pool and process
 + *  timed-out nc packets
 + * @bat_priv: the bat priv with all the soft interface information
 + * @hash: to be processed hash table
 + * @process_fn: Function called to process given nc packet. Should return true
 + *            to encourage this function to proceed with the next packet.
 + *            Otherwise the rest of the current queue is skipped.
 + */
 +static void
 +batadv_nc_process_nc_paths(struct batadv_priv *bat_priv,
 +                         struct batadv_hashtable *hash,
 +                         bool (*process_fn)(struct batadv_priv *,
 +                                            struct batadv_nc_path *,
 +                                            struct batadv_nc_packet *))
 +{
 +      struct hlist_head *head;
 +      struct batadv_nc_packet *nc_packet, *nc_packet_tmp;
 +      struct batadv_nc_path *nc_path;
 +      bool ret;
 +      int i;
 +
 +      if (!hash)
 +              return;
 +
 +      /* Loop hash table bins */
 +      for (i = 0; i < hash->size; i++) {
 +              head = &hash->table[i];
 +
 +              /* Loop coding paths */
 +              rcu_read_lock();
 +              hlist_for_each_entry_rcu(nc_path, head, hash_entry) {
 +                      /* Loop packets */
 +                      spin_lock_bh(&nc_path->packet_list_lock);
 +                      list_for_each_entry_safe(nc_packet, nc_packet_tmp,
 +                                               &nc_path->packet_list, list) {
 +                              ret = process_fn(bat_priv, nc_path, nc_packet);
 +                              if (!ret)
 +                                      break;
 +                      }
 +                      spin_unlock_bh(&nc_path->packet_list_lock);
 +              }
 +              rcu_read_unlock();
 +      }
 +}
 +
 +/**
 + * batadv_nc_worker - periodic task for housekeeping related to network coding
 + * @work: kernel work struct
 + */
 +static void batadv_nc_worker(struct work_struct *work)
 +{
 +      struct delayed_work *delayed_work;
 +      struct batadv_priv_nc *priv_nc;
 +      struct batadv_priv *bat_priv;
 +      unsigned long timeout;
 +
 +      delayed_work = container_of(work, struct delayed_work, work);
 +      priv_nc = container_of(delayed_work, struct batadv_priv_nc, work);
 +      bat_priv = container_of(priv_nc, struct batadv_priv, nc);
 +
 +      batadv_nc_purge_orig_hash(bat_priv);
 +      batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash,
 +                            batadv_nc_to_purge_nc_path_coding);
 +      batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash,
 +                            batadv_nc_to_purge_nc_path_decoding);
 +
 +      timeout = bat_priv->nc.max_fwd_delay;
 +
 +      if (batadv_has_timed_out(bat_priv->nc.timestamp_fwd_flush, timeout)) {
 +              batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.coding_hash,
 +                                         batadv_nc_fwd_flush);
 +              bat_priv->nc.timestamp_fwd_flush = jiffies;
 +      }
 +
 +      if (batadv_has_timed_out(bat_priv->nc.timestamp_sniffed_purge,
 +                               bat_priv->nc.max_buffer_time)) {
 +              batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.decoding_hash,
 +                                         batadv_nc_sniffed_purge);
 +              bat_priv->nc.timestamp_sniffed_purge = jiffies;
 +      }
 +
 +      /* Schedule a new check */
 +      batadv_nc_start_timer(bat_priv);
 +}
 +
 +/**
 + * batadv_can_nc_with_orig - checks whether the given orig node is suitable for
 + *  coding or not
 + * @bat_priv: the bat priv with all the soft interface information
 + * @orig_node: neighboring orig node which may be used as nc candidate
 + * @ogm_packet: incoming ogm packet also used for the checks
 + *
 + * Returns true if all of the following hold:
 + *  1) the OGM has the most recent sequence number;
 + *  2) the TTL has been decremented by exactly one;
 + *  3) the OGM was received from the first hop from orig_node;
 + *  4) the TQ value of the OGM is above bat_priv->nc.min_tq.
 + */
 +static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
 +                                  struct batadv_orig_node *orig_node,
 +                                  struct batadv_ogm_packet *ogm_packet)
 +{
 +      if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno))
 +              return false;
 +      if (orig_node->last_ttl != ogm_packet->header.ttl + 1)
 +              return false;
 +      if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
 +              return false;
 +      if (ogm_packet->tq < bat_priv->nc.min_tq)
 +              return false;
 +
 +      return true;
 +}
 +
 +/**
 + * batadv_nc_find_nc_node - search for an existing nc node and return it
 + * @orig_node: orig node originating the ogm packet
 + * @orig_neigh_node: neighboring orig node from which we received the ogm packet
 + *  (can be equal to orig_node)
 + * @in_coding: traverse incoming or outgoing network coding list
 + *
 + * Returns the nc_node if found, NULL otherwise.
 + */
 +static struct batadv_nc_node
 +*batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
 +                      struct batadv_orig_node *orig_neigh_node,
 +                      bool in_coding)
 +{
 +      struct batadv_nc_node *nc_node, *nc_node_out = NULL;
 +      struct list_head *list;
 +
 +      if (in_coding)
 +              list = &orig_neigh_node->in_coding_list;
 +      else
 +              list = &orig_neigh_node->out_coding_list;
 +
 +      /* Traverse list of nc_nodes to orig_node */
 +      rcu_read_lock();
 +      list_for_each_entry_rcu(nc_node, list, list) {
 +              if (!batadv_compare_eth(nc_node->addr, orig_node->orig))
 +                      continue;
 +
 +              if (!atomic_inc_not_zero(&nc_node->refcount))
 +                      continue;
 +
 +              /* Found a match */
 +              nc_node_out = nc_node;
 +              break;
 +      }
 +      rcu_read_unlock();
 +
 +      return nc_node_out;
 +}
 +
 +/**
 + * batadv_nc_get_nc_node - retrieves an nc node or creates the entry if it was
 + *  not found
 + * @bat_priv: the bat priv with all the soft interface information
 + * @orig_node: orig node originating the ogm packet
 + * @orig_neigh_node: neighboring orig node from which we received the ogm packet
 + *  (can be equal to orig_node)
 + * @in_coding: traverse incoming or outgoing network coding list
 + *
 + * Returns the nc_node if found or created, NULL in case of an error.
 + */
 +static struct batadv_nc_node
 +*batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
 +                     struct batadv_orig_node *orig_node,
 +                     struct batadv_orig_node *orig_neigh_node,
 +                     bool in_coding)
 +{
 +      struct batadv_nc_node *nc_node;
 +      spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
 +      struct list_head *list;
 +
 +      /* Check if nc_node is already added */
 +      nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
 +
 +      /* Node found */
 +      if (nc_node)
 +              return nc_node;
 +
 +      nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
 +      if (!nc_node)
 +              return NULL;
 +
 +      if (!atomic_inc_not_zero(&orig_neigh_node->refcount))
 +              goto free;
 +
 +      /* Initialize nc_node */
 +      INIT_LIST_HEAD(&nc_node->list);
 +      memcpy(nc_node->addr, orig_node->orig, ETH_ALEN);
 +      nc_node->orig_node = orig_neigh_node;
 +      atomic_set(&nc_node->refcount, 2);
 +
 +      /* Select ingoing or outgoing coding node */
 +      if (in_coding) {
 +              lock = &orig_neigh_node->in_coding_list_lock;
 +              list = &orig_neigh_node->in_coding_list;
 +      } else {
 +              lock = &orig_neigh_node->out_coding_list_lock;
 +              list = &orig_neigh_node->out_coding_list;
 +      }
 +
 +      batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
 +                 nc_node->addr, nc_node->orig_node->orig);
 +
 +      /* Add nc_node to orig_node */
 +      spin_lock_bh(lock);
 +      list_add_tail_rcu(&nc_node->list, list);
 +      spin_unlock_bh(lock);
 +
 +      return nc_node;
 +
 +free:
 +      kfree(nc_node);
 +      return NULL;
 +}
 +
 +/**
 + * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node structs
 + *  (best called on incoming OGMs)
 + * @bat_priv: the bat priv with all the soft interface information
 + * @orig_node: orig node originating the ogm packet
 + * @orig_neigh_node: neighboring orig node from which we received the ogm packet
 + *  (can be equal to orig_node)
 + * @ogm_packet: incoming ogm packet
 + * @is_single_hop_neigh: orig_node is a single hop neighbor
 + */
 +void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
 +                            struct batadv_orig_node *orig_node,
 +                            struct batadv_orig_node *orig_neigh_node,
 +                            struct batadv_ogm_packet *ogm_packet,
 +                            int is_single_hop_neigh)
 +{
 +      struct batadv_nc_node *in_nc_node = NULL, *out_nc_node = NULL;
 +
 +      /* Check if network coding is enabled */
 +      if (!atomic_read(&bat_priv->network_coding))
 +              goto out;
 +
 +      /* accept ogms from 'good' neighbors and single hop neighbors */
 +      if (!batadv_can_nc_with_orig(bat_priv, orig_node, ogm_packet) &&
 +          !is_single_hop_neigh)
 +              goto out;
 +
 +      /* Add orig_node as in_nc_node on hop */
 +      in_nc_node = batadv_nc_get_nc_node(bat_priv, orig_node,
 +                                         orig_neigh_node, true);
 +      if (!in_nc_node)
 +              goto out;
 +
 +      in_nc_node->last_seen = jiffies;
 +
 +      /* Add hop as out_nc_node on orig_node */
 +      out_nc_node = batadv_nc_get_nc_node(bat_priv, orig_neigh_node,
 +                                          orig_node, false);
 +      if (!out_nc_node)
 +              goto out;
 +
 +      out_nc_node->last_seen = jiffies;
 +
 +out:
 +      if (in_nc_node)
 +              batadv_nc_node_free_ref(in_nc_node);
 +      if (out_nc_node)
 +              batadv_nc_node_free_ref(out_nc_node);
 +}
 +
 +/**
 + * batadv_nc_get_path - get existing nc_path or allocate a new one
 + * @bat_priv: the bat priv with all the soft interface information
 + * @hash: hash table containing the nc path
 + * @src: ethernet source address - first half of the nc path search key
 + * @dst: ethernet destination address - second half of the nc path search key
 + *
 + * Returns pointer to nc_path if the path was found or created, returns NULL
 + * on error.
 + */
 +static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
 +                                               struct batadv_hashtable *hash,
 +                                               uint8_t *src,
 +                                               uint8_t *dst)
 +{
 +      int hash_added;
 +      struct batadv_nc_path *nc_path, nc_path_key;
 +
 +      batadv_nc_hash_key_gen(&nc_path_key, src, dst);
 +
 +      /* Search for existing nc_path */
 +      nc_path = batadv_nc_hash_find(hash, (void *)&nc_path_key);
 +
 +      if (nc_path) {
 +              /* Set timestamp to delay removal of nc_path */
 +              nc_path->last_valid = jiffies;
 +              return nc_path;
 +      }
 +
 +      /* No existing nc_path was found; create a new one */
 +      nc_path = kzalloc(sizeof(*nc_path), GFP_ATOMIC);
 +
 +      if (!nc_path)
 +              return NULL;
 +
 +      /* Initialize nc_path */
 +      INIT_LIST_HEAD(&nc_path->packet_list);
 +      spin_lock_init(&nc_path->packet_list_lock);
 +      atomic_set(&nc_path->refcount, 2);
 +      nc_path->last_valid = jiffies;
 +      memcpy(nc_path->next_hop, dst, ETH_ALEN);
 +      memcpy(nc_path->prev_hop, src, ETH_ALEN);
 +
 +      batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_path %pM -> %pM\n",
 +                 nc_path->prev_hop,
 +                 nc_path->next_hop);
 +
 +      /* Add nc_path to hash table */
 +      hash_added = batadv_hash_add(hash, batadv_nc_hash_compare,
 +                                   batadv_nc_hash_choose, &nc_path_key,
 +                                   &nc_path->hash_entry);
 +
 +      if (hash_added < 0) {
 +              kfree(nc_path);
 +              return NULL;
 +      }
 +
 +      return nc_path;
 +}
 +
 +/**
 + * batadv_nc_random_weight_tq - scale the receiver's TQ value to avoid unfair
 + *  selection of a receiver with slightly lower TQ than the other
 + * @tq: to be weighted tq value
 + */
 +static uint8_t batadv_nc_random_weight_tq(uint8_t tq)
 +{
 +      uint8_t rand_val;
 +      uint32_t rand_tq; /* wider than u8 so the product below is not truncated */
 +
 +      get_random_bytes(&rand_val, sizeof(rand_val));
 +
 +      /* randomize the estimated packet loss (max TQ - estimated TQ) */
 +      rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq);
 +
 +      /* normalize the randomized packet loss */
 +      rand_tq /= BATADV_TQ_MAX_VALUE;
 +
 +      /* convert to (randomized) estimated tq again */
 +      return BATADV_TQ_MAX_VALUE - rand_tq;
 +}
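 +
 +/* Worked example (illustrative): with tq = 205 the estimated loss is 50;
 + * rand_val = 128 scales that to 128 * 50 / 255 = 25, so the weighted TQ
 + * becomes 255 - 25 = 230, i.e. a value drawn between the real TQ and
 + * BATADV_TQ_MAX_VALUE.
 + */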
 +
 +/**
 + * batadv_nc_memxor - XOR destination with source
 + * @dst: byte array to XOR into
 + * @src: byte array to XOR from
 + * @len: length of destination array
 + */
 +static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
 +{
 +      unsigned int i;
 +
 +      for (i = 0; i < len; ++i)
 +              dst[i] ^= src[i];
 +}
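 +
 +/* Coding and decoding both rely on the XOR involution (a ^ b) ^ b == a.
 + * A sketch of the round trip (illustrative only):
 + *
 + *   batadv_nc_memxor(buf, other, len);    coded:   buf = a ^ b
 + *   batadv_nc_memxor(buf, other, len);    decoded: buf = a again
 + */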
 +
 +/**
 + * batadv_nc_code_packets - code a received unicast_packet with an nc packet
 + *  into a coded_packet and send it
 + * @bat_priv: the bat priv with all the soft interface information
 + * @skb: data skb to forward
 + * @ethhdr: pointer to the ethernet header inside the skb
 + * @nc_packet: structure containing the packet the skb can be coded with
 + * @neigh_node: next hop to forward packet to
 + *
 + * Returns true if both packets are consumed, false otherwise.
 + */
 +static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
 +                                 struct sk_buff *skb,
 +                                 struct ethhdr *ethhdr,
 +                                 struct batadv_nc_packet *nc_packet,
 +                                 struct batadv_neigh_node *neigh_node)
 +{
 +      uint8_t tq_weighted_neigh, tq_weighted_coding;
 +      struct sk_buff *skb_dest, *skb_src;
 +      struct batadv_unicast_packet *packet1;
 +      struct batadv_unicast_packet *packet2;
 +      struct batadv_coded_packet *coded_packet;
 +      struct batadv_neigh_node *neigh_tmp, *router_neigh;
 +      struct batadv_neigh_node *router_coding = NULL;
 +      uint8_t *first_source, *first_dest, *second_source, *second_dest;
 +      __be32 packet_id1, packet_id2;
 +      size_t count;
 +      bool res = false;
 +      int coding_len;
 +      int unicast_size = sizeof(*packet1);
 +      int coded_size = sizeof(*coded_packet);
 +      int header_add = coded_size - unicast_size;
 +
 +      router_neigh = batadv_orig_node_get_router(neigh_node->orig_node);
 +      if (!router_neigh)
 +              goto out;
 +
 +      neigh_tmp = nc_packet->neigh_node;
 +      router_coding = batadv_orig_node_get_router(neigh_tmp->orig_node);
 +      if (!router_coding)
 +              goto out;
 +
 +      tq_weighted_neigh = batadv_nc_random_weight_tq(router_neigh->tq_avg);
 +      tq_weighted_coding = batadv_nc_random_weight_tq(router_coding->tq_avg);
 +
 +      /* Select one destination for the MAC-header dst-field based on
 +       * weighted TQ-values.
 +       */
 +      if (tq_weighted_neigh >= tq_weighted_coding) {
 +              /* Destination from nc_packet is selected for MAC-header */
 +              first_dest = nc_packet->nc_path->next_hop;
 +              first_source = nc_packet->nc_path->prev_hop;
 +              second_dest = neigh_node->addr;
 +              second_source = ethhdr->h_source;
 +              packet1 = (struct batadv_unicast_packet *)nc_packet->skb->data;
 +              packet2 = (struct batadv_unicast_packet *)skb->data;
 +              packet_id1 = nc_packet->packet_id;
 +              packet_id2 = batadv_skb_crc32(skb,
 +                                            skb->data + sizeof(*packet2));
 +      } else {
 +              /* Destination for skb is selected for MAC-header */
 +              first_dest = neigh_node->addr;
 +              first_source = ethhdr->h_source;
 +              second_dest = nc_packet->nc_path->next_hop;
 +              second_source = nc_packet->nc_path->prev_hop;
 +              packet1 = (struct batadv_unicast_packet *)skb->data;
 +              packet2 = (struct batadv_unicast_packet *)nc_packet->skb->data;
 +              packet_id1 = batadv_skb_crc32(skb,
 +                                            skb->data + sizeof(*packet1));
 +              packet_id2 = nc_packet->packet_id;
 +      }
 +
 +      /* Instead of zero padding the smallest data buffer, we
 +       * code into the largest.
 +       */
 +      if (skb->len <= nc_packet->skb->len) {
 +              skb_dest = nc_packet->skb;
 +              skb_src = skb;
 +      } else {
 +              skb_dest = skb;
 +              skb_src = nc_packet->skb;
 +      }
 +
 +      /* coding_len is used when decoding the shorter packet */
 +      coding_len = skb_src->len - unicast_size;
 +
 +      if (skb_linearize(skb_dest) < 0 || skb_linearize(skb_src) < 0)
 +              goto out;
 +
 +      skb_push(skb_dest, header_add);
 +
 +      coded_packet = (struct batadv_coded_packet *)skb_dest->data;
 +      skb_reset_mac_header(skb_dest);
 +
 +      coded_packet->header.packet_type = BATADV_CODED;
 +      coded_packet->header.version = BATADV_COMPAT_VERSION;
 +      coded_packet->header.ttl = packet1->header.ttl;
 +
 +      /* Info about first unicast packet */
 +      memcpy(coded_packet->first_source, first_source, ETH_ALEN);
 +      memcpy(coded_packet->first_orig_dest, packet1->dest, ETH_ALEN);
 +      coded_packet->first_crc = packet_id1;
 +      coded_packet->first_ttvn = packet1->ttvn;
 +
 +      /* Info about second unicast packet */
 +      memcpy(coded_packet->second_dest, second_dest, ETH_ALEN);
 +      memcpy(coded_packet->second_source, second_source, ETH_ALEN);
 +      memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN);
 +      coded_packet->second_crc = packet_id2;
 +      coded_packet->second_ttl = packet2->header.ttl;
 +      coded_packet->second_ttvn = packet2->ttvn;
 +      coded_packet->coded_len = htons(coding_len);
 +
 +      /* This is where the magic happens: Code skb_src into skb_dest */
 +      batadv_nc_memxor(skb_dest->data + coded_size,
 +                       skb_src->data + unicast_size, coding_len);
 +
 +      /* Update counters accordingly */
 +      if (BATADV_SKB_CB(skb_src)->decoded &&
 +          BATADV_SKB_CB(skb_dest)->decoded) {
 +              /* Both packets are recoded */
 +              count = skb_src->len + ETH_HLEN;
 +              count += skb_dest->len + ETH_HLEN;
 +              batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE, 2);
 +              batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES, count);
 +      } else if (!BATADV_SKB_CB(skb_src)->decoded &&
 +                 !BATADV_SKB_CB(skb_dest)->decoded) {
 +              /* Both packets are newly coded */
 +              count = skb_src->len + ETH_HLEN;
 +              count += skb_dest->len + ETH_HLEN;
 +              batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE, 2);
 +              batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES, count);
 +      } else if (BATADV_SKB_CB(skb_src)->decoded &&
 +                 !BATADV_SKB_CB(skb_dest)->decoded) {
 +              /* skb_src recoded and skb_dest is newly coded */
 +              batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE);
 +              batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES,
 +                                 skb_src->len + ETH_HLEN);
 +              batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE);
 +              batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES,
 +                                 skb_dest->len + ETH_HLEN);
 +      } else if (!BATADV_SKB_CB(skb_src)->decoded &&
 +                 BATADV_SKB_CB(skb_dest)->decoded) {
 +              /* skb_src is newly coded and skb_dest is recoded */
 +              batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE);
 +              batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES,
 +                                 skb_src->len + ETH_HLEN);
 +              batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE);
 +              batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES,
 +                                 skb_dest->len + ETH_HLEN);
 +      }
 +
 +      /* skb_src is now coded into skb_dest, so free it */
 +      kfree_skb(skb_src);
 +
 +      /* avoid duplicate free of skb from nc_packet */
 +      nc_packet->skb = NULL;
 +      batadv_nc_packet_free(nc_packet);
 +
 +      /* Send the coded packet and return true */
 +      batadv_send_skb_packet(skb_dest, neigh_node->if_incoming, first_dest);
 +      res = true;
 +out:
 +      if (router_neigh)
 +              batadv_neigh_node_free_ref(router_neigh);
 +      if (router_coding)
 +              batadv_neigh_node_free_ref(router_coding);
 +      return res;
 +}
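 +
 +/* Resulting layout (sketch): the batadv_coded_packet header carries the
 + * metadata of both unicast packets (CRCs, TTVNs, the second destination),
 + * and the payload from coded_size onwards is the longer payload XORed with
 + * the shorter one; a receiver that still holds one plain packet rebuilds
 + * the other by XORing again and restoring the header from that metadata.
 + */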
 +
 +/**
 + * batadv_nc_skb_coding_possible - check if an skb may be used for coding
 + * @skb: data skb to forward
 + * @dst: destination mac address of the other skb to code with
 + * @src: source mac address of skb
 + *
 + * Whenever we network code a packet we have to check whether we received it in
 + * a network coded form. If so, we may not be able to use it for coding because
 + * some neighbors may also have received (overheard) the packet in the network
 + * coded form without being able to decode it. It is hard to know which of the
 + * neighboring nodes was able to decode the packet, therefore we can only
 + * re-code the packet if the source of the previous encoded packet is involved.
 + * Since the source encoded the packet we can be certain it has all necessary
 + * decode information.
 + *
 + * Returns true if coding of a decoded packet is allowed.
 + */
 +static bool batadv_nc_skb_coding_possible(struct sk_buff *skb,
 +                                        uint8_t *dst, uint8_t *src)
 +{
 +      if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
 +              return false;
 +      else
 +              return true;
 +}
 +
 +/**
 + * batadv_nc_path_search - Find the coding path matching in_nc_node and
 + *  out_nc_node to retrieve a buffered packet that can be used for coding.
 + * @bat_priv: the bat priv with all the soft interface information
 + * @in_nc_node: pointer to skb next hop's neighbor nc node
 + * @out_nc_node: pointer to skb source's neighbor nc node
 + * @skb: data skb to forward
 + * @eth_dst: next hop mac address of skb
 + *
 + * Returns the nc packet to code with if one is found, NULL otherwise.
 + */
 +static struct batadv_nc_packet *
 +batadv_nc_path_search(struct batadv_priv *bat_priv,
 +                    struct batadv_nc_node *in_nc_node,
 +                    struct batadv_nc_node *out_nc_node,
 +                    struct sk_buff *skb,
 +                    uint8_t *eth_dst)
 +{
 +      struct batadv_nc_path *nc_path, nc_path_key;
 +      struct batadv_nc_packet *nc_packet_out = NULL;
 +      struct batadv_nc_packet *nc_packet, *nc_packet_tmp;
 +      struct batadv_hashtable *hash = bat_priv->nc.coding_hash;
 +      int idx;
 +
 +      if (!hash)
 +              return NULL;
 +
 +      /* Build a partial nc_path to use only as the hash search key */
 +      batadv_nc_hash_key_gen(&nc_path_key, in_nc_node->addr,
 +                             out_nc_node->addr);
 +      idx = batadv_nc_hash_choose(&nc_path_key, hash->size);
 +
 +      /* Check for coding opportunities in this nc_path */
 +      rcu_read_lock();
 +      hlist_for_each_entry_rcu(nc_path, &hash->table[idx], hash_entry) {
 +              if (!batadv_compare_eth(nc_path->prev_hop, in_nc_node->addr))
 +                      continue;
 +
 +              if (!batadv_compare_eth(nc_path->next_hop, out_nc_node->addr))
 +                      continue;
 +
 +              spin_lock_bh(&nc_path->packet_list_lock);
 +              if (list_empty(&nc_path->packet_list)) {
 +                      spin_unlock_bh(&nc_path->packet_list_lock);
 +                      continue;
 +              }
 +
 +              list_for_each_entry_safe(nc_packet, nc_packet_tmp,
 +                                       &nc_path->packet_list, list) {
 +                      if (!batadv_nc_skb_coding_possible(nc_packet->skb,
 +                                                         eth_dst,
 +                                                         in_nc_node->addr))
 +                              continue;
 +
 +                      /* Coding opportunity is found! */
 +                      list_del(&nc_packet->list);
 +                      nc_packet_out = nc_packet;
 +                      break;
 +              }
 +
 +              spin_unlock_bh(&nc_path->packet_list_lock);
 +              break;
 +      }
 +      rcu_read_unlock();
 +
 +      return nc_packet_out;
 +}
 +
 +/**
 + * batadv_nc_skb_src_search - Loops through the list of neighboring nodes of the
 + *  skb's sender (may be equal to the originator).
 + * @bat_priv: the bat priv with all the soft interface information
 + * @skb: data skb to forward
 + * @eth_dst: next hop mac address of skb
 + * @eth_src: source mac address of skb
 + * @in_nc_node: pointer to skb next hop's neighbor nc node
 + *
 + * Returns an nc packet if a suitable coding packet was found, NULL otherwise.
 + */
 +static struct batadv_nc_packet *
 +batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
 +                       struct sk_buff *skb,
 +                       uint8_t *eth_dst,
 +                       uint8_t *eth_src,
 +                       struct batadv_nc_node *in_nc_node)
 +{
 +      struct batadv_orig_node *orig_node;
 +      struct batadv_nc_node *out_nc_node;
 +      struct batadv_nc_packet *nc_packet = NULL;
 +
 +      orig_node = batadv_orig_hash_find(bat_priv, eth_src);
 +      if (!orig_node)
 +              return NULL;
 +
 +      rcu_read_lock();
 +      list_for_each_entry_rcu(out_nc_node,
 +                              &orig_node->out_coding_list, list) {
 +              /* Check if the skb is decoded and if recoding is possible */
 +              if (!batadv_nc_skb_coding_possible(skb,
 +                                                 out_nc_node->addr, eth_src))
 +                      continue;
 +
 +              /* Search for an opportunity in this nc_path */
 +              nc_packet = batadv_nc_path_search(bat_priv, in_nc_node,
 +                                                out_nc_node, skb, eth_dst);
 +              if (nc_packet)
 +                      break;
 +      }
 +      rcu_read_unlock();
 +
 +      batadv_orig_node_free_ref(orig_node);
 +      return nc_packet;
 +}
 +
 +/**
 + * batadv_nc_skb_store_before_coding - set the ethernet src and dst of the
 + *  unicast skb before it is stored for use in later decoding
 + * @bat_priv: the bat priv with all the soft interface information
 + * @skb: data skb to store
 + * @eth_dst_new: new destination mac address of skb
 + */
 +static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
 +                                            struct sk_buff *skb,
 +                                            uint8_t *eth_dst_new)
 +{
 +      struct ethhdr *ethhdr;
 +
 +      /* Copy skb header to change the mac header */
 +      skb = pskb_copy(skb, GFP_ATOMIC);
 +      if (!skb)
 +              return;
 +
 +      /* Set the mac header as if we actually sent the packet uncoded */
 +      ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +      memcpy(ethhdr->h_source, ethhdr->h_dest, ETH_ALEN);
 +      memcpy(ethhdr->h_dest, eth_dst_new, ETH_ALEN);
 +
 +      /* Set data pointer to MAC header to mimic packets from our tx path */
 +      skb_push(skb, ETH_HLEN);
 +
 +      /* Add the packet to the decoding packet pool */
 +      batadv_nc_skb_store_for_decoding(bat_priv, skb);
 +
 +      /* batadv_nc_skb_store_for_decoding() clones the skb, so we must free
 +       * our ref
 +       */
 +      kfree_skb(skb);
 +}
 +
 +/**
 + * batadv_nc_skb_dst_search - Loops through list of neighboring nodes to dst.
 + * @skb: data skb to forward
 + * @neigh_node: next hop to forward packet to
 + * @ethhdr: pointer to the ethernet header inside the skb
 + *
 + * Loops through list of neighboring nodes the next hop has a good connection to
 + * (receives OGMs with a sufficient quality). We need to find a neighbor of our
 + * next hop that potentially sent a packet which our next hop also received
 + * (overheard) and has stored for later decoding.
 + *
 + * Returns true if the skb was consumed (encoded packet sent) or false otherwise
 + */
 +static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
 +                                   struct batadv_neigh_node *neigh_node,
 +                                   struct ethhdr *ethhdr)
 +{
 +      struct net_device *netdev = neigh_node->if_incoming->soft_iface;
 +      struct batadv_priv *bat_priv = netdev_priv(netdev);
 +      struct batadv_orig_node *orig_node = neigh_node->orig_node;
 +      struct batadv_nc_node *nc_node;
 +      struct batadv_nc_packet *nc_packet = NULL;
 +
 +      rcu_read_lock();
 +      list_for_each_entry_rcu(nc_node, &orig_node->in_coding_list, list) {
 +              /* Search for coding opportunity with this in_nc_node */
 +              nc_packet = batadv_nc_skb_src_search(bat_priv, skb,
 +                                                   neigh_node->addr,
 +                                                   ethhdr->h_source, nc_node);
 +
 +              /* Opportunity was found, so stop searching */
 +              if (nc_packet)
 +                      break;
 +      }
 +      rcu_read_unlock();
 +
 +      if (!nc_packet)
 +              return false;
 +
 +      /* Save packets for later decoding */
 +      batadv_nc_skb_store_before_coding(bat_priv, skb,
 +                                        neigh_node->addr);
 +      batadv_nc_skb_store_before_coding(bat_priv, nc_packet->skb,
 +                                        nc_packet->neigh_node->addr);
 +
 +      /* Code and send packets */
 +      if (batadv_nc_code_packets(bat_priv, skb, ethhdr, nc_packet,
 +                                 neigh_node))
 +              return true;
 +
 +      /* Out of memory? Coding failed, so we have to free the buffered
 +       * packet to avoid memory leaks. The skb passed as argument will be
 +       * dealt with by the calling function.
 +       */
 +      batadv_nc_send_packet(nc_packet);
 +      return false;
 +}
 +
 +/**
 + * batadv_nc_skb_add_to_path - buffer skb for later encoding / decoding
 + * @skb: skb to add to path
 + * @nc_path: path to add skb to
 + * @neigh_node: next hop to forward packet to
 + * @packet_id: checksum to identify packet
 + *
 + * Returns true if the packet was buffered or false in case of an error.
 + */
 +static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
 +                                    struct batadv_nc_path *nc_path,
 +                                    struct batadv_neigh_node *neigh_node,
 +                                    __be32 packet_id)
 +{
 +      struct batadv_nc_packet *nc_packet;
 +
 +      nc_packet = kzalloc(sizeof(*nc_packet), GFP_ATOMIC);
 +      if (!nc_packet)
 +              return false;
 +
 +      /* Initialize nc_packet */
 +      nc_packet->timestamp = jiffies;
 +      nc_packet->packet_id = packet_id;
 +      nc_packet->skb = skb;
 +      nc_packet->neigh_node = neigh_node;
 +      nc_packet->nc_path = nc_path;
 +
 +      /* Add coding packet to list */
 +      spin_lock_bh(&nc_path->packet_list_lock);
 +      list_add_tail(&nc_packet->list, &nc_path->packet_list);
 +      spin_unlock_bh(&nc_path->packet_list_lock);
 +
 +      return true;
 +}
 +
 +/**
 + * batadv_nc_skb_forward - try to code a packet or add it to the coding packet
 + *  buffer
 + * @skb: data skb to forward
 + * @neigh_node: next hop to forward packet to
 + * @ethhdr: pointer to the ethernet header inside the skb
 + *
 + * Returns true if the skb was consumed (encoded packet sent) or false otherwise
 + */
 +bool batadv_nc_skb_forward(struct sk_buff *skb,
 +                         struct batadv_neigh_node *neigh_node,
 +                         struct ethhdr *ethhdr)
 +{
 +      const struct net_device *netdev = neigh_node->if_incoming->soft_iface;
 +      struct batadv_priv *bat_priv = netdev_priv(netdev);
 +      struct batadv_unicast_packet *packet;
 +      struct batadv_nc_path *nc_path;
 +      __be32 packet_id;
 +      u8 *payload;
 +
 +      /* Check if network coding is enabled */
 +      if (!atomic_read(&bat_priv->network_coding))
 +              goto out;
 +
 +      /* We only handle unicast packets */
 +      payload = skb_network_header(skb);
 +      packet = (struct batadv_unicast_packet *)payload;
 +      if (packet->header.packet_type != BATADV_UNICAST)
 +              goto out;
 +
 +      /* Try to find a coding opportunity and send the skb if one is found */
 +      if (batadv_nc_skb_dst_search(skb, neigh_node, ethhdr))
 +              return true;
 +
 +      /* Find or create a nc_path for this src-dst pair */
 +      nc_path = batadv_nc_get_path(bat_priv,
 +                                   bat_priv->nc.coding_hash,
 +                                   ethhdr->h_source,
 +                                   neigh_node->addr);
 +
 +      if (!nc_path)
 +              goto out;
 +
 +      /* Add skb to nc_path */
 +      packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
 +      if (!batadv_nc_skb_add_to_path(skb, nc_path, neigh_node, packet_id))
 +              goto free_nc_path;
 +
 +      /* Packet is consumed */
 +      return true;
 +
 +free_nc_path:
 +      batadv_nc_path_free_ref(nc_path);
 +out:
 +      /* Packet is not consumed */
 +      return false;
 +}
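
The packet_id used above comes from batadv_skb_crc32, a CRC over the payload behind the unicast header, so both ends of a path can later pair buffered packets by id alone. As a rough standalone illustration, a plain bitwise CRC32 over a byte buffer (IEEE polynomial 0xEDB88320; the kernel helper is skb-aware and, as far as the batman-adv code suggests, uses the crc32c flavour):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC32 (IEEE 802.3 polynomial, reflected form). */
static uint32_t crc32_buf(const uint8_t *buf, size_t len)
{
        uint32_t crc = 0xFFFFFFFFu;

        for (size_t i = 0; i < len; i++) {
                crc ^= buf[i];
                for (int k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
        }
        return ~crc;
}

int main(void)
{
        const uint8_t payload[] = "example payload";

        /* Both sender and receiver compute the same id over the same
         * bytes, so the id alone is enough to pair the packets later. */
        printf("packet_id = 0x%08x\n",
               crc32_buf(payload, sizeof(payload) - 1));
        return 0;
}
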
 +
 +/**
 + * batadv_nc_skb_store_for_decoding - save a clone of the skb which can be used
 + *  when decoding coded packets
 + * @bat_priv: the bat priv with all the soft interface information
 + * @skb: data skb to store
 + */
 +void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
 +                                    struct sk_buff *skb)
 +{
 +      struct batadv_unicast_packet *packet;
 +      struct batadv_nc_path *nc_path;
 +      struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +      __be32 packet_id;
 +      u8 *payload;
 +
 +      /* Check if network coding is enabled */
 +      if (!atomic_read(&bat_priv->network_coding))
 +              goto out;
 +
 +      /* Check for supported packet type */
 +      payload = skb_network_header(skb);
 +      packet = (struct batadv_unicast_packet *)payload;
 +      if (packet->header.packet_type != BATADV_UNICAST)
 +              goto out;
 +
 +      /* Find an existing nc_path or create a new one */
 +      nc_path = batadv_nc_get_path(bat_priv,
 +                                   bat_priv->nc.decoding_hash,
 +                                   ethhdr->h_source,
 +                                   ethhdr->h_dest);
 +
 +      if (!nc_path)
 +              goto out;
 +
 +      /* Clone skb and adjust skb->data to point at batman header */
 +      skb = skb_clone(skb, GFP_ATOMIC);
 +      if (unlikely(!skb))
 +              goto free_nc_path;
 +
 +      if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
 +              goto free_skb;
 +
 +      if (unlikely(!skb_pull_rcsum(skb, ETH_HLEN)))
 +              goto free_skb;
 +
 +      /* Add skb to nc_path */
 +      packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
 +      if (!batadv_nc_skb_add_to_path(skb, nc_path, NULL, packet_id))
 +              goto free_skb;
 +
 +      batadv_inc_counter(bat_priv, BATADV_CNT_NC_BUFFER);
 +      return;
 +
 +free_skb:
 +      kfree_skb(skb);
 +free_nc_path:
 +      batadv_nc_path_free_ref(nc_path);
 +out:
 +      return;
 +}
 +
 +/**
 + * batadv_nc_skb_store_sniffed_unicast - check if a received unicast packet
 + *  should be saved in the decoding buffer and, if so, store it there
 + * @bat_priv: the bat priv with all the soft interface information
 + * @skb: unicast skb to store
 + */
 +void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
 +                                       struct sk_buff *skb)
 +{
 +      struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
-       if (batadv_is_my_mac(ethhdr->h_dest))
++      if (batadv_is_my_mac(bat_priv, ethhdr->h_dest))
 +              return;
 +
 +      /* Set data pointer to MAC header to mimic packets from our tx path */
 +      skb_push(skb, ETH_HLEN);
 +
 +      batadv_nc_skb_store_for_decoding(bat_priv, skb);
 +}
 +
 +/**
 + * batadv_nc_skb_decode_packet - decode given skb using the decode data stored
 + *  in nc_packet
++ * @bat_priv: the bat priv with all the soft interface information
 + * @skb: unicast skb to decode
 + * @nc_packet: decode data needed to decode the skb
 + *
 + * Returns pointer to decoded unicast packet if the packet was decoded or NULL
 + * in case of an error.
 + */
 +static struct batadv_unicast_packet *
- batadv_nc_skb_decode_packet(struct sk_buff *skb,
++batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 +                          struct batadv_nc_packet *nc_packet)
 +{
 +      const int h_size = sizeof(struct batadv_unicast_packet);
 +      const int h_diff = sizeof(struct batadv_coded_packet) - h_size;
 +      struct batadv_unicast_packet *unicast_packet;
 +      struct batadv_coded_packet coded_packet_tmp;
 +      struct ethhdr *ethhdr, ethhdr_tmp;
 +      uint8_t *orig_dest, ttl, ttvn;
 +      unsigned int coding_len;
 +
 +      /* Save headers temporarily */
 +      memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
 +      memcpy(&ethhdr_tmp, skb_mac_header(skb), sizeof(ethhdr_tmp));
 +
 +      if (skb_cow(skb, 0) < 0)
 +              return NULL;
 +
 +      if (unlikely(!skb_pull_rcsum(skb, h_diff)))
 +              return NULL;
 +
 +      /* Data points to the batman header, so set the mac header 14 bytes
 +       * before it and the network header to data
 +       */
 +      skb_set_mac_header(skb, -ETH_HLEN);
 +      skb_reset_network_header(skb);
 +
 +      /* Reconstruct original mac header */
 +      ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +      memcpy(ethhdr, &ethhdr_tmp, sizeof(*ethhdr));
 +
 +      /* Select the correct unicast header information based on the location
 +       * of our mac address in the coded_packet header
 +       */
-       if (batadv_is_my_mac(coded_packet_tmp.second_dest)) {
++      if (batadv_is_my_mac(bat_priv, coded_packet_tmp.second_dest)) {
 +              /* If we are the second destination the packet was overheard,
 +               * so the Ethernet address must be copied to h_dest and
 +               * pkt_type changed from PACKET_OTHERHOST to PACKET_HOST
 +               */
 +              memcpy(ethhdr->h_dest, coded_packet_tmp.second_dest, ETH_ALEN);
 +              skb->pkt_type = PACKET_HOST;
 +
 +              orig_dest = coded_packet_tmp.second_orig_dest;
 +              ttl = coded_packet_tmp.second_ttl;
 +              ttvn = coded_packet_tmp.second_ttvn;
 +      } else {
 +              orig_dest = coded_packet_tmp.first_orig_dest;
 +              ttl = coded_packet_tmp.header.ttl;
 +              ttvn = coded_packet_tmp.first_ttvn;
 +      }
 +
 +      coding_len = ntohs(coded_packet_tmp.coded_len);
 +
 +      if (coding_len > skb->len)
 +              return NULL;
 +
 +      /* Here the magic is reversed:
 +       *   extract the missing packet from the received coded packet
 +       */
 +      batadv_nc_memxor(skb->data + h_size,
 +                       nc_packet->skb->data + h_size,
 +                       coding_len);
 +
 +      /* Resize the decoded skb if it was coded with a larger packet */
 +      if (nc_packet->skb->len > coding_len + h_size)
 +              pskb_trim_rcsum(skb, coding_len + h_size);
 +
 +      /* Create decoded unicast packet */
 +      unicast_packet = (struct batadv_unicast_packet *)skb->data;
 +      unicast_packet->header.packet_type = BATADV_UNICAST;
 +      unicast_packet->header.version = BATADV_COMPAT_VERSION;
 +      unicast_packet->header.ttl = ttl;
 +      memcpy(unicast_packet->dest, orig_dest, ETH_ALEN);
 +      unicast_packet->ttvn = ttvn;
 +
 +      batadv_nc_packet_free(nc_packet);
 +      return unicast_packet;
 +}
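
The "magic" reversed by batadv_nc_memxor above is plain XOR coding: the relay transmitted A ^ B, and a node that still holds A in its decoding buffer recovers B because (A ^ B) ^ A == B. A self-contained sketch of that recovery step, with memxor as an illustrative stand-in for the kernel helper:

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* dst ^= src over len bytes, the core of the (de)coding step. */
static void memxor(unsigned char *dst, const unsigned char *src, size_t len)
{
        for (size_t i = 0; i < len; i++)
                dst[i] ^= src[i];
}

int main(void)
{
        unsigned char a[8] = "AAAAAAA";        /* packet we kept a copy of */
        unsigned char b[8] = "payload";        /* packet we want to recover */
        unsigned char coded[8];

        /* Relay: transmit the XOR of both packets. */
        memcpy(coded, a, sizeof(coded));
        memxor(coded, b, sizeof(coded));

        /* Receiver: XOR the coded packet with the known one. */
        memxor(coded, a, sizeof(coded));
        assert(memcmp(coded, b, sizeof(coded)) == 0);
        return 0;
}
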
 +
 +/**
 + * batadv_nc_find_decoding_packet - search through buffered decoding data to
 + *  find the data needed to decode the coded packet
 + * @bat_priv: the bat priv with all the soft interface information
 + * @ethhdr: pointer to the ethernet header inside the coded packet
 + * @coded: coded packet we try to find decode data for
 + *
 + * Returns pointer to nc packet if the needed data was found or NULL otherwise.
 + */
 +static struct batadv_nc_packet *
 +batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
 +                             struct ethhdr *ethhdr,
 +                             struct batadv_coded_packet *coded)
 +{
 +      struct batadv_hashtable *hash = bat_priv->nc.decoding_hash;
 +      struct batadv_nc_packet *tmp_nc_packet, *nc_packet = NULL;
 +      struct batadv_nc_path *nc_path, nc_path_key;
 +      uint8_t *dest, *source;
 +      __be32 packet_id;
 +      int index;
 +
 +      if (!hash)
 +              return NULL;
 +
 +      /* Select the correct packet id based on the location of our mac-addr */
 +      dest = ethhdr->h_source;
-       if (!batadv_is_my_mac(coded->second_dest)) {
++      if (!batadv_is_my_mac(bat_priv, coded->second_dest)) {
 +              source = coded->second_source;
 +              packet_id = coded->second_crc;
 +      } else {
 +              source = coded->first_source;
 +              packet_id = coded->first_crc;
 +      }
 +
 +      batadv_nc_hash_key_gen(&nc_path_key, source, dest);
 +      index = batadv_nc_hash_choose(&nc_path_key, hash->size);
 +
 +      /* Search for matching coding path */
 +      rcu_read_lock();
 +      hlist_for_each_entry_rcu(nc_path, &hash->table[index], hash_entry) {
 +              /* Find matching nc_packet */
 +              spin_lock_bh(&nc_path->packet_list_lock);
 +              list_for_each_entry(tmp_nc_packet,
 +                                  &nc_path->packet_list, list) {
 +                      if (packet_id == tmp_nc_packet->packet_id) {
 +                              list_del(&tmp_nc_packet->list);
 +
 +                              nc_packet = tmp_nc_packet;
 +                              break;
 +                      }
 +              }
 +              spin_unlock_bh(&nc_path->packet_list_lock);
 +
 +              if (nc_packet)
 +                      break;
 +      }
 +      rcu_read_unlock();
 +
 +      if (!nc_packet)
 +              batadv_dbg(BATADV_DBG_NC, bat_priv,
 +                         "No decoding packet found for %u\n", packet_id);
 +
 +      return nc_packet;
 +}
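
The lookup above is a bucketed search: hash the (source, dest) pair to choose a bucket, walk that bucket's paths, and scan each path's packet list for a matching packet_id, unlinking the hit. Stripped of RCU and the per-path spinlock, the essential structure is this (single-threaded sketch with illustrative types, not the kernel data structures):

#include <stddef.h>
#include <stdint.h>

struct nc_pkt {
        struct nc_pkt *next;
        uint32_t packet_id;
};

struct bucket {
        struct nc_pkt *head;
};

/* Find and unlink the buffered packet with the given id, or return NULL. */
static struct nc_pkt *find_decoding_pkt(struct bucket *tbl, size_t nbuckets,
                                        uint32_t key_hash, uint32_t packet_id)
{
        struct bucket *b = &tbl[key_hash % nbuckets];
        struct nc_pkt **pp;

        for (pp = &b->head; *pp; pp = &(*pp)->next) {
                if ((*pp)->packet_id == packet_id) {
                        struct nc_pkt *hit = *pp;

                        *pp = hit->next;   /* unlink, like list_del() */
                        hit->next = NULL;
                        return hit;
                }
        }
        return NULL;
}

int main(void)
{
        struct nc_pkt p2 = { NULL, 2 }, p1 = { &p2, 1 };
        struct bucket tbl[4] = { { &p1 } };

        return find_decoding_pkt(tbl, 4, 0, 2) == &p2 ? 0 : 1;
}
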
 +
 +/**
 + * batadv_nc_recv_coded_packet - try to decode coded packet and enqueue the
 + *  resulting unicast packet
 + * @skb: incoming coded packet
 + * @recv_if: pointer to interface this packet was received on
 + */
 +static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
 +                                     struct batadv_hard_iface *recv_if)
 +{
 +      struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 +      struct batadv_unicast_packet *unicast_packet;
 +      struct batadv_coded_packet *coded_packet;
 +      struct batadv_nc_packet *nc_packet;
 +      struct ethhdr *ethhdr;
 +      int hdr_size = sizeof(*coded_packet);
 +
 +      /* Check if network coding is enabled */
 +      if (!atomic_read(&bat_priv->network_coding))
 +              return NET_RX_DROP;
 +
 +      /* Make sure we can access (and remove) header */
 +      if (unlikely(!pskb_may_pull(skb, hdr_size)))
 +              return NET_RX_DROP;
 +
 +      coded_packet = (struct batadv_coded_packet *)skb->data;
 +      ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +      /* Verify frame is destined for us */
-       if (!batadv_is_my_mac(ethhdr->h_dest) &&
-           !batadv_is_my_mac(coded_packet->second_dest))
++      if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest) &&
++          !batadv_is_my_mac(bat_priv, coded_packet->second_dest))
 +              return NET_RX_DROP;
 +
 +      /* Update stat counter */
-       if (batadv_is_my_mac(coded_packet->second_dest))
++      if (batadv_is_my_mac(bat_priv, coded_packet->second_dest))
 +              batadv_inc_counter(bat_priv, BATADV_CNT_NC_SNIFFED);
 +
 +      nc_packet = batadv_nc_find_decoding_packet(bat_priv, ethhdr,
 +                                                 coded_packet);
 +      if (!nc_packet) {
 +              batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED);
 +              return NET_RX_DROP;
 +      }
 +
 +      /* Make the skbs linear, because decoding accesses the entire buffer */
 +      if (skb_linearize(skb) < 0)
 +              goto free_nc_packet;
 +
 +      if (skb_linearize(nc_packet->skb) < 0)
 +              goto free_nc_packet;
 +
 +      /* Decode the packet */
-       unicast_packet = batadv_nc_skb_decode_packet(skb, nc_packet);
++      unicast_packet = batadv_nc_skb_decode_packet(bat_priv, skb, nc_packet);
 +      if (!unicast_packet) {
 +              batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED);
 +              goto free_nc_packet;
 +      }
 +
 +      /* Mark packet as decoded to do correct recoding when forwarding */
 +      BATADV_SKB_CB(skb)->decoded = true;
 +      batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE);
 +      batadv_add_counter(bat_priv, BATADV_CNT_NC_DECODE_BYTES,
 +                         skb->len + ETH_HLEN);
 +      return batadv_recv_unicast_packet(skb, recv_if);
 +
 +free_nc_packet:
 +      batadv_nc_packet_free(nc_packet);
 +      return NET_RX_DROP;
 +}
 +
 +/**
 + * batadv_nc_free - clean up network coding memory
 + * @bat_priv: the bat priv with all the soft interface information
 + */
 +void batadv_nc_free(struct batadv_priv *bat_priv)
 +{
 +      batadv_recv_handler_unregister(BATADV_CODED);
 +      cancel_delayed_work_sync(&bat_priv->nc.work);
 +
 +      batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
 +      batadv_hash_destroy(bat_priv->nc.coding_hash);
 +      batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash, NULL);
 +      batadv_hash_destroy(bat_priv->nc.decoding_hash);
 +}
 +
 +/**
 + * batadv_nc_nodes_seq_print_text - print the nc node information
 + * @seq: seq file to print on
 + * @offset: not used
 + */
 +int batadv_nc_nodes_seq_print_text(struct seq_file *seq, void *offset)
 +{
 +      struct net_device *net_dev = (struct net_device *)seq->private;
 +      struct batadv_priv *bat_priv = netdev_priv(net_dev);
 +      struct batadv_hashtable *hash = bat_priv->orig_hash;
 +      struct batadv_hard_iface *primary_if;
 +      struct hlist_head *head;
 +      struct batadv_orig_node *orig_node;
 +      struct batadv_nc_node *nc_node;
 +      int i;
 +
 +      primary_if = batadv_seq_print_text_primary_if_get(seq);
 +      if (!primary_if)
 +              goto out;
 +
 +      /* Traverse list of originators */
 +      for (i = 0; i < hash->size; i++) {
 +              head = &hash->table[i];
 +
 +              /* For each orig_node in this bin */
 +              rcu_read_lock();
 +              hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
 +                      seq_printf(seq, "Node:      %pM\n", orig_node->orig);
 +
 +                      seq_puts(seq, " Ingoing:  ");
 +                      /* For each in_nc_node to this orig_node */
 +                      list_for_each_entry_rcu(nc_node,
 +                                              &orig_node->in_coding_list,
 +                                              list)
 +                              seq_printf(seq, "%pM ",
 +                                         nc_node->addr);
 +                      seq_puts(seq, "\n");
 +
 +                      seq_puts(seq, " Outgoing: ");
 +                      /* For each out_nc_node to this orig_node */
 +                      list_for_each_entry_rcu(nc_node,
 +                                              &orig_node->out_coding_list,
 +                                              list)
 +                              seq_printf(seq, "%pM ",
 +                                         nc_node->addr);
 +                      seq_puts(seq, "\n\n");
 +              }
 +              rcu_read_unlock();
 +      }
 +
 +out:
 +      if (primary_if)
 +              batadv_hardif_free_ref(primary_if);
 +      return 0;
 +}
 +
 +/**
 + * batadv_nc_init_debugfs - create nc folder and related files in debugfs
 + * @bat_priv: the bat priv with all the soft interface information
 + */
 +int batadv_nc_init_debugfs(struct batadv_priv *bat_priv)
 +{
 +      struct dentry *nc_dir, *file;
 +
 +      nc_dir = debugfs_create_dir("nc", bat_priv->debug_dir);
 +      if (!nc_dir)
 +              goto out;
 +
 +      file = debugfs_create_u8("min_tq", S_IRUGO | S_IWUSR, nc_dir,
 +                               &bat_priv->nc.min_tq);
 +      if (!file)
 +              goto out;
 +
 +      file = debugfs_create_u32("max_fwd_delay", S_IRUGO | S_IWUSR, nc_dir,
 +                                &bat_priv->nc.max_fwd_delay);
 +      if (!file)
 +              goto out;
 +
 +      file = debugfs_create_u32("max_buffer_time", S_IRUGO | S_IWUSR, nc_dir,
 +                                &bat_priv->nc.max_buffer_time);
 +      if (!file)
 +              goto out;
 +
 +      return 0;
 +
 +out:
 +      return -ENOMEM;
 +}
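
batadv_nc_init_debugfs follows the usual debugfs pattern of this era: create a directory, create one file per tunable with S_IRUGO | S_IWUSR, and treat any NULL return as -ENOMEM. A stripped-down module sketch of that pattern under the 3.x API, where debugfs_create_u32() still returned a dentry worth checking (my_dir and my_val are illustrative names, not batman-adv symbols):

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/stat.h>

static u32 my_val = 10;
static struct dentry *my_dir;

static int __init my_debugfs_init(void)
{
        struct dentry *file;

        my_dir = debugfs_create_dir("my_dir", NULL);
        if (!my_dir)
                return -ENOMEM;

        /* Readable by everyone, writable by root, like the nc tunables. */
        file = debugfs_create_u32("my_val", S_IRUGO | S_IWUSR, my_dir,
                                  &my_val);
        if (!file) {
                debugfs_remove_recursive(my_dir);
                return -ENOMEM;
        }

        return 0;
}

static void __exit my_debugfs_exit(void)
{
        debugfs_remove_recursive(my_dir);
}

module_init(my_debugfs_init);
module_exit(my_debugfs_exit);
MODULE_LICENSE("GPL");
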
diff --combined net/batman-adv/routing.c
index 8f88967ff14ba4ff98fd85b207f904e08c45db2c,319f2906c71a2146328c1e6ad1c4e9cbedee1e53..2f1f88923df8ca0208222b559bcffbf23870603d
@@@ -29,7 -29,6 +29,7 @@@
  #include "unicast.h"
  #include "bridge_loop_avoidance.h"
  #include "distributed-arp-table.h"
 +#include "network-coding.h"
  
  static int batadv_route_unicast_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
@@@ -403,7 -402,7 +403,7 @@@ int batadv_recv_icmp_packet(struct sk_b
                goto out;
  
        /* not for me */
-       if (!batadv_is_my_mac(ethhdr->h_dest))
+       if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
                goto out;
  
        icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
        }
  
        /* packet for me */
-       if (batadv_is_my_mac(icmp_packet->dst))
+       if (batadv_is_my_mac(bat_priv, icmp_packet->dst))
                return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
  
        /* TTL exceeded */
@@@ -549,37 -548,28 +549,39 @@@ batadv_find_ifalter_router(struct batad
        return router;
  }
  
- static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
 +/**
 + * batadv_check_unicast_packet - Check for malformed unicast packets
++ * @bat_priv: the bat priv with all the soft interface information
 + * @skb: packet to check
 + * @hdr_size: size of header to pull
 + *
 + * Check for a short header and bad addresses in the given packet. Returns a
 + * negative value when the check fails and 0 otherwise. The negative value
 + * depends on the
 + * reason: -ENODATA for bad header, -EBADR for broadcast destination or source,
 + * and -EREMOTE for non-local (other host) destination.
 + */
+ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
+                                      struct sk_buff *skb, int hdr_size)
  {
        struct ethhdr *ethhdr;
  
        /* drop packet if it does not have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
 -              return -1;
 +              return -ENODATA;
  
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
  
        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
 -              return -1;
 +              return -EBADR;
  
        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
 -              return -1;
 +              return -EBADR;
  
        /* not for me */
-       if (!batadv_is_my_mac(ethhdr->h_dest))
+       if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
 -              return -1;
 +              return -EREMOTE;
  
        return 0;
  }
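
The switch from a bare -1 to distinct errno values is what enables the sniffing path further below: batadv_recv_unicast_packet still drops on any negative return, but first stores the packet for later decoding when the reason is -EREMOTE. The calling convention in isolation, using Linux errno values and illustrative names (check_packet and handle_unicast are not batman-adv functions):

#include <errno.h>
#include <stdio.h>

/* Returns 0 if the packet is for us, or a negative reason code,
 * mirroring the convention introduced above. */
static int check_packet(int short_hdr, int bcast_addr, int for_other_host)
{
        if (short_hdr)
                return -ENODATA;  /* header too short */
        if (bcast_addr)
                return -EBADR;    /* broadcast src/dst on a unicast packet */
        if (for_other_host)
                return -EREMOTE;  /* well-formed, but destined elsewhere */
        return 0;
}

static void handle_unicast(int short_hdr, int bcast_addr, int for_other_host)
{
        int check = check_packet(short_hdr, bcast_addr, for_other_host);

        /* Not for us, but still worth buffering for later decoding. */
        if (check == -EREMOTE)
                printf("store sniffed packet\n");

        if (check < 0) {
                printf("drop\n");
                return;
        }
        printf("deliver\n");
}

int main(void)
{
        handle_unicast(0, 0, 1);  /* sniff, then drop */
        handle_unicast(0, 0, 0);  /* deliver */
        return 0;
}
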
@@@ -593,7 -583,7 +595,7 @@@ int batadv_recv_tt_query(struct sk_buf
        char tt_flag;
        size_t packet_size;
  
-       if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+       if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
                return NET_RX_DROP;
  
        /* I could need to modify it */
        case BATADV_TT_RESPONSE:
                batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
  
-               if (batadv_is_my_mac(tt_query->dst)) {
+               if (batadv_is_my_mac(bat_priv, tt_query->dst)) {
                        /* packet needs to be linearized to access the TT
                         * changes
                         */
@@@ -668,14 -658,15 +670,15 @@@ int batadv_recv_roam_adv(struct sk_buf
        struct batadv_roam_adv_packet *roam_adv_packet;
        struct batadv_orig_node *orig_node;
  
-       if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0)
+       if (batadv_check_unicast_packet(bat_priv, skb,
+                                       sizeof(*roam_adv_packet)) < 0)
                goto out;
  
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
  
        roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
  
-       if (!batadv_is_my_mac(roam_adv_packet->dst))
+       if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst))
                return batadv_route_unicast_packet(skb, recv_if);
  
        /* check if it is a backbone gateway. we don't accept
@@@ -861,17 -852,14 +864,17 @@@ static int batadv_route_unicast_packet(
        /* decrement ttl */
        unicast_packet->header.ttl--;
  
 -      /* Update stats counter */
 -      batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
 -      batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
 -                         skb->len + ETH_HLEN);
 -
 -      /* route it */
 -      if (batadv_send_skb_to_orig(skb, orig_node, recv_if))
 +      /* network code packet if possible */
 +      if (batadv_nc_skb_forward(skb, neigh_node, ethhdr)) {
                ret = NET_RX_SUCCESS;
 +      } else if (batadv_send_skb_to_orig(skb, orig_node, recv_if)) {
 +              ret = NET_RX_SUCCESS;
 +
 +              /* Update stats counter */
 +              batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
 +              batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
 +                                 skb->len + ETH_HLEN);
 +      }
  
  out:
        if (neigh_node)
@@@ -981,7 -969,7 +984,7 @@@ static int batadv_check_unicast_ttvn(st
         * last time) the packet had an updated information or not
         */
        curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
-       if (!batadv_is_my_mac(unicast_packet->dest)) {
+       if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
                orig_node = batadv_orig_hash_find(bat_priv,
                                                  unicast_packet->dest);
                /* if it is not possible to find the orig_node representing the
@@@ -1047,7 -1035,7 +1050,7 @@@ int batadv_recv_unicast_packet(struct s
        struct batadv_unicast_4addr_packet *unicast_4addr_packet;
        uint8_t *orig_addr;
        struct batadv_orig_node *orig_node = NULL;
 -      int hdr_size = sizeof(*unicast_packet);
 +      int check, hdr_size = sizeof(*unicast_packet);
        bool is4addr;
  
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
        if (is4addr)
                hdr_size = sizeof(*unicast_4addr_packet);
  
 -      if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
 +      /* function returns -EREMOTE for promiscuous packets */
-       check = batadv_check_unicast_packet(skb, hdr_size);
++      check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
 +
 +      /* Even though the packet is not for us, we might save it to use for
 +       * decoding a later received coded packet
 +       */
 +      if (check == -EREMOTE)
 +              batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);
 +
 +      if (check < 0)
                return NET_RX_DROP;
  
        if (!batadv_check_unicast_ttvn(bat_priv, skb))
                return NET_RX_DROP;
  
        /* packet for me */
-       if (batadv_is_my_mac(unicast_packet->dest)) {
+       if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
                if (is4addr) {
                        batadv_dat_inc_counter(bat_priv,
                                               unicast_4addr_packet->subtype);
@@@ -1111,7 -1090,7 +1114,7 @@@ int batadv_recv_ucast_frag_packet(struc
        struct sk_buff *new_skb = NULL;
        int ret;
  
-       if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+       if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
                return NET_RX_DROP;
  
        if (!batadv_check_unicast_ttvn(bat_priv, skb))
        unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
  
        /* packet for me */
-       if (batadv_is_my_mac(unicast_packet->dest)) {
+       if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
                ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
  
                if (ret == NET_RX_DROP)
@@@ -1174,13 -1153,13 +1177,13 @@@ int batadv_recv_bcast_packet(struct sk_
                goto out;
  
        /* ignore broadcasts sent by myself */
-       if (batadv_is_my_mac(ethhdr->h_source))
+       if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
                goto out;
  
        bcast_packet = (struct batadv_bcast_packet *)skb->data;
  
        /* ignore broadcasts originated by myself */
-       if (batadv_is_my_mac(bcast_packet->orig))
+       if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
                goto out;
  
        if (bcast_packet->header.ttl < 2)
@@@ -1266,14 -1245,14 +1269,14 @@@ int batadv_recv_vis_packet(struct sk_bu
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
  
        /* not for me */
-       if (!batadv_is_my_mac(ethhdr->h_dest))
+       if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
                return NET_RX_DROP;
  
        /* ignore own packets */
-       if (batadv_is_my_mac(vis_packet->vis_orig))
+       if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig))
                return NET_RX_DROP;
  
-       if (batadv_is_my_mac(vis_packet->sender_orig))
+       if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig))
                return NET_RX_DROP;
  
        switch (vis_packet->vis_type) {
diff --combined net/batman-adv/translation-table.c
index 9322320874493dde17146543588a03efde92c831,7abee19567e9e0f84ecb224e666f2e6c1094333c..5e89deeb9542979c72ba635fb1c18d301a9c1621
@@@ -385,19 -385,25 +385,19 @@@ static void batadv_tt_prepare_packet_bu
                                          int *packet_buff_len,
                                          int min_packet_len)
  {
 -      struct batadv_hard_iface *primary_if;
        int req_len;
  
 -      primary_if = batadv_primary_if_get_selected(bat_priv);
 -
        req_len = min_packet_len;
        req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
  
        /* if we have too many changes for one packet don't send any
         * and wait for the tt table request which will be fragmented
         */
 -      if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
 +      if (req_len > bat_priv->soft_iface->mtu)
                req_len = min_packet_len;
  
        batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
                                      min_packet_len, req_len);
 -
 -      if (primary_if)
 -              batadv_hardif_free_ref(primary_if);
  }
  
  static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
@@@ -902,7 -908,7 +902,7 @@@ out_remove
        /* remove address from local hash if present */
        local_flags = batadv_tt_local_remove(bat_priv, tt_addr,
                                             "global tt received",
 -                                           !!(flags & BATADV_TT_CLIENT_ROAM));
 +                                           flags & BATADV_TT_CLIENT_ROAM);
        tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
  
        if (!(flags & BATADV_TT_CLIENT_ROAM))
@@@ -1574,7 -1580,7 +1574,7 @@@ static int batadv_tt_global_valid(cons
  static struct sk_buff *
  batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
                              struct batadv_hashtable *hash,
 -                            struct batadv_hard_iface *primary_if,
 +                            struct batadv_priv *bat_priv,
                              int (*valid_cb)(const void *, const void *),
                              void *cb_data)
  {
        uint32_t i;
        size_t len;
  
 -      if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
 -              tt_len = primary_if->soft_iface->mtu - tt_query_size;
 +      if (tt_query_size + tt_len > bat_priv->soft_iface->mtu) {
 +              tt_len = bat_priv->soft_iface->mtu - tt_query_size;
                tt_len -= tt_len % sizeof(struct batadv_tt_change);
        }
        tt_tot = tt_len / sizeof(struct batadv_tt_change);
@@@ -1709,6 -1715,7 +1709,6 @@@ batadv_send_other_tt_response(struct ba
  {
        struct batadv_orig_node *req_dst_orig_node;
        struct batadv_orig_node *res_dst_orig_node = NULL;
 -      struct batadv_hard_iface *primary_if = NULL;
        uint8_t orig_ttvn, req_ttvn, ttvn;
        int ret = false;
        unsigned char *tt_buff;
        if (!res_dst_orig_node)
                goto out;
  
 -      primary_if = batadv_primary_if_get_selected(bat_priv);
 -      if (!primary_if)
 -              goto out;
 -
        orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
        req_ttvn = tt_request->ttvn;
  
  
                skb = batadv_tt_response_fill_table(tt_len, ttvn,
                                                    bat_priv->tt.global_hash,
 -                                                  primary_if,
 +                                                  bat_priv,
                                                    batadv_tt_global_valid,
                                                    req_dst_orig_node);
                if (!skb)
@@@ -1817,6 -1828,8 +1817,6 @@@ out
                batadv_orig_node_free_ref(res_dst_orig_node);
        if (req_dst_orig_node)
                batadv_orig_node_free_ref(req_dst_orig_node);
 -      if (primary_if)
 -              batadv_hardif_free_ref(primary_if);
        if (!ret)
                kfree_skb(skb);
        return ret;
@@@ -1894,7 -1907,7 +1894,7 @@@ batadv_send_my_tt_response(struct batad
  
                skb = batadv_tt_response_fill_table(tt_len, ttvn,
                                                    bat_priv->tt.local_hash,
 -                                                  primary_if,
 +                                                  bat_priv,
                                                    batadv_tt_local_valid_entry,
                                                    NULL);
                if (!skb)
@@@ -1940,7 -1953,7 +1940,7 @@@ out
  bool batadv_send_tt_response(struct batadv_priv *bat_priv,
                             struct batadv_tt_query_packet *tt_request)
  {
-       if (batadv_is_my_mac(tt_request->dst)) {
+       if (batadv_is_my_mac(bat_priv, tt_request->dst)) {
                /* don't answer backbone gws! */
                if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
                        return true;
@@@ -2515,7 -2528,7 +2515,7 @@@ bool batadv_tt_global_client_is_roaming
        if (!tt_global_entry)
                goto out;
  
 -      ret = !!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM);
 +      ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
        batadv_tt_global_entry_free_ref(tt_global_entry);
  out:
        return ret;
diff --combined net/batman-adv/vis.c
index 962ccf3b838225f409f9098c358c76a8b129417e,6a1e646be96d45b21e21127a056a9aad3af91bb1..1625e5793a895d02aec97966f901ccfb8a29e4ab
@@@ -149,7 -149,7 +149,7 @@@ static void batadv_vis_data_read_prim_s
  
        hlist_for_each_entry(entry, if_list, list) {
                if (entry->primary)
 -                      seq_printf(seq, "PRIMARY, ");
 +                      seq_puts(seq, "PRIMARY, ");
                else
                        seq_printf(seq,  "SEC %pM, ", entry->addr);
        }
@@@ -207,7 -207,7 +207,7 @@@ static void batadv_vis_data_read_entrie
                if (batadv_compare_eth(entry->addr, packet->vis_orig))
                        batadv_vis_data_read_prim_sec(seq, list);
  
 -              seq_printf(seq, "\n");
 +              seq_puts(seq, "\n");
        }
  }
  
@@@ -477,7 -477,7 +477,7 @@@ void batadv_receive_client_update_packe
  
        /* Are we the target for this VIS packet? */
        if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC   &&
-           batadv_is_my_mac(vis_packet->target_orig))
+           batadv_is_my_mac(bat_priv, vis_packet->target_orig))
                are_target = 1;
  
        spin_lock_bh(&bat_priv->vis.hash_lock);
                batadv_send_list_add(bat_priv, info);
  
                /* ... we're not the recipient (and thus need to forward). */
-       } else if (!batadv_is_my_mac(packet->target_orig)) {
+       } else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) {
                batadv_send_list_add(bat_priv, info);
        }
  
diff --combined net/bridge/br_if.c
index f17fcb3097c2d55cfc4ddcfb330fbd3ea0e179da,459dab22b3f6009dc027c01e29fe587270291053..4cdba60926ffc91c52c031793eb562fb38a84a4a
@@@ -67,7 -67,8 +67,8 @@@ void br_port_carrier_check(struct net_b
        struct net_device *dev = p->dev;
        struct net_bridge *br = p->br;
  
-       if (netif_running(dev) && netif_oper_up(dev))
+       if (!(p->flags & BR_ADMIN_COST) &&
+           netif_running(dev) && netif_oper_up(dev))
                p->path_cost = port_cost(dev);
  
        if (!netif_running(br->dev))
@@@ -148,6 -149,7 +149,6 @@@ static void del_nbp(struct net_bridge_p
        dev->priv_flags &= ~IFF_BRIDGE_PORT;
  
        netdev_rx_handler_unregister(dev);
 -      synchronize_net();
  
        netdev_upper_dev_unlink(dev, br->dev);
  
diff --combined net/can/gw.c
index 2dc619db805a059b5ba7ba7c9d1f4b00aea9b5b8,117814a7e73c720b20026289f4349528e8ed6f6a..3ee690e8c7d32354a525ad398291b7b7c5155215
@@@ -466,7 -466,7 +466,7 @@@ static int cgw_notifier(struct notifier
                        if (gwj->src.dev == dev || gwj->dst.dev == dev) {
                                hlist_del(&gwj->list);
                                cgw_unregister_filter(gwj);
-                               kfree(gwj);
+                               kmem_cache_free(cgw_cache, gwj);
                        }
                }
        }
@@@ -778,7 -778,8 +778,7 @@@ static int cgw_parse_attr(struct nlmsgh
        return 0;
  }
  
 -static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
 -                        void *arg)
 +static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh)
  {
        struct rtcanmsg *r;
        struct cgw_job *gwj;
@@@ -863,11 -864,11 +863,11 @@@ static void cgw_remove_all_jobs(void
        hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
                hlist_del(&gwj->list);
                cgw_unregister_filter(gwj);
-               kfree(gwj);
+               kmem_cache_free(cgw_cache, gwj);
        }
  }
  
 -static int cgw_remove_job(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
 +static int cgw_remove_job(struct sk_buff *skb,  struct nlmsghdr *nlh)
  {
        struct cgw_job *gwj = NULL;
        struct hlist_node *nx;
  
                hlist_del(&gwj->list);
                cgw_unregister_filter(gwj);
-               kfree(gwj);
+               kmem_cache_free(cgw_cache, gwj);
                err = 0;
                break;
        }
diff --combined net/core/dev.c
index 8a3cb2c50fbfbc6f2afa1760f619297ea4bdd4b3,b24ab0e98eb4d8dd8cef2a20ef2a39b97d9ece92..9e26b8d9eafee123e661b519f90192450e9dd3c7
@@@ -200,7 -200,7 +200,7 @@@ static inline void rps_unlock(struct so
  }
  
  /* Device list insertion */
 -static int list_netdevice(struct net_device *dev)
 +static void list_netdevice(struct net_device *dev)
  {
        struct net *net = dev_net(dev);
  
        write_unlock_bh(&dev_base_lock);
  
        dev_base_seq_inc(net);
 -
 -      return 0;
  }
  
  /* Device list removal
@@@ -2146,6 -2148,9 +2146,9 @@@ static void skb_warn_bad_offload(const 
        struct net_device *dev = skb->dev;
        const char *driver = "";
  
+       if (!net_ratelimit())
+               return;
        if (dev && dev->dev.parent)
                driver = dev_driver_string(dev->dev.parent);
  
  }
  EXPORT_SYMBOL(skb_checksum_help);
  
 -/**
 - *    skb_mac_gso_segment - mac layer segmentation handler.
 - *    @skb: buffer to segment
 - *    @features: features for the output path (see dev->features)
 - */
 -struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 -                                  netdev_features_t features)
 +__be16 skb_network_protocol(struct sk_buff *skb)
  {
 -      struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 -      struct packet_offload *ptype;
        __be16 type = skb->protocol;
        int vlan_depth = ETH_HLEN;
  
 -      while (type == htons(ETH_P_8021Q)) {
 +      while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
                struct vlan_hdr *vh;
  
                if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
 -                      return ERR_PTR(-EINVAL);
 +                      return 0;
  
                vh = (struct vlan_hdr *)(skb->data + vlan_depth);
                type = vh->h_vlan_encapsulated_proto;
                vlan_depth += VLAN_HLEN;
        }
  
 +      return type;
 +}
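
skb_network_protocol now walks any run of stacked 802.1Q/802.1ad tags to reach the encapsulated ethertype, returning 0 when a tag header cannot be pulled. The same walk over a flat frame buffer, sketched in userspace (parse_ethertype is an illustrative name; the constants are the standard TPID values):

#include <stddef.h>
#include <stdint.h>

#define ETH_HLEN     14
#define VLAN_HLEN    4
#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

static uint16_t get_be16(const uint8_t *p)
{
        return (uint16_t)(p[0] << 8 | p[1]);
}

/* Return the encapsulated ethertype of frame, skipping stacked VLAN tags,
 * or 0 if the frame is too short (mirrors the !type error above). */
static uint16_t parse_ethertype(const uint8_t *frame, size_t len)
{
        size_t depth = ETH_HLEN;
        uint16_t type;

        if (len < ETH_HLEN)
                return 0;
        type = get_be16(frame + 12);  /* ethertype field of the MAC header */

        while (type == ETH_P_8021Q || type == ETH_P_8021AD) {
                if (len < depth + VLAN_HLEN)
                        return 0;
                /* A VLAN tag is TCI (2 bytes) + inner ethertype (2 bytes). */
                type = get_be16(frame + depth + 2);
                depth += VLAN_HLEN;
        }
        return type;
}

int main(void)
{
        uint8_t frame[ETH_HLEN + VLAN_HLEN] = { 0 };

        frame[12] = 0x81; frame[13] = 0x00;   /* TPID: 802.1Q */
        frame[16] = 0x08; frame[17] = 0x00;   /* encapsulated: IPv4 */
        return parse_ethertype(frame, sizeof(frame)) == 0x0800 ? 0 : 1;
}
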
 +
 +/**
 + *    skb_mac_gso_segment - mac layer segmentation handler.
 + *    @skb: buffer to segment
 + *    @features: features for the output path (see dev->features)
 + */
 +struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 +                                  netdev_features_t features)
 +{
 +      struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 +      struct packet_offload *ptype;
 +      __be16 type = skb_network_protocol(skb);
 +
 +      if (unlikely(!type))
 +              return ERR_PTR(-EINVAL);
 +
        __skb_pull(skb, skb->mac_len);
  
        rcu_read_lock();
@@@ -2405,12 -2400,24 +2408,12 @@@ static int dev_gso_segment(struct sk_bu
        return 0;
  }
  
 -static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 -{
 -      return ((features & NETIF_F_GEN_CSUM) ||
 -              ((features & NETIF_F_V4_CSUM) &&
 -               protocol == htons(ETH_P_IP)) ||
 -              ((features & NETIF_F_V6_CSUM) &&
 -               protocol == htons(ETH_P_IPV6)) ||
 -              ((features & NETIF_F_FCOE_CRC) &&
 -               protocol == htons(ETH_P_FCOE)));
 -}
 -
  static netdev_features_t harmonize_features(struct sk_buff *skb,
        __be16 protocol, netdev_features_t features)
  {
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, protocol)) {
                features &= ~NETIF_F_ALL_CSUM;
 -              features &= ~NETIF_F_SG;
        } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
@@@ -2426,22 -2433,20 +2429,22 @@@ netdev_features_t netif_skb_features(st
        if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
                features &= ~NETIF_F_GSO_MASK;
  
 -      if (protocol == htons(ETH_P_8021Q)) {
 +      if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        } else if (!vlan_tx_tag_present(skb)) {
                return harmonize_features(skb, protocol, features);
        }
  
 -      features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
 +      features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
 +                                             NETIF_F_HW_VLAN_STAG_TX);
  
 -      if (protocol != htons(ETH_P_8021Q)) {
 +      if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
                return harmonize_features(skb, protocol, features);
        } else {
                features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
 -                              NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
 +                              NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
 +                              NETIF_F_HW_VLAN_STAG_TX;
                return harmonize_features(skb, protocol, features);
        }
  }
@@@ -2482,9 -2487,8 +2485,9 @@@ int dev_hard_start_xmit(struct sk_buff 
                features = netif_skb_features(skb);
  
                if (vlan_tx_tag_present(skb) &&
 -                  !(features & NETIF_F_HW_VLAN_TX)) {
 -                      skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
 +                  !vlan_hw_offload_capable(features, skb->vlan_proto)) {
 +                      skb = __vlan_put_tag(skb, skb->vlan_proto,
 +                                           vlan_tx_tag_get(skb));
                        if (unlikely(!skb))
                                goto out;
  
@@@ -2588,7 -2592,6 +2591,7 @@@ static void qdisc_pkt_len_init(struct s
         */
        if (shinfo->gso_size)  {
                unsigned int hdr_len;
 +              u16 gso_segs = shinfo->gso_segs;
  
                /* mac layer + network layer */
                hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
                        hdr_len += tcp_hdrlen(skb);
                else
                        hdr_len += sizeof(struct udphdr);
 -              qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len;
 +
 +              if (shinfo->gso_type & SKB_GSO_DODGY)
 +                      gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
 +                                              shinfo->gso_size);
 +
 +              qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
        }
  }
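
For untrusted (SKB_GSO_DODGY) skbs the segment count is recomputed from the payload length rather than trusted from the producer, and pkt_len is then charged one extra copy of the headers per additional segment. The arithmetic, worked through with illustrative numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int skb_len = 14700;   /* headers + 10 full segments + tail */
        unsigned int hdr_len = 54;      /* mac + ip + tcp headers */
        unsigned int gso_size = 1460;   /* payload bytes per segment */

        /* Recomputed segment count, as done for SKB_GSO_DODGY packets. */
        unsigned int gso_segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);

        /* Each segment beyond the first repeats the headers on the wire. */
        unsigned int pkt_len = skb_len + (gso_segs - 1) * hdr_len;

        /* Prints segs=11 pkt_len=15240. */
        printf("segs=%u pkt_len=%u\n", gso_segs, pkt_len);
        return 0;
}
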
  
@@@ -3331,7 -3329,7 +3334,7 @@@ EXPORT_SYMBOL_GPL(netdev_rx_handler_reg
   *    netdev_rx_handler_unregister - unregister receive handler
   *    @dev: device to unregister a handler from
   *
 - *    Unregister a receive hander from a device.
 + *    Unregister a receive handler from a device.
   *
   *    The caller must hold the rtnl_mutex.
   */
@@@ -3360,7 -3358,6 +3363,7 @@@ static bool skb_pfmemalloc_protocol(str
        case __constant_htons(ETH_P_IP):
        case __constant_htons(ETH_P_IPV6):
        case __constant_htons(ETH_P_8021Q):
 +      case __constant_htons(ETH_P_8021AD):
                return true;
        default:
                return false;
@@@ -3401,8 -3398,7 +3404,8 @@@ another_round
  
        __this_cpu_inc(softnet_data.processed);
  
 -      if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
 +      if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
 +          skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
                skb = vlan_untag(skb);
                if (unlikely(!skb))
                        goto unlock;
@@@ -4070,9 -4066,6 +4073,9 @@@ void netif_napi_add(struct net_device *
        napi->gro_list = NULL;
        napi->skb = NULL;
        napi->poll = poll;
 +      if (weight > NAPI_POLL_WEIGHT)
 +              pr_err_once("netif_napi_add() called with weight %d on device %s\n",
 +                          weight, dev->name);
        napi->weight = weight;
        list_add(&napi->dev_list, &dev->napi_list);
        napi->dev = dev;
@@@ -4934,25 -4927,20 +4937,25 @@@ static netdev_features_t netdev_fix_fea
                features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
        }
  
 -      /* Fix illegal SG+CSUM combinations. */
 -      if ((features & NETIF_F_SG) &&
 -          !(features & NETIF_F_ALL_CSUM)) {
 -              netdev_dbg(dev,
 -                      "Dropping NETIF_F_SG since no checksum feature.\n");
 -              features &= ~NETIF_F_SG;
 -      }
 -
        /* TSO requires that SG is present as well. */
        if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
                netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
                features &= ~NETIF_F_ALL_TSO;
        }
  
 +      if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
 +                                      !(features & NETIF_F_IP_CSUM)) {
 +              netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
 +              features &= ~NETIF_F_TSO;
 +              features &= ~NETIF_F_TSO_ECN;
 +      }
 +
 +      if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
 +                                       !(features & NETIF_F_IPV6_CSUM)) {
 +              netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
 +              features &= ~NETIF_F_TSO6;
 +      }
 +
        /* TSO ECN requires that TSO is present as well. */
        if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
                features &= ~NETIF_F_TSO_ECN;
@@@ -5183,8 -5171,7 +5186,8 @@@ int register_netdevice(struct net_devic
                }
        }
  
 -      if (((dev->hw_features | dev->features) & NETIF_F_HW_VLAN_FILTER) &&
 +      if (((dev->hw_features | dev->features) &
 +           NETIF_F_HW_VLAN_CTAG_FILTER) &&
            (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
             !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
                netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
         */
        dev->vlan_features |= NETIF_F_HIGHDMA;
  
 +      /* Make NETIF_F_SG inheritable to tunnel devices.
 +       */
 +      dev->hw_enc_features |= NETIF_F_SG;
 +
        ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
        ret = notifier_to_errno(ret);
        if (ret)
diff --combined net/core/rtnetlink.c
index 589d0abb34a0b8c7f84f7b07208963c5c660871f,23854b51a259cb8b65c2bb1ee4be8e32e0ca0eb8..18af08a73f0a719ecaf3e56ae0f89783c8d90f77
@@@ -517,6 -517,32 +517,6 @@@ out
        return err;
  }
  
 -static const int rtm_min[RTM_NR_FAMILIES] =
 -{
 -      [RTM_FAM(RTM_NEWLINK)]      = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
 -      [RTM_FAM(RTM_NEWADDR)]      = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
 -      [RTM_FAM(RTM_NEWROUTE)]     = NLMSG_LENGTH(sizeof(struct rtmsg)),
 -      [RTM_FAM(RTM_NEWRULE)]      = NLMSG_LENGTH(sizeof(struct fib_rule_hdr)),
 -      [RTM_FAM(RTM_NEWQDISC)]     = NLMSG_LENGTH(sizeof(struct tcmsg)),
 -      [RTM_FAM(RTM_NEWTCLASS)]    = NLMSG_LENGTH(sizeof(struct tcmsg)),
 -      [RTM_FAM(RTM_NEWTFILTER)]   = NLMSG_LENGTH(sizeof(struct tcmsg)),
 -      [RTM_FAM(RTM_NEWACTION)]    = NLMSG_LENGTH(sizeof(struct tcamsg)),
 -      [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 -      [RTM_FAM(RTM_GETANYCAST)]   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 -};
 -
 -static const int rta_max[RTM_NR_FAMILIES] =
 -{
 -      [RTM_FAM(RTM_NEWLINK)]      = IFLA_MAX,
 -      [RTM_FAM(RTM_NEWADDR)]      = IFA_MAX,
 -      [RTM_FAM(RTM_NEWROUTE)]     = RTA_MAX,
 -      [RTM_FAM(RTM_NEWRULE)]      = FRA_MAX,
 -      [RTM_FAM(RTM_NEWQDISC)]     = TCA_MAX,
 -      [RTM_FAM(RTM_NEWTCLASS)]    = TCA_MAX,
 -      [RTM_FAM(RTM_NEWTFILTER)]   = TCA_MAX,
 -      [RTM_FAM(RTM_NEWACTION)]    = TCAA_MAX,
 -};
 -
  int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
  {
        struct sock *rtnl = net->rtnl;
@@@ -1046,7 -1072,7 +1046,7 @@@ static int rtnl_dump_ifinfo(struct sk_b
        rcu_read_lock();
        cb->seq = net->dev_base_seq;
  
-       if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+       if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
                        ifla_policy) >= 0) {
  
                if (tb[IFLA_EXT_MASK])
@@@ -1513,7 -1539,7 +1513,7 @@@ errout
        return err;
  }
  
 -static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 +static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(skb->sk);
        struct ifinfomsg *ifm;
@@@ -1554,7 -1580,7 +1554,7 @@@ errout
        return err;
  }
  
 -static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 +static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(skb->sk);
        const struct rtnl_link_ops *ops;
@@@ -1685,7 -1711,7 +1685,7 @@@ static int rtnl_group_changelink(struc
        return 0;
  }
  
 -static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 +static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(skb->sk);
        const struct rtnl_link_ops *ops;
@@@ -1840,7 -1866,7 +1840,7 @@@ out
        }
  }
  
 -static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 +static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh)
  {
        struct net *net = sock_net(skb->sk);
        struct ifinfomsg *ifm;
@@@ -1896,7 -1922,7 +1896,7 @@@ static u16 rtnl_calcit(struct sk_buff *
        u32 ext_filter_mask = 0;
        u16 min_ifinfo_dump_size = 0;
  
-       if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+       if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
                        ifla_policy) >= 0) {
                if (tb[IFLA_EXT_MASK])
                        ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
@@@ -1931,11 -1957,8 +1931,11 @@@ static int rtnl_dump_all(struct sk_buf
                if (rtnl_msg_handlers[idx] == NULL ||
                    rtnl_msg_handlers[idx][type].dumpit == NULL)
                        continue;
 -              if (idx > s_idx)
 +              if (idx > s_idx) {
                        memset(&cb->args[0], 0, sizeof(cb->args));
 +                      cb->prev_seq = 0;
 +                      cb->seq = 0;
 +              }
                if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
                        break;
        }
@@@ -2028,39 -2051,7 +2028,39 @@@ errout
        rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
  }
  
 -static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 +/**
 + * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
 + */
 +int ndo_dflt_fdb_add(struct ndmsg *ndm,
 +                   struct nlattr *tb[],
 +                   struct net_device *dev,
 +                   const unsigned char *addr,
 +                   u16 flags)
 +{
 +      int err = -EINVAL;
 +
 +      /* If aging addresses are supported, the device will need to
 +       * implement its own handler for this.
 +       */
 +      if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
 +              pr_info("%s: FDB only supports static addresses\n", dev->name);
 +              return err;
 +      }
 +
 +      if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
 +              err = dev_uc_add_excl(dev, addr);
 +      else if (is_multicast_ether_addr(addr))
 +              err = dev_mc_add_excl(dev, addr);
 +
 +      /* Only return duplicate errors if NLM_F_EXCL is set */
 +      if (err == -EEXIST && !(flags & NLM_F_EXCL))
 +              err = 0;
 +
 +      return err;
 +}
 +EXPORT_SYMBOL(ndo_dflt_fdb_add);
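
ndo_dflt_fdb_add only accepts static entries and deliberately swallows -EEXIST unless the netlink caller set NLM_F_EXCL, so a repeated add is idempotent by default. That exclusive-create convention in isolation (fdb_add_result is an illustrative name):

#include <errno.h>
#include <stdio.h>

#define NLM_F_EXCL 0x200   /* netlink: fail if the entry already exists */

/* Map a driver-level add result to what the netlink caller sees. */
static int fdb_add_result(int err, unsigned int nlm_flags)
{
        /* Only report duplicates when the caller asked for exclusivity. */
        if (err == -EEXIST && !(nlm_flags & NLM_F_EXCL))
                return 0;
        return err;
}

int main(void)
{
        printf("%d\n", fdb_add_result(-EEXIST, 0));          /* 0: tolerated */
        printf("%d\n", fdb_add_result(-EEXIST, NLM_F_EXCL)); /* -EEXIST */
        return 0;
}
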
 +
 +static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(skb->sk);
        struct ndmsg *ndm;
        }
  
        addr = nla_data(tb[NDA_LLADDR]);
 -      if (!is_valid_ether_addr(addr)) {
 +      if (is_zero_ether_addr(addr)) {
                pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n");
                return -EINVAL;
        }
        }
  
        /* Embedded bridge, macvlan, and any other device support */
 -      if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
 -              err = dev->netdev_ops->ndo_fdb_add(ndm, tb,
 -                                                 dev, addr,
 -                                                 nlh->nlmsg_flags);
 +      if ((ndm->ndm_flags & NTF_SELF)) {
 +              if (dev->netdev_ops->ndo_fdb_add)
 +                      err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
 +                                                         nlh->nlmsg_flags);
 +              else
 +                      err = ndo_dflt_fdb_add(ndm, tb, dev, addr,
 +                                             nlh->nlmsg_flags);
  
                if (!err) {
                        rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
        return err;
  }
  
 -static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 +/**
 + * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
 + */
 +int ndo_dflt_fdb_del(struct ndmsg *ndm,
 +                   struct nlattr *tb[],
 +                   struct net_device *dev,
 +                   const unsigned char *addr)
 +{
 +      int err = -EOPNOTSUPP;
 +
 +      /* If aging addresses are supported, the device will need to
 +       * implement its own handler for this.
 +       */
 +      if (ndm->ndm_state & NUD_PERMANENT) {
 +              pr_info("%s: FDB only supports static addresses\n", dev->name);
 +              return -EINVAL;
 +      }
 +
 +      if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
 +              err = dev_uc_del(dev, addr);
 +      else if (is_multicast_ether_addr(addr))
 +              err = dev_mc_del(dev, addr);
 +      else
 +              err = -EINVAL;
 +
 +      return err;
 +}
 +EXPORT_SYMBOL(ndo_dflt_fdb_del);
 +
 +static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(skb->sk);
        struct ndmsg *ndm;
        }
  
        /* Embedded bridge, macvlan, and any other device support */
 -      if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) {
 -              err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr);
 +      if (ndm->ndm_flags & NTF_SELF) {
 +              if (dev->netdev_ops->ndo_fdb_del)
 +                      err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr);
 +              else
 +                      err = ndo_dflt_fdb_del(ndm, tb, dev, addr);
  
                if (!err) {
                        rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
@@@ -2264,7 -2220,7 +2264,7 @@@ skip
   * @dev: netdevice
   *
   * Default netdevice operation to dump the existing unicast address list.
 - * Returns zero on success.
 + * Returns the number of addresses from the list that were put in the skb.
   */
  int ndo_dflt_fdb_dump(struct sk_buff *skb,
                      struct netlink_callback *cb,
@@@ -2304,8 -2260,6 +2304,8 @@@ static int rtnl_fdb_dump(struct sk_buf
  
                if (dev->netdev_ops->ndo_fdb_dump)
                        idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
 +              else
 +                      idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
        }
        rcu_read_unlock();
  
@@@ -2457,7 -2411,8 +2457,7 @@@ errout
        return err;
  }
  
 -static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 -                             void *arg)
 +static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(skb->sk);
        struct ifinfomsg *ifm;
@@@ -2527,7 -2482,8 +2527,7 @@@ out
        return err;
  }
  
 -static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
 -                             void *arg)
 +static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(skb->sk);
        struct ifinfomsg *ifm;
@@@ -2597,6 -2553,10 +2597,6 @@@ out
        return err;
  }
  
 -/* Protected by RTNL sempahore.  */
 -static struct rtattr **rta_buf;
 -static int rtattr_max;
 -
  /* Process one rtnetlink message. */
  
  static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct net *net = sock_net(skb->sk);
        rtnl_doit_func doit;
        int sz_idx, kind;
 -      int min_len;
        int family;
        int type;
        int err;
        type -= RTM_BASE;
  
        /* All the messages must have at least 1 byte length */
 -      if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
 +      if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
                return 0;
  
 -      family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
 +      family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
        sz_idx = type>>2;
        kind = type&3;
  
                return err;
        }
  
 -      memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
 -
 -      min_len = rtm_min[sz_idx];
 -      if (nlh->nlmsg_len < min_len)
 -              return -EINVAL;
 -
 -      if (nlh->nlmsg_len > min_len) {
 -              int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
 -              struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
 -
 -              while (RTA_OK(attr, attrlen)) {
 -                      unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
 -                      if (flavor) {
 -                              if (flavor > rta_max[sz_idx])
 -                                      return -EINVAL;
 -                              rta_buf[flavor-1] = attr;
 -                      }
 -                      attr = RTA_NEXT(attr, attrlen);
 -              }
 -      }
 -
        doit = rtnl_get_doit(family, type);
        if (doit == NULL)
                return -EOPNOTSUPP;
  
 -      return doit(skb, nlh, (void *)&rta_buf[0]);
 +      return doit(skb, nlh);
  }
  
  static void rtnetlink_rcv(struct sk_buff *skb)
@@@ -2725,6 -2707,16 +2725,6 @@@ static struct pernet_operations rtnetli
  
  void __init rtnetlink_init(void)
  {
 -      int i;
 -
 -      rtattr_max = 0;
 -      for (i = 0; i < ARRAY_SIZE(rta_max); i++)
 -              if (rta_max[i] > rtattr_max)
 -                      rtattr_max = rta_max[i];
 -      rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL);
 -      if (!rta_buf)
 -              panic("rtnetlink_init: cannot allocate rta_buf\n");
 -
        if (register_pernet_subsys(&rtnetlink_net_ops))
                panic("rtnetlink_init: cannot initialize rtnetlink\n");
  
diff --combined net/ipv4/devinet.c
index 2759dfd576aeb6ee85ad7c17155cc3a9fc924e94,c6287cd978c2db7b40f3aa3fe06b5d43bced64d5..dfc39d4d48b7471fc83035746026fa14d1dcf497
@@@ -536,7 -536,7 +536,7 @@@ struct in_ifaddr *inet_ifa_byprefix(str
        return NULL;
  }
  
 -static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 +static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[IFA_MAX+1];
@@@ -587,13 -587,16 +587,16 @@@ static void check_lifetime(struct work_
  {
        unsigned long now, next, next_sec, next_sched;
        struct in_ifaddr *ifa;
+       struct hlist_node *n;
        int i;
  
        now = jiffies;
        next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
  
-       rcu_read_lock();
        for (i = 0; i < IN4_ADDR_HSIZE; i++) {
+               bool change_needed = false;
+               rcu_read_lock();
                hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
                        unsigned long age;
  
  
                        if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
                            age >= ifa->ifa_valid_lft) {
-                               struct in_ifaddr **ifap ;
-                               rtnl_lock();
-                               for (ifap = &ifa->ifa_dev->ifa_list;
-                                    *ifap != NULL; ifap = &ifa->ifa_next) {
-                                       if (*ifap == ifa)
-                                               inet_del_ifa(ifa->ifa_dev,
-                                                            ifap, 1);
-                               }
-                               rtnl_unlock();
+                               change_needed = true;
                        } else if (ifa->ifa_preferred_lft ==
                                   INFINITY_LIFE_TIME) {
                                continue;
                                        next = ifa->ifa_tstamp +
                                               ifa->ifa_valid_lft * HZ;
  
-                               if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) {
-                                       ifa->ifa_flags |= IFA_F_DEPRECATED;
-                                       rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
-                               }
+                               if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
+                                       change_needed = true;
                        } else if (time_before(ifa->ifa_tstamp +
                                               ifa->ifa_preferred_lft * HZ,
                                               next)) {
                                       ifa->ifa_preferred_lft * HZ;
                        }
                }
+               rcu_read_unlock();
+               if (!change_needed)
+                       continue;
+               rtnl_lock();
+               hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
+                       unsigned long age;
+                       if (ifa->ifa_flags & IFA_F_PERMANENT)
+                               continue;
+                       /* We try to batch several events at once. */
+                       age = (now - ifa->ifa_tstamp +
+                              ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
+                       if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
+                           age >= ifa->ifa_valid_lft) {
+                               struct in_ifaddr **ifap;
+                               for (ifap = &ifa->ifa_dev->ifa_list;
+                                    *ifap != NULL; ifap = &(*ifap)->ifa_next) {
+                                       if (*ifap == ifa) {
+                                               inet_del_ifa(ifa->ifa_dev,
+                                                            ifap, 1);
+                                               break;
+                                       }
+                               }
+                       } else if (ifa->ifa_preferred_lft !=
+                                  INFINITY_LIFE_TIME &&
+                                  age >= ifa->ifa_preferred_lft &&
+                                  !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
+                               ifa->ifa_flags |= IFA_F_DEPRECATED;
+                               rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
+                       }
+               }
+               rtnl_unlock();
        }
-       rcu_read_unlock();
  
        next_sec = round_jiffies_up(next);
        next_sched = next;
@@@ -775,7 -801,7 +801,7 @@@ static struct in_ifaddr *find_matching_
        return NULL;
  }
  
 -static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 +static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(skb->sk);
        struct in_ifaddr *ifa;
                        return -EEXIST;
                ifa = ifa_existing;
                set_ifa_lifetime(ifa, valid_lft, prefered_lft);
+               cancel_delayed_work(&check_lifetime_work);
+               schedule_delayed_work(&check_lifetime_work, 0);
                rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
                blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
        }
@@@ -1501,8 -1529,6 +1529,8 @@@ static int inet_dump_ifaddr(struct sk_b
                idx = 0;
                head = &net->dev_index_head[h];
                rcu_read_lock();
 +              cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
 +                        net->dev_base_seq;
                hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
                                        rcu_read_unlock();
                                        goto done;
                                }
 +                              nl_dump_check_consistent(cb, nlmsg_hdr(skb));
                        }
  cont:
                        idx++;
@@@ -1735,7 -1760,8 +1763,7 @@@ static const struct nla_policy devconf_
  };
  
  static int inet_netconf_get_devconf(struct sk_buff *in_skb,
 -                                  struct nlmsghdr *nlh,
 -                                  void *arg)
 +                                  struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(in_skb->sk);
        struct nlattr *tb[NETCONFA_MAX+1];
@@@ -1795,77 -1821,6 +1823,77 @@@ errout
        return err;
  }
  
 +static int inet_netconf_dump_devconf(struct sk_buff *skb,
 +                                   struct netlink_callback *cb)
 +{
 +      struct net *net = sock_net(skb->sk);
 +      int h, s_h;
 +      int idx, s_idx;
 +      struct net_device *dev;
 +      struct in_device *in_dev;
 +      struct hlist_head *head;
 +
 +      s_h = cb->args[0];
 +      s_idx = idx = cb->args[1];
 +
 +      for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 +              idx = 0;
 +              head = &net->dev_index_head[h];
 +              rcu_read_lock();
 +              cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
 +                        net->dev_base_seq;
 +              hlist_for_each_entry_rcu(dev, head, index_hlist) {
 +                      if (idx < s_idx)
 +                              goto cont;
 +                      in_dev = __in_dev_get_rcu(dev);
 +                      if (!in_dev)
 +                              goto cont;
 +
 +                      if (inet_netconf_fill_devconf(skb, dev->ifindex,
 +                                                    &in_dev->cnf,
 +                                                    NETLINK_CB(cb->skb).portid,
 +                                                    cb->nlh->nlmsg_seq,
 +                                                    RTM_NEWNETCONF,
 +                                                    NLM_F_MULTI,
 +                                                    -1) <= 0) {
 +                              rcu_read_unlock();
 +                              goto done;
 +                      }
 +                      nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 +cont:
 +                      idx++;
 +              }
 +              rcu_read_unlock();
 +      }
 +      if (h == NETDEV_HASHENTRIES) {
 +              if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
 +                                            net->ipv4.devconf_all,
 +                                            NETLINK_CB(cb->skb).portid,
 +                                            cb->nlh->nlmsg_seq,
 +                                            RTM_NEWNETCONF, NLM_F_MULTI,
 +                                            -1) <= 0)
 +                      goto done;
 +              else
 +                      h++;
 +      }
 +      if (h == NETDEV_HASHENTRIES + 1) {
 +              if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
 +                                            net->ipv4.devconf_dflt,
 +                                            NETLINK_CB(cb->skb).portid,
 +                                            cb->nlh->nlmsg_seq,
 +                                            RTM_NEWNETCONF, NLM_F_MULTI,
 +                                            -1) <= 0)
 +                      goto done;
 +              else
 +                      h++;
 +      }
 +done:
 +      cb->args[0] = h;
 +      cb->args[1] = idx;
 +
 +      return skb->len;
 +}
 +
  #ifdef CONFIG_SYSCTL
  
  static void devinet_copy_dflt_conf(struct net *net, int i)
@@@ -2270,6 -2225,6 +2298,6 @@@ void __init devinet_init(void
        rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
        rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
        rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
 -                    NULL, NULL);
 +                    inet_netconf_dump_devconf, NULL);
  }
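
The check_lifetime() rework above trades per-address rtnl_lock() calls inside an RCU walk for two passes: a lock-free scan that only records whether any address needs attention, then, only if so, a second walk under RTNL that re-checks each entry and applies the change. A standalone sketch of that pattern, with a pthread mutex standing in for RTNL and all names and values invented:

/* Two-pass lifetime check: cheap detection pass, then a locked pass. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct addr { int valid_lft; int age; bool expired; struct addr *next; };

static pthread_mutex_t heavy_lock = PTHREAD_MUTEX_INITIALIZER;

static void check_lifetimes(struct addr *head)
{
        struct addr *a;
        bool change_needed = false;

        /* pass 1: read-only scan (RCU in the kernel); just decide */
        for (a = head; a; a = a->next)
                if (a->age >= a->valid_lft)
                        change_needed = true;
        if (!change_needed)
                return;

        /* pass 2: re-walk under the heavy lock; conditions are
         * re-evaluated because state may have changed in between */
        pthread_mutex_lock(&heavy_lock);
        for (a = head; a; a = a->next)
                if (a->age >= a->valid_lft && !a->expired) {
                        a->expired = true;  /* inet_del_ifa() in the kernel */
                        printf("expiring addr, lft=%d age=%d\n",
                               a->valid_lft, a->age);
                }
        pthread_mutex_unlock(&heavy_lock);
}

int main(void)
{
        struct addr young = { .valid_lft = 60, .age = 5,  .next = NULL };
        struct addr old   = { .valid_lft = 10, .age = 12, .next = &young };

        check_lifetimes(&old);
        return 0;
}
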
  
diff --combined net/ipv4/ip_fragment.c
index 938520668b2f4ae82f139dc66f65b15c074e8b0f,52c273ea05c34c902e07c00609b4f881c392c37b..b66910aaef4d633977d2e983f2c63b419da38f77
@@@ -79,11 -79,40 +79,11 @@@ struct ipq 
        struct inet_peer *peer;
  };
  
 -/* RFC 3168 support :
 - * We want to check ECN values of all fragments, do detect invalid combinations.
 - * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 - */
 -#define       IPFRAG_ECN_NOT_ECT      0x01 /* one frag had ECN_NOT_ECT */
 -#define       IPFRAG_ECN_ECT_1        0x02 /* one frag had ECN_ECT_1 */
 -#define       IPFRAG_ECN_ECT_0        0x04 /* one frag had ECN_ECT_0 */
 -#define       IPFRAG_ECN_CE           0x08 /* one frag had ECN_CE */
 -
  static inline u8 ip4_frag_ecn(u8 tos)
  {
        return 1 << (tos & INET_ECN_MASK);
  }
  
 -/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 - * Value : 0xff if frame should be dropped.
 - *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 - */
 -static const u8 ip4_frag_ecn_table[16] = {
 -      /* at least one fragment had CE, and others ECT_0 or ECT_1 */
 -      [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]                      = INET_ECN_CE,
 -      [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]                      = INET_ECN_CE,
 -      [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]   = INET_ECN_CE,
 -
 -      /* invalid combinations : drop frame */
 -      [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
 -      [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
 -      [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
 -      [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
 -      [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
 -      [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
 -      [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
 -};
 -
  static struct inet_frags ip4_frags;
  
  int ip_frag_nqueues(struct net *net)
@@@ -219,8 -248,7 +219,7 @@@ static void ip_expire(unsigned long arg
                if (!head->dev)
                        goto out_rcu_unlock;
  
-               /* skb dst is stale, drop it, and perform route lookup again */
-               skb_dst_drop(head);
+               /* skb has no dst, perform route lookup again */
                iph = ip_hdr(head);
                err = ip_route_input_noref(head, iph->daddr, iph->saddr,
                                           iph->tos, head->dev);
@@@ -494,9 -522,16 +493,16 @@@ found
                qp->q.max_size = skb->len + ihl;
  
        if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-           qp->q.meat == qp->q.len)
-               return ip_frag_reasm(qp, prev, dev);
+           qp->q.meat == qp->q.len) {
+               unsigned long orefdst = skb->_skb_refdst;
  
+               skb->_skb_refdst = 0UL;
+               err = ip_frag_reasm(qp, prev, dev);
+               skb->_skb_refdst = orefdst;
+               return err;
+       }
+       skb_dst_drop(skb);
        inet_frag_lru_move(&qp->q);
        return -EINPROGRESS;
  
@@@ -522,7 -557,7 +528,7 @@@ static int ip_frag_reasm(struct ipq *qp
  
        ipq_kill(qp);
  
 -      ecn = ip4_frag_ecn_table[qp->ecn];
 +      ecn = ip_frag_ecn_table[qp->ecn];
        if (unlikely(ecn == 0xff)) {
                err = -EINVAL;
                goto out_fail;
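
Context for the ip_fragment.c hunk above: the RFC 3168 table is moving to a shared location (it reappears as ip_frag_ecn_table), not changing. Each fragment's 2-bit ECN codepoint becomes one bit in a 4-bit OR mask, and the mask indexes a verdict: 0xff drops the frame, otherwise the value is ORed into the reassembled TOS. A compilable sketch built from the values in the removed block:

#include <stdint.h>
#include <stdio.h>

#define ECN_NOT_ECT 0x01
#define ECN_ECT_1   0x02
#define ECN_ECT_0   0x04
#define ECN_CE      0x08
#define INET_ECN_CE 3                   /* CE codepoint in the TOS field */

/* OR of all fragments' ECN flags -> final action (0xff means drop) */
static const uint8_t frag_ecn_table[16] = {
        [ECN_CE | ECN_ECT_0]                           = INET_ECN_CE,
        [ECN_CE | ECN_ECT_1]                           = INET_ECN_CE,
        [ECN_CE | ECN_ECT_0 | ECN_ECT_1]               = INET_ECN_CE,
        [ECN_NOT_ECT | ECN_CE]                         = 0xff,
        [ECN_NOT_ECT | ECN_ECT_0]                      = 0xff,
        [ECN_NOT_ECT | ECN_ECT_1]                      = 0xff,
        [ECN_NOT_ECT | ECN_ECT_0 | ECN_ECT_1]          = 0xff,
        [ECN_NOT_ECT | ECN_CE | ECN_ECT_0]             = 0xff,
        [ECN_NOT_ECT | ECN_CE | ECN_ECT_1]             = 0xff,
        [ECN_NOT_ECT | ECN_CE | ECN_ECT_0 | ECN_ECT_1] = 0xff,
};

static uint8_t frag_ecn(uint8_t tos)    /* ip4_frag_ecn() equivalent */
{
        return 1 << (tos & 0x03);       /* INET_ECN_MASK */
}

int main(void)
{
        /* one ECT(0) fragment plus one Not-ECT fragment: invalid, drop */
        uint8_t mask = frag_ecn(0x02) | frag_ecn(0x00);

        printf("verdict: 0x%02x (0xff = drop)\n", frag_ecn_table[mask]);
        return 0;
}
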
diff --combined net/ipv4/syncookies.c
index 7f4a5cb8f8d0dc8002e0a12a1450f68076d20641,397e0f69435fd1b1b4b3775cadc409d67ea63332..b05c96e7af8b810a62bb07d95436eea07c651008
@@@ -267,6 -267,7 +267,6 @@@ struct sock *cookie_v4_check(struct soc
                             struct ip_options *opt)
  {
        struct tcp_options_received tcp_opt;
 -      const u8 *hash_location;
        struct inet_request_sock *ireq;
        struct tcp_request_sock *treq;
        struct tcp_sock *tp = tcp_sk(sk);
  
        /* check for timestamp cookie support */
        memset(&tcp_opt, 0, sizeof(tcp_opt));
 -      tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL);
 +      tcp_parse_options(skb, &tcp_opt, 0, NULL);
  
        if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
                goto out;
         * hasn't changed since we received the original syn, but I see
         * no easy way to do this.
         */
-       flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
-                          RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+       flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+                          RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
                           (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
                           ireq->loc_addr, th->source, th->dest);
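
The tcp_parse_options() change above recurs throughout this merge: with TCP cookie transactions removed, the function loses its const u8 **hvpp out-parameter, so callers drop the hash_location variable they never otherwise used. A toy model of the call-site change; the _sketch suffix and the stub body are invented, not kernel code:

#include <string.h>
#include <stdio.h>

struct tcp_options_received { int saw_tstamp; unsigned rcv_tsval; };
struct sk_buff;
struct tcp_fastopen_cookie;

/* new signature: no const u8 **hvpp out-parameter */
static void tcp_parse_options_sketch(const struct sk_buff *skb,
                                     struct tcp_options_received *opt_rx,
                                     int estab,
                                     struct tcp_fastopen_cookie *foc)
{
        (void)skb; (void)estab; (void)foc;
        opt_rx->saw_tstamp = 1;         /* pretend a timestamp was found */
        opt_rx->rcv_tsval = 12345;
}

int main(void)
{
        struct tcp_options_received tcp_opt;

        memset(&tcp_opt, 0, sizeof(tcp_opt));
        /* was: tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL); */
        tcp_parse_options_sketch(NULL, &tcp_opt, 0, NULL);
        printf("saw_tstamp=%d tsval=%u\n",
               tcp_opt.saw_tstamp, tcp_opt.rcv_tsval);
        return 0;
}
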
diff --combined net/ipv4/tcp_input.c
index 6d9ca35f0c35a34be3961405538cb1519790cc5b,13b9c08fc1582531aa34eef92a432dc96f2a1c52..aafd052865ba813f096bb00a16acd612c4d1e6ea
@@@ -93,11 -93,12 +93,11 @@@ int sysctl_tcp_stdurg __read_mostly
  int sysctl_tcp_rfc1337 __read_mostly;
  int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
  int sysctl_tcp_frto __read_mostly = 2;
 -int sysctl_tcp_frto_response __read_mostly;
  
  int sysctl_tcp_thin_dupack __read_mostly;
  
  int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 -int sysctl_tcp_early_retrans __read_mostly = 2;
 +int sysctl_tcp_early_retrans __read_mostly = 3;
  
  #define FLAG_DATA             0x01 /* Incoming frame contained data.          */
  #define FLAG_WIN_UPDATE               0x02 /* Incoming ACK was a window update.       */
  #define FLAG_DATA_SACKED      0x20 /* New SACK.                               */
  #define FLAG_ECE              0x40 /* ECE in this ACK                         */
  #define FLAG_SLOWPATH         0x100 /* Do not skip RFC checks for window update.*/
 -#define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */
 +#define FLAG_ORIG_SACK_ACKED  0x200 /* Never retransmitted data are (s)acked  */
  #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
  #define FLAG_DSACKING_ACK     0x800 /* SACK blocks contained D-SACK info */
 -#define FLAG_NONHEAD_RETRANS_ACKED    0x1000 /* Non-head rexmitted data was ACKed */
  #define FLAG_SACK_RENEGING    0x2000 /* snd_una advanced to a sacked seq */
+ #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
  
  #define FLAG_ACKED            (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
  #define FLAG_NOT_DUP          (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
  #define FLAG_CA_ALERT         (FLAG_DATA_SACKED|FLAG_ECE)
  #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
 -#define FLAG_ANY_PROGRESS     (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
  
  #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
  #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
@@@ -1156,8 -1160,10 +1157,8 @@@ static u8 tcp_sacktag_one(struct sock *
                                           tcp_highest_sack_seq(tp)))
                                        state->reord = min(fack_count,
                                                           state->reord);
 -
 -                              /* SACK enhanced F-RTO (RFC4138; Appendix B) */
 -                              if (!after(end_seq, tp->frto_highmark))
 -                                      state->flag |= FLAG_ONLY_ORIG_SACKED;
 +                              if (!after(end_seq, tp->high_seq))
 +                                      state->flag |= FLAG_ORIG_SACK_ACKED;
                        }
  
                        if (sacked & TCPCB_LOST) {
@@@ -1550,6 -1556,7 +1551,6 @@@ static in
  tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                        u32 prior_snd_una)
  {
 -      const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        const unsigned char *ptr = (skb_transport_header(ack_skb) +
                                    TCP_SKB_CB(ack_skb)->sacked);
@@@ -1722,6 -1729,12 +1723,6 @@@ walk
                                       start_seq, end_seq, dup_sack);
  
  advance_sp:
 -              /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
 -               * due to in-order walk
 -               */
 -              if (after(end_seq, tp->frto_highmark))
 -                      state.flag &= ~FLAG_ONLY_ORIG_SACKED;
 -
                i++;
        }
  
        tcp_verify_left_out(tp);
  
        if ((state.reord < tp->fackets_out) &&
 -          ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) &&
 -          (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
 +          ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
                tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
  
  out:
@@@ -1812,6 -1826,197 +1813,6 @@@ static inline void tcp_reset_reno_sack(
        tp->sacked_out = 0;
  }
  
 -static int tcp_is_sackfrto(const struct tcp_sock *tp)
 -{
 -      return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp);
 -}
 -
 -/* F-RTO can only be used if TCP has never retransmitted anything other than
 - * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
 - */
 -bool tcp_use_frto(struct sock *sk)
 -{
 -      const struct tcp_sock *tp = tcp_sk(sk);
 -      const struct inet_connection_sock *icsk = inet_csk(sk);
 -      struct sk_buff *skb;
 -
 -      if (!sysctl_tcp_frto)
 -              return false;
 -
 -      /* MTU probe and F-RTO won't really play nicely along currently */
 -      if (icsk->icsk_mtup.probe_size)
 -              return false;
 -
 -      if (tcp_is_sackfrto(tp))
 -              return true;
 -
 -      /* Avoid expensive walking of rexmit queue if possible */
 -      if (tp->retrans_out > 1)
 -              return false;
 -
 -      skb = tcp_write_queue_head(sk);
 -      if (tcp_skb_is_last(sk, skb))
 -              return true;
 -      skb = tcp_write_queue_next(sk, skb);    /* Skips head */
 -      tcp_for_write_queue_from(skb, sk) {
 -              if (skb == tcp_send_head(sk))
 -                      break;
 -              if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
 -                      return false;
 -              /* Short-circuit when first non-SACKed skb has been checked */
 -              if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 -                      break;
 -      }
 -      return true;
 -}
 -
 -/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
 - * recovery a bit and use heuristics in tcp_process_frto() to detect if
 - * the RTO was spurious. Only clear SACKED_RETRANS of the head here to
 - * keep retrans_out counting accurate (with SACK F-RTO, other than head
 - * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS
 - * bits are handled if the Loss state is really to be entered (in
 - * tcp_enter_frto_loss).
 - *
 - * Do like tcp_enter_loss() would; when RTO expires the second time it
 - * does:
 - *  "Reduce ssthresh if it has not yet been made inside this window."
 - */
 -void tcp_enter_frto(struct sock *sk)
 -{
 -      const struct inet_connection_sock *icsk = inet_csk(sk);
 -      struct tcp_sock *tp = tcp_sk(sk);
 -      struct sk_buff *skb;
 -
 -      if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) ||
 -          tp->snd_una == tp->high_seq ||
 -          ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
 -           !icsk->icsk_retransmits)) {
 -              tp->prior_ssthresh = tcp_current_ssthresh(sk);
 -              /* Our state is too optimistic in ssthresh() call because cwnd
 -               * is not reduced until tcp_enter_frto_loss() when previous F-RTO
 -               * recovery has not yet completed. Pattern would be this: RTO,
 -               * Cumulative ACK, RTO (2xRTO for the same segment does not end
 -               * up here twice).
 -               * RFC4138 should be more specific on what to do, even though
 -               * RTO is quite unlikely to occur after the first Cumulative ACK
 -               * due to back-off and complexity of triggering events ...
 -               */
 -              if (tp->frto_counter) {
 -                      u32 stored_cwnd;
 -                      stored_cwnd = tp->snd_cwnd;
 -                      tp->snd_cwnd = 2;
 -                      tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 -                      tp->snd_cwnd = stored_cwnd;
 -              } else {
 -                      tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 -              }
 -              /* ... in theory, cong.control module could do "any tricks" in
 -               * ssthresh(), which means that ca_state, lost bits and lost_out
 -               * counter would have to be faked before the call occurs. We
 -               * consider that too expensive, unlikely and hacky, so modules
 -               * using these in ssthresh() must deal these incompatibility
 -               * issues if they receives CA_EVENT_FRTO and frto_counter != 0
 -               */
 -              tcp_ca_event(sk, CA_EVENT_FRTO);
 -      }
 -
 -      tp->undo_marker = tp->snd_una;
 -      tp->undo_retrans = 0;
 -
 -      skb = tcp_write_queue_head(sk);
 -      if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
 -              tp->undo_marker = 0;
 -      if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
 -              TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 -              tp->retrans_out -= tcp_skb_pcount(skb);
 -      }
 -      tcp_verify_left_out(tp);
 -
 -      /* Too bad if TCP was application limited */
 -      tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
 -
 -      /* Earlier loss recovery underway (see RFC4138; Appendix B).
 -       * The last condition is necessary at least in tp->frto_counter case.
 -       */
 -      if (tcp_is_sackfrto(tp) && (tp->frto_counter ||
 -          ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
 -          after(tp->high_seq, tp->snd_una)) {
 -              tp->frto_highmark = tp->high_seq;
 -      } else {
 -              tp->frto_highmark = tp->snd_nxt;
 -      }
 -      tcp_set_ca_state(sk, TCP_CA_Disorder);
 -      tp->high_seq = tp->snd_nxt;
 -      tp->frto_counter = 1;
 -}
 -
 -/* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
 - * which indicates that we should follow the traditional RTO recovery,
 - * i.e. mark everything lost and do go-back-N retransmission.
 - */
 -static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 -{
 -      struct tcp_sock *tp = tcp_sk(sk);
 -      struct sk_buff *skb;
 -
 -      tp->lost_out = 0;
 -      tp->retrans_out = 0;
 -      if (tcp_is_reno(tp))
 -              tcp_reset_reno_sack(tp);
 -
 -      tcp_for_write_queue(skb, sk) {
 -              if (skb == tcp_send_head(sk))
 -                      break;
 -
 -              TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
 -              /*
 -               * Count the retransmission made on RTO correctly (only when
 -               * waiting for the first ACK and did not get it)...
 -               */
 -              if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) {
 -                      /* For some reason this R-bit might get cleared? */
 -                      if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
 -                              tp->retrans_out += tcp_skb_pcount(skb);
 -                      /* ...enter this if branch just for the first segment */
 -                      flag |= FLAG_DATA_ACKED;
 -              } else {
 -                      if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
 -                              tp->undo_marker = 0;
 -                      TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 -              }
 -
 -              /* Marking forward transmissions that were made after RTO lost
 -               * can cause unnecessary retransmissions in some scenarios,
 -               * SACK blocks will mitigate that in some but not in all cases.
 -               * We used to not mark them but it was causing break-ups with
 -               * receivers that do only in-order receival.
 -               *
 -               * TODO: we could detect presence of such receiver and select
 -               * different behavior per flow.
 -               */
 -              if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
 -                      TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 -                      tp->lost_out += tcp_skb_pcount(skb);
 -                      tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
 -              }
 -      }
 -      tcp_verify_left_out(tp);
 -
 -      tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments;
 -      tp->snd_cwnd_cnt = 0;
 -      tp->snd_cwnd_stamp = tcp_time_stamp;
 -      tp->frto_counter = 0;
 -
 -      tp->reordering = min_t(unsigned int, tp->reordering,
 -                             sysctl_tcp_reordering);
 -      tcp_set_ca_state(sk, TCP_CA_Loss);
 -      tp->high_seq = tp->snd_nxt;
 -      TCP_ECN_queue_cwr(tp);
 -
 -      tcp_clear_all_retrans_hints(tp);
 -}
 -
  static void tcp_clear_retrans_partial(struct tcp_sock *tp)
  {
        tp->retrans_out = 0;
@@@ -1838,13 -2043,10 +1839,13 @@@ void tcp_enter_loss(struct sock *sk, in
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
 +      bool new_recovery = false;
  
        /* Reduce ssthresh if it has not yet been made inside this window. */
 -      if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
 +      if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
 +          !after(tp->high_seq, tp->snd_una) ||
            (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
 +              new_recovery = true;
                tp->prior_ssthresh = tcp_current_ssthresh(sk);
                tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                tcp_ca_event(sk, CA_EVENT_LOSS);
        tcp_set_ca_state(sk, TCP_CA_Loss);
        tp->high_seq = tp->snd_nxt;
        TCP_ECN_queue_cwr(tp);
 -      /* Abort F-RTO algorithm if one is in progress */
 -      tp->frto_counter = 0;
 +
 +      /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
 +       * loss recovery is underway except recurring timeout(s) on
 +       * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
 +       */
 +      tp->frto = sysctl_tcp_frto &&
 +                 (new_recovery || icsk->icsk_retransmits) &&
 +                 !inet_csk(sk)->icsk_mtup.probe_size;
  }
  
  /* If ACK arrived pointing to a remembered SACK, it means that our
@@@ -1952,16 -2148,15 +1953,16 @@@ static bool tcp_pause_early_retransmit(
         * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
         * available, or RTO is scheduled to fire first.
         */
 -      if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt)
 +      if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 ||
 +          (flag & FLAG_ECE) || !tp->srtt)
                return false;
  
        delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
        if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
                return false;
  
 -      inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX);
 -      tp->early_retrans_delayed = 1;
 +      inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
 +                                TCP_RTO_MAX);
        return true;
  }
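
On the delay computed above: tp->srtt stores the smoothed RTT scaled by 8, so srtt >> 5 is RTT/4, matching the max(RTT/4, 2msec) comment. A small sketch of the arithmetic, assuming HZ=1000 so one jiffy is one millisecond:

#include <stdio.h>

static unsigned long msecs_to_jiffies(unsigned long ms) { return ms; }

static unsigned long max_ul(unsigned long a, unsigned long b)
{
        return a > b ? a : b;
}

/* srtt8 is the smoothed RTT scaled by 8, as in tp->srtt */
static unsigned long early_retrans_delay(unsigned long srtt8)
{
        /* srtt8 >> 5 == (8 * RTT) / 32 == RTT / 4 */
        return max_ul(srtt8 >> 5, msecs_to_jiffies(2));
}

int main(void)
{
        printf("RTT=40ms -> delay=%lu ms\n", early_retrans_delay(8 * 40));
        printf("RTT=4ms  -> delay=%lu ms (floored at 2)\n",
               early_retrans_delay(8 * 4));
        return 0;
}
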
  
@@@ -2077,6 -2272,10 +2078,6 @@@ static bool tcp_time_to_recover(struct 
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 packets_out;
  
 -      /* Do not perform any recovery during F-RTO algorithm */
 -      if (tp->frto_counter)
 -              return false;
 -
        /* Trick#1: The loss is proven. */
        if (tp->lost_out)
                return true;
         * interval if appropriate.
         */
        if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
 -          (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) &&
 +          (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) &&
            !tcp_may_send_now(sk))
                return !tcp_pause_early_retransmit(sk, flag);
  
@@@ -2437,12 -2636,12 +2438,12 @@@ static int tcp_try_undo_partial(struct 
        return failed;
  }
  
 -/* Undo during loss recovery after partial ACK. */
 -static bool tcp_try_undo_loss(struct sock *sk)
 +/* Undo during loss recovery after partial ACK or using F-RTO. */
 +static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
  {
        struct tcp_sock *tp = tcp_sk(sk);
  
 -      if (tcp_may_undo(tp)) {
 +      if (frto_undo || tcp_may_undo(tp)) {
                struct sk_buff *skb;
                tcp_for_write_queue(skb, sk) {
                        if (skb == tcp_send_head(sk))
                tp->lost_out = 0;
                tcp_undo_cwr(sk, true);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 +              if (frto_undo)
 +                      NET_INC_STATS_BH(sock_net(sk),
 +                                       LINUX_MIB_TCPSPURIOUSRTOS);
                inet_csk(sk)->icsk_retransmits = 0;
                tp->undo_marker = 0;
 -              if (tcp_is_sack(tp))
 +              if (frto_undo || tcp_is_sack(tp))
                        tcp_set_ca_state(sk, TCP_CA_Open);
                return true;
        }
@@@ -2483,7 -2679,6 +2484,7 @@@ static void tcp_init_cwnd_reduction(str
        struct tcp_sock *tp = tcp_sk(sk);
  
        tp->high_seq = tp->snd_nxt;
 +      tp->tlp_high_seq = 0;
        tp->snd_cwnd_cnt = 0;
        tp->prior_cwnd = tp->snd_cwnd;
        tp->prr_delivered = 0;
@@@ -2561,7 -2756,7 +2562,7 @@@ static void tcp_try_to_open(struct soc
  
        tcp_verify_left_out(tp);
  
 -      if (!tp->frto_counter && !tcp_any_retrans_done(sk))
 +      if (!tcp_any_retrans_done(sk))
                tp->retrans_stamp = 0;
  
        if (flag & FLAG_ECE)
@@@ -2678,58 -2873,6 +2679,58 @@@ static void tcp_enter_recovery(struct s
        tcp_set_ca_state(sk, TCP_CA_Recovery);
  }
  
 +/* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
 + * recovered or spurious. Otherwise retransmits more on partial ACKs.
 + */
 +static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 +{
 +      struct inet_connection_sock *icsk = inet_csk(sk);
 +      struct tcp_sock *tp = tcp_sk(sk);
 +      bool recovered = !before(tp->snd_una, tp->high_seq);
 +
 +      if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
 +              if (flag & FLAG_ORIG_SACK_ACKED) {
 +                      /* Step 3.b. A timeout is spurious if not all data are
 +                       * lost, i.e., never-retransmitted data are (s)acked.
 +                       */
 +                      tcp_try_undo_loss(sk, true);
 +                      return;
 +              }
 +              if (after(tp->snd_nxt, tp->high_seq) &&
 +                  (flag & FLAG_DATA_SACKED || is_dupack)) {
 +                      tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
 +              } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
 +                      tp->high_seq = tp->snd_nxt;
 +                      __tcp_push_pending_frames(sk, tcp_current_mss(sk),
 +                                                TCP_NAGLE_OFF);
 +                      if (after(tp->snd_nxt, tp->high_seq))
 +                              return; /* Step 2.b */
 +                      tp->frto = 0;
 +              }
 +      }
 +
 +      if (recovered) {
 +              /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
 +              icsk->icsk_retransmits = 0;
 +              tcp_try_undo_recovery(sk);
 +              return;
 +      }
 +      if (flag & FLAG_DATA_ACKED)
 +              icsk->icsk_retransmits = 0;
 +      if (tcp_is_reno(tp)) {
 +              /* A Reno DUPACK means new data in F-RTO step 2.b above are
 +               * delivered. Lower inflight to clock out (re)transmissions.
 +               */
 +              if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
 +                      tcp_add_reno_sack(sk);
 +              else if (flag & FLAG_SND_UNA_ADVANCED)
 +                      tcp_reset_reno_sack(tp);
 +      }
 +      if (tcp_try_undo_loss(sk, false))
 +              return;
 +      tcp_xmit_retransmit_queue(sk);
 +}
 +
  /* Process an event, which can update packets-in-flight not trivially.
   * Main goal of this function is to calculate new estimate for left_out,
   * taking into account both packets sitting in receiver's buffer and
@@@ -2776,6 -2919,12 +2777,6 @@@ static void tcp_fastretrans_alert(struc
                tp->retrans_stamp = 0;
        } else if (!before(tp->snd_una, tp->high_seq)) {
                switch (icsk->icsk_ca_state) {
 -              case TCP_CA_Loss:
 -                      icsk->icsk_retransmits = 0;
 -                      if (tcp_try_undo_recovery(sk))
 -                              return;
 -                      break;
 -
                case TCP_CA_CWR:
                        /* CWR is to be held something *above* high_seq
                         * is ACKed for CWR bit to reach receiver. */
                newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
                break;
        case TCP_CA_Loss:
 -              if (flag & FLAG_DATA_ACKED)
 -                      icsk->icsk_retransmits = 0;
 -              if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED)
 -                      tcp_reset_reno_sack(tp);
 -              if (!tcp_try_undo_loss(sk)) {
 -                      tcp_moderate_cwnd(tp);
 -                      tcp_xmit_retransmit_queue(sk);
 -                      return;
 -              }
 +              tcp_process_loss(sk, flag, is_dupack);
                if (icsk->icsk_ca_state != TCP_CA_Open)
                        return;
 -              /* Loss is undone; fall through to processing in Open state. */
 +              /* Fall through to processing in Open state. */
        default:
                if (tcp_is_reno(tp)) {
                        if (flag & FLAG_SND_UNA_ADVANCED)
@@@ -2922,7 -3079,6 +2923,7 @@@ static void tcp_cong_avoid(struct sock 
   */
  void tcp_rearm_rto(struct sock *sk)
  {
 +      const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
  
        /* If the retrans timer is currently being used by Fast Open
        } else {
                u32 rto = inet_csk(sk)->icsk_rto;
                /* Offset the time elapsed after installing regular RTO */
 -              if (tp->early_retrans_delayed) {
 +              if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
 +                  icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                        struct sk_buff *skb = tcp_write_queue_head(sk);
                        const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
                        s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
                        /* delta may not be positive if the socket is locked
 -                       * when the delayed ER timer fires and is rescheduled.
 +                       * when the retrans timer fires and is rescheduled.
                         */
                        if (delta > 0)
                                rto = delta;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
                                          TCP_RTO_MAX);
        }
 -      tp->early_retrans_delayed = 0;
  }
  
  /* This function is called when the delayed ER timer fires. TCP enters
@@@ -3037,6 -3193,8 +3038,6 @@@ static int tcp_clean_rtx_queue(struct s
                        flag |= FLAG_RETRANS_DATA_ACKED;
                        ca_seq_rtt = -1;
                        seq_rtt = -1;
 -                      if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1))
 -                              flag |= FLAG_NONHEAD_RETRANS_ACKED;
                } else {
                        ca_seq_rtt = now - scb->when;
                        last_ackt = skb->tstamp;
                        }
                        if (!(sacked & TCPCB_SACKED_ACKED))
                                reord = min(pkts_acked, reord);
 +                      if (!after(scb->end_seq, tp->high_seq))
 +                              flag |= FLAG_ORIG_SACK_ACKED;
                }
  
                if (sacked & TCPCB_SACKED_ACKED)
@@@ -3247,6 -3403,150 +3248,6 @@@ static int tcp_ack_update_window(struc
        return flag;
  }
  
 -/* A very conservative spurious RTO response algorithm: reduce cwnd and
 - * continue in congestion avoidance.
 - */
 -static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
 -{
 -      tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
 -      tp->snd_cwnd_cnt = 0;
 -      TCP_ECN_queue_cwr(tp);
 -      tcp_moderate_cwnd(tp);
 -}
 -
 -/* A conservative spurious RTO response algorithm: reduce cwnd using
 - * PRR and continue in congestion avoidance.
 - */
 -static void tcp_cwr_spur_to_response(struct sock *sk)
 -{
 -      tcp_enter_cwr(sk, 0);
 -}
 -
 -static void tcp_undo_spur_to_response(struct sock *sk, int flag)
 -{
 -      if (flag & FLAG_ECE)
 -              tcp_cwr_spur_to_response(sk);
 -      else
 -              tcp_undo_cwr(sk, true);
 -}
 -
 -/* F-RTO spurious RTO detection algorithm (RFC4138)
 - *
 - * F-RTO affects during two new ACKs following RTO (well, almost, see inline
 - * comments). State (ACK number) is kept in frto_counter. When ACK advances
 - * window (but not to or beyond highest sequence sent before RTO):
 - *   On First ACK,  send two new segments out.
 - *   On Second ACK, RTO was likely spurious. Do spurious response (response
 - *                  algorithm is not part of the F-RTO detection algorithm
 - *                  given in RFC4138 but can be selected separately).
 - * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss
 - * and TCP falls back to conventional RTO recovery. F-RTO allows overriding
 - * of Nagle, this is done using frto_counter states 2 and 3, when a new data
 - * segment of any size sent during F-RTO, state 2 is upgraded to 3.
 - *
 - * Rationale: if the RTO was spurious, new ACKs should arrive from the
 - * original window even after we transmit two new data segments.
 - *
 - * SACK version:
 - *   on first step, wait until first cumulative ACK arrives, then move to
 - *   the second step. In second step, the next ACK decides.
 - *
 - * F-RTO is implemented (mainly) in four functions:
 - *   - tcp_use_frto() is used to determine if TCP is can use F-RTO
 - *   - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is
 - *     called when tcp_use_frto() showed green light
 - *   - tcp_process_frto() handles incoming ACKs during F-RTO algorithm
 - *   - tcp_enter_frto_loss() is called if there is not enough evidence
 - *     to prove that the RTO is indeed spurious. It transfers the control
 - *     from F-RTO to the conventional RTO recovery
 - */
 -static bool tcp_process_frto(struct sock *sk, int flag)
 -{
 -      struct tcp_sock *tp = tcp_sk(sk);
 -
 -      tcp_verify_left_out(tp);
 -
 -      /* Duplicate the behavior from Loss state (fastretrans_alert) */
 -      if (flag & FLAG_DATA_ACKED)
 -              inet_csk(sk)->icsk_retransmits = 0;
 -
 -      if ((flag & FLAG_NONHEAD_RETRANS_ACKED) ||
 -          ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED)))
 -              tp->undo_marker = 0;
 -
 -      if (!before(tp->snd_una, tp->frto_highmark)) {
 -              tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
 -              return true;
 -      }
 -
 -      if (!tcp_is_sackfrto(tp)) {
 -              /* RFC4138 shortcoming in step 2; should also have case c):
 -               * ACK isn't duplicate nor advances window, e.g., opposite dir
 -               * data, winupdate
 -               */
 -              if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
 -                      return true;
 -
 -              if (!(flag & FLAG_DATA_ACKED)) {
 -                      tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
 -                                          flag);
 -                      return true;
 -              }
 -      } else {
 -              if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
 -                      if (!tcp_packets_in_flight(tp)) {
 -                              tcp_enter_frto_loss(sk, 2, flag);
 -                              return true;
 -                      }
 -
 -                      /* Prevent sending of new data. */
 -                      tp->snd_cwnd = min(tp->snd_cwnd,
 -                                         tcp_packets_in_flight(tp));
 -                      return true;
 -              }
 -
 -              if ((tp->frto_counter >= 2) &&
 -                  (!(flag & FLAG_FORWARD_PROGRESS) ||
 -                   ((flag & FLAG_DATA_SACKED) &&
 -                    !(flag & FLAG_ONLY_ORIG_SACKED)))) {
 -                      /* RFC4138 shortcoming (see comment above) */
 -                      if (!(flag & FLAG_FORWARD_PROGRESS) &&
 -                          (flag & FLAG_NOT_DUP))
 -                              return true;
 -
 -                      tcp_enter_frto_loss(sk, 3, flag);
 -                      return true;
 -              }
 -      }
 -
 -      if (tp->frto_counter == 1) {
 -              /* tcp_may_send_now needs to see updated state */
 -              tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
 -              tp->frto_counter = 2;
 -
 -              if (!tcp_may_send_now(sk))
 -                      tcp_enter_frto_loss(sk, 2, flag);
 -
 -              return true;
 -      } else {
 -              switch (sysctl_tcp_frto_response) {
 -              case 2:
 -                      tcp_undo_spur_to_response(sk, flag);
 -                      break;
 -              case 1:
 -                      tcp_conservative_spur_to_response(tp);
 -                      break;
 -              default:
 -                      tcp_cwr_spur_to_response(sk);
 -                      break;
 -              }
 -              tp->frto_counter = 0;
 -              tp->undo_marker = 0;
 -              NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
 -      }
 -      return false;
 -}
 -
  /* RFC 5961 7 [ACK Throttling] */
  static void tcp_send_challenge_ack(struct sock *sk)
  {
        }
  }
  
+ static void tcp_store_ts_recent(struct tcp_sock *tp)
+ {
+       tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
+       tp->rx_opt.ts_recent_stamp = get_seconds();
+ }
+ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
+ {
+       if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
+               /* PAWS bug workaround wrt. ACK frames, the PAWS discard
+                * extra check below makes sure this can only happen
+                * for pure ACK frames.  -DaveM
+                *
+                * Not only that: it also occurs for expired timestamps.
+                */
+               if (tcp_paws_check(&tp->rx_opt, 0))
+                       tcp_store_ts_recent(tp);
+       }
+ }
 +/* This routine deals with acks during a TLP episode.
 + * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
 + */
 +static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 +{
 +      struct tcp_sock *tp = tcp_sk(sk);
 +      bool is_tlp_dupack = (ack == tp->tlp_high_seq) &&
 +                           !(flag & (FLAG_SND_UNA_ADVANCED |
 +                                     FLAG_NOT_DUP | FLAG_DATA_SACKED));
 +
 +      /* Mark the end of the TLP episode on receiving a TLP dupack or when
 +       * the ack is after tlp_high_seq.
 +       */
 +      if (is_tlp_dupack) {
 +              tp->tlp_high_seq = 0;
 +              return;
 +      }
 +
 +      if (after(ack, tp->tlp_high_seq)) {
 +              tp->tlp_high_seq = 0;
 +              /* Don't reduce cwnd if DSACK arrives for TLP retrans. */
 +              if (!(flag & FLAG_DSACKING_ACK)) {
 +                      tcp_init_cwnd_reduction(sk, true);
 +                      tcp_set_ca_state(sk, TCP_CA_CWR);
 +                      tcp_end_cwnd_reduction(sk);
 +                      tcp_set_ca_state(sk, TCP_CA_Open);
 +                      NET_INC_STATS_BH(sock_net(sk),
 +                                       LINUX_MIB_TCPLOSSPROBERECOVERY);
 +              }
 +      }
 +}
 +
  /* This routine deals with incoming acks, but not outgoing ones. */
  static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
  {
        int prior_packets;
        int prior_sacked = tp->sacked_out;
        int pkts_acked = 0;
 -      bool frto_cwnd = false;
  
        /* If the ack is older than previous acks
         * then we can probably ignore it.
        if (after(ack, tp->snd_nxt))
                goto invalid_ack;
  
 -      if (tp->early_retrans_delayed)
 +      if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
 +          icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
                tcp_rearm_rto(sk);
  
        if (after(ack, prior_snd_una))
        prior_fackets = tp->fackets_out;
        prior_in_flight = tcp_packets_in_flight(tp);
  
+       /* ts_recent update must be made after we are sure that the packet
+        * is in window.
+        */
+       if (flag & FLAG_UPDATE_TS_RECENT)
+               tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
        if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
                /* Window is constant, pure forward advance.
                 * No more checks are required.
  
        pkts_acked = prior_packets - tp->packets_out;
  
 -      if (tp->frto_counter)
 -              frto_cwnd = tcp_process_frto(sk, flag);
 -      /* Guarantee sacktag reordering detection against wrap-arounds */
 -      if (before(tp->frto_highmark, tp->snd_una))
 -              tp->frto_highmark = 0;
 -
        if (tcp_ack_is_dubious(sk, flag)) {
                /* Advance CWND, if state allows this. */
 -              if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
 -                  tcp_may_raise_cwnd(sk, flag))
 +              if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
                        tcp_cong_avoid(sk, ack, prior_in_flight);
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
                tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
                                      is_dupack, flag);
        } else {
 -              if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
 +              if (flag & FLAG_DATA_ACKED)
                        tcp_cong_avoid(sk, ack, prior_in_flight);
        }
  
 +      if (tp->tlp_high_seq)
 +              tcp_process_tlp_ack(sk, ack, flag);
 +
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
                struct dst_entry *dst = __sk_dst_get(sk);
                if (dst)
                        dst_confirm(dst);
        }
 +
 +      if (icsk->icsk_pending == ICSK_TIME_RETRANS)
 +              tcp_schedule_loss_probe(sk);
        return 1;
  
  no_queue:
         */
        if (tcp_send_head(sk))
                tcp_ack_probe(sk);
 +
 +      if (tp->tlp_high_seq)
 +              tcp_process_tlp_ack(sk, ack, flag);
        return 1;
  
  invalid_ack:
@@@ -3447,8 -3740,8 +3475,8 @@@ old_ack
   * But, this can also be called on packets in the established flow when
   * the fast version below fails.
   */
 -void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
 -                     const u8 **hvpp, int estab,
 +void tcp_parse_options(const struct sk_buff *skb,
 +                     struct tcp_options_received *opt_rx, int estab,
                       struct tcp_fastopen_cookie *foc)
  {
        const unsigned char *ptr;
                                 */
                                break;
  #endif
 -                      case TCPOPT_COOKIE:
 -                              /* This option is variable length.
 -                               */
 -                              switch (opsize) {
 -                              case TCPOLEN_COOKIE_BASE:
 -                                      /* not yet implemented */
 -                                      break;
 -                              case TCPOLEN_COOKIE_PAIR:
 -                                      /* not yet implemented */
 -                                      break;
 -                              case TCPOLEN_COOKIE_MIN+0:
 -                              case TCPOLEN_COOKIE_MIN+2:
 -                              case TCPOLEN_COOKIE_MIN+4:
 -                              case TCPOLEN_COOKIE_MIN+6:
 -                              case TCPOLEN_COOKIE_MAX:
 -                                      /* 16-bit multiple */
 -                                      opt_rx->cookie_plus = opsize;
 -                                      *hvpp = ptr;
 -                                      break;
 -                              default:
 -                                      /* ignore option */
 -                                      break;
 -                              }
 -                              break;
 -
                        case TCPOPT_EXP:
                                /* Fast Open option shares code 254 using a
                                 * 16 bits magic number. It's valid only in
@@@ -3577,7 -3895,8 +3605,7 @@@ static bool tcp_parse_aligned_timestamp
   * If it is wrong it falls back on tcp_parse_options().
   */
  static bool tcp_fast_parse_options(const struct sk_buff *skb,
 -                                 const struct tcphdr *th,
 -                                 struct tcp_sock *tp, const u8 **hvpp)
 +                                 const struct tcphdr *th, struct tcp_sock *tp)
  {
        /* In the spirit of fast parsing, compare doff directly to constant
         * values.  Because equality is used, short doff can be ignored here.
                        return true;
        }
  
 -      tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
 +      tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
        if (tp->rx_opt.saw_tstamp)
                tp->rx_opt.rcv_tsecr -= tp->tsoffset;
  
@@@ -3636,27 -3955,6 +3664,6 @@@ const u8 *tcp_parse_md5sig_option(cons
  EXPORT_SYMBOL(tcp_parse_md5sig_option);
  #endif
  
- static inline void tcp_store_ts_recent(struct tcp_sock *tp)
- {
-       tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
-       tp->rx_opt.ts_recent_stamp = get_seconds();
- }
- static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
- {
-       if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
-               /* PAWS bug workaround wrt. ACK frames, the PAWS discard
-                * extra check below makes sure this can only happen
-                * for pure ACK frames.  -DaveM
-                *
-                * Not only, also it occurs for expired timestamps.
-                */
-               if (tcp_paws_check(&tp->rx_opt, 0))
-                       tcp_store_ts_recent(tp);
-       }
- }
  /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
   *
   * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
  static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
                                  const struct tcphdr *th, int syn_inerr)
  {
 -      const u8 *hash_location;
        struct tcp_sock *tp = tcp_sk(sk);
  
        /* RFC1323: H1. Apply PAWS check first. */
 -      if (tcp_fast_parse_options(skb, th, tp, &hash_location) &&
 -          tp->rx_opt.saw_tstamp &&
 +      if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
            tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
@@@ -5250,14 -5550,9 +5257,9 @@@ slow_path
                return 0;
  
  step5:
-       if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+       if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
                goto discard;
  
-       /* ts_recent update must be made after we are sure that the packet
-        * is in window.
-        */
-       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
        tcp_rcv_rtt_measure_ts(sk, skb);
  
        /* Process urgent data. */
@@@ -5329,11 -5624,12 +5331,11 @@@ static bool tcp_rcv_fastopen_synack(str
  
        if (mss == tp->rx_opt.user_mss) {
                struct tcp_options_received opt;
 -              const u8 *hash_location;
  
                /* Get original SYNACK MSS value if user MSS sets mss_clamp */
                tcp_clear_options(&opt);
                opt.user_mss = opt.mss_clamp = 0;
 -              tcp_parse_options(synack, &opt, &hash_location, 0, NULL);
 +              tcp_parse_options(synack, &opt, 0, NULL);
                mss = opt.mss_clamp;
        }
  
  static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                                         const struct tcphdr *th, unsigned int len)
  {
 -      const u8 *hash_location;
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 -      struct tcp_cookie_values *cvp = tp->cookie_values;
        struct tcp_fastopen_cookie foc = { .len = -1 };
        int saved_clamp = tp->rx_opt.mss_clamp;
  
 -      tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
 +      tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
        if (tp->rx_opt.saw_tstamp)
                tp->rx_opt.rcv_tsecr -= tp->tsoffset;
  
                 * is initialized. */
                tp->copied_seq = tp->rcv_nxt;
  
 -              if (cvp != NULL &&
 -                  cvp->cookie_pair_size > 0 &&
 -                  tp->rx_opt.cookie_plus > 0) {
 -                      int cookie_size = tp->rx_opt.cookie_plus
 -                                      - TCPOLEN_COOKIE_BASE;
 -                      int cookie_pair_size = cookie_size
 -                                           + cvp->cookie_desired;
 -
 -                      /* A cookie extension option was sent and returned.
 -                       * Note that each incoming SYNACK replaces the
 -                       * Responder cookie.  The initial exchange is most
 -                       * fragile, as protection against spoofing relies
 -                       * entirely upon the sequence and timestamp (above).
 -                       * This replacement strategy allows the correct pair to
 -                       * pass through, while any others will be filtered via
 -                       * Responder verification later.
 -                       */
 -                      if (sizeof(cvp->cookie_pair) >= cookie_pair_size) {
 -                              memcpy(&cvp->cookie_pair[cvp->cookie_desired],
 -                                     hash_location, cookie_size);
 -                              cvp->cookie_pair_size = cookie_pair_size;
 -                      }
 -              }
 -
                smp_mb();
  
                tcp_finish_connect(sk, skb);
@@@ -5666,7 -5988,8 +5668,8 @@@ int tcp_rcv_state_process(struct sock *
  
        /* step 5: check the ACK field */
        if (true) {
-               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
+               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
+                                                 FLAG_UPDATE_TS_RECENT) > 0;
  
                switch (sk->sk_state) {
                case TCP_SYN_RECV:
                }
        }
  
-       /* ts_recent update must be made after we are sure that the packet
-        * is in window.
-        */
-       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
        /* step 6: check the URG bit */
        tcp_urg(sk, skb, th);
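
The tcp_input.c portion above moves the ts_recent update out of the slow
path: tcp_ack() is now invoked with the new FLAG_UPDATE_TS_RECENT bit and
performs the update itself, once the ACK is known to be acceptable and
in-window. A minimal userspace sketch of the resulting ordering follows;
the types and helpers are simplified stand-ins, not the kernel's.

#include <stdbool.h>
#include <stdint.h>

struct rx_opt {
        uint32_t ts_recent;     /* timestamp we will echo to the peer */
        uint32_t rcv_tsval;     /* timestamp carried by this segment */
        bool saw_tstamp;
};

/* PAWS: accept the timestamp only if it did not move backwards. */
static bool paws_ok(const struct rx_opt *o)
{
        return (int32_t)(o->rcv_tsval - o->ts_recent) >= 0;
}

/* Stand-in for the ts_recent update as run from tcp_ack() under
 * FLAG_UPDATE_TS_RECENT: called only after the ACK was validated as
 * in-window, never before.
 */
static void update_ts_recent(struct rx_opt *o)
{
        if (o->saw_tstamp && paws_ok(o))
                o->ts_recent = o->rcv_tsval;
}
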
  
diff --combined net/ipv4/tcp_output.c
index 5f28131eb37e35c4f858566e9cd9c6dbf24ff0d9,509912a5ff98e73edfe9d60295de196ed16874a2..b735c23a961df203fd16a03c757badecd0a6dbfc
@@@ -65,22 -65,27 +65,22 @@@ int sysctl_tcp_base_mss __read_mostly 
  /* By default, RFC2861 behavior.  */
  int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
  
 -int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
 -EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
 -
  static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                           int push_one, gfp_t gfp);
  
  /* Account for new data that has been sent to the network. */
  static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
  {
 +      struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int prior_packets = tp->packets_out;
  
        tcp_advance_send_head(sk, skb);
        tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
  
 -      /* Don't override Nagle indefinitely with F-RTO */
 -      if (tp->frto_counter == 2)
 -              tp->frto_counter = 3;
 -
        tp->packets_out += tcp_skb_pcount(skb);
 -      if (!prior_packets || tp->early_retrans_delayed)
 +      if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
 +          icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
                tcp_rearm_rto(sk);
  }
  
@@@ -379,6 -384,7 +379,6 @@@ static inline bool tcp_urg_mode(const s
  #define OPTION_TS             (1 << 1)
  #define OPTION_MD5            (1 << 2)
  #define OPTION_WSCALE         (1 << 3)
 -#define OPTION_COOKIE_EXTENSION       (1 << 4)
  #define OPTION_FAST_OPEN_COOKIE       (1 << 8)
  
  struct tcp_out_options {
        struct tcp_fastopen_cookie *fastopen_cookie;    /* Fast open cookie */
  };
  
 -/* The sysctl int routines are generic, so check consistency here.
 - */
 -static u8 tcp_cookie_size_check(u8 desired)
 -{
 -      int cookie_size;
 -
 -      if (desired > 0)
 -              /* previously specified */
 -              return desired;
 -
 -      cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
 -      if (cookie_size <= 0)
 -              /* no default specified */
 -              return 0;
 -
 -      if (cookie_size <= TCP_COOKIE_MIN)
 -              /* value too small, specify minimum */
 -              return TCP_COOKIE_MIN;
 -
 -      if (cookie_size >= TCP_COOKIE_MAX)
 -              /* value too large, specify maximum */
 -              return TCP_COOKIE_MAX;
 -
 -      if (cookie_size & 1)
 -              /* 8-bit multiple, illegal, fix it */
 -              cookie_size++;
 -
 -      return (u8)cookie_size;
 -}
 -
  /* Write previously computed TCP options to the packet.
   *
   * Beware: Something in the Internet is very sensitive to the ordering of
@@@ -410,9 -446,27 +410,9 @@@ static void tcp_options_write(__be32 *p
  {
        u16 options = opts->options;    /* mungable copy */
  
 -      /* Having both authentication and cookies for security is redundant,
 -       * and there's certainly not enough room.  Instead, the cookie-less
 -       * extension variant is proposed.
 -       *
 -       * Consider the pessimal case with authentication.  The options
 -       * could look like:
 -       *   COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40
 -       */
        if (unlikely(OPTION_MD5 & options)) {
 -              if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
 -                      *ptr++ = htonl((TCPOPT_COOKIE << 24) |
 -                                     (TCPOLEN_COOKIE_BASE << 16) |
 -                                     (TCPOPT_MD5SIG << 8) |
 -                                     TCPOLEN_MD5SIG);
 -              } else {
 -                      *ptr++ = htonl((TCPOPT_NOP << 24) |
 -                                     (TCPOPT_NOP << 16) |
 -                                     (TCPOPT_MD5SIG << 8) |
 -                                     TCPOLEN_MD5SIG);
 -              }
 -              options &= ~OPTION_COOKIE_EXTENSION;
 +              *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 +                             (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
                /* overload cookie hash location */
                opts->hash_location = (__u8 *)ptr;
                ptr += 4;
                *ptr++ = htonl(opts->tsecr);
        }
  
 -      /* Specification requires after timestamp, so do it now.
 -       *
 -       * Consider the pessimal case without authentication.  The options
 -       * could look like:
 -       *   MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40
 -       */
 -      if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
 -              __u8 *cookie_copy = opts->hash_location;
 -              u8 cookie_size = opts->hash_size;
 -
 -              /* 8-bit multiple handled in tcp_cookie_size_check() above,
 -               * and elsewhere.
 -               */
 -              if (0x2 & cookie_size) {
 -                      __u8 *p = (__u8 *)ptr;
 -
 -                      /* 16-bit multiple */
 -                      *p++ = TCPOPT_COOKIE;
 -                      *p++ = TCPOLEN_COOKIE_BASE + cookie_size;
 -                      *p++ = *cookie_copy++;
 -                      *p++ = *cookie_copy++;
 -                      ptr++;
 -                      cookie_size -= 2;
 -              } else {
 -                      /* 32-bit multiple */
 -                      *ptr++ = htonl(((TCPOPT_NOP << 24) |
 -                                      (TCPOPT_NOP << 16) |
 -                                      (TCPOPT_COOKIE << 8) |
 -                                      TCPOLEN_COOKIE_BASE) +
 -                                     cookie_size);
 -              }
 -
 -              if (cookie_size > 0) {
 -                      memcpy(ptr, cookie_copy, cookie_size);
 -                      ptr += (cookie_size / 4);
 -              }
 -      }
 -
        if (unlikely(OPTION_SACK_ADVERTISE & options)) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
@@@ -499,7 -591,11 +499,7 @@@ static unsigned int tcp_syn_options(str
                                struct tcp_md5sig_key **md5)
  {
        struct tcp_sock *tp = tcp_sk(sk);
 -      struct tcp_cookie_values *cvp = tp->cookie_values;
        unsigned int remaining = MAX_TCP_OPTION_SPACE;
 -      u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
 -                       tcp_cookie_size_check(cvp->cookie_desired) :
 -                       0;
        struct tcp_fastopen_request *fastopen = tp->fastopen_req;
  
  #ifdef CONFIG_TCP_MD5SIG
                        tp->syn_fastopen = 1;
                }
        }
 -      /* Note that timestamps are required by the specification.
 -       *
 -       * Odd numbers of bytes are prohibited by the specification, ensuring
 -       * that the cookie is 16-bit aligned, and the resulting cookie pair is
 -       * 32-bit aligned.
 -       */
 -      if (*md5 == NULL &&
 -          (OPTION_TS & opts->options) &&
 -          cookie_size > 0) {
 -              int need = TCPOLEN_COOKIE_BASE + cookie_size;
 -
 -              if (0x2 & need) {
 -                      /* 32-bit multiple */
 -                      need += 2; /* NOPs */
 -
 -                      if (need > remaining) {
 -                              /* try shrinking cookie to fit */
 -                              cookie_size -= 2;
 -                              need -= 4;
 -                      }
 -              }
 -              while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
 -                      cookie_size -= 4;
 -                      need -= 4;
 -              }
 -              if (TCP_COOKIE_MIN <= cookie_size) {
 -                      opts->options |= OPTION_COOKIE_EXTENSION;
 -                      opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
 -                      opts->hash_size = cookie_size;
 -
 -                      /* Remember for future incarnations. */
 -                      cvp->cookie_desired = cookie_size;
 -
 -                      if (cvp->cookie_desired != cvp->cookie_pair_size) {
 -                              /* Currently use random bytes as a nonce,
 -                               * assuming these are completely unpredictable
 -                               * by hostile users of the same system.
 -                               */
 -                              get_random_bytes(&cvp->cookie_pair[0],
 -                                               cookie_size);
 -                              cvp->cookie_pair_size = cookie_size;
 -                      }
  
 -                      remaining -= need;
 -              }
 -      }
        return MAX_TCP_OPTION_SPACE - remaining;
  }
  
@@@ -561,10 -702,14 +561,10 @@@ static unsigned int tcp_synack_options(
                                   unsigned int mss, struct sk_buff *skb,
                                   struct tcp_out_options *opts,
                                   struct tcp_md5sig_key **md5,
 -                                 struct tcp_extend_values *xvp,
                                   struct tcp_fastopen_cookie *foc)
  {
        struct inet_request_sock *ireq = inet_rsk(req);
        unsigned int remaining = MAX_TCP_OPTION_SPACE;
 -      u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
 -                       xvp->cookie_plus :
 -                       0;
  
  #ifdef CONFIG_TCP_MD5SIG
        *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
                        remaining -= need;
                }
        }
 -      /* Similar rationale to tcp_syn_options() applies here, too.
 -       * If the <SYN> options fit, the same options should fit now!
 -       */
 -      if (*md5 == NULL &&
 -          ireq->tstamp_ok &&
 -          cookie_plus > TCPOLEN_COOKIE_BASE) {
 -              int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */
 -
 -              if (0x2 & need) {
 -                      /* 32-bit multiple */
 -                      need += 2; /* NOPs */
 -              }
 -              if (need <= remaining) {
 -                      opts->options |= OPTION_COOKIE_EXTENSION;
 -                      opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
 -                      remaining -= need;
 -              } else {
 -                      /* There's no error return, so flag it. */
 -                      xvp->cookie_out_never = 1; /* true */
 -                      opts->hash_size = 0;
 -              }
 -      }
 +
        return MAX_TCP_OPTION_SPACE - remaining;
  }
  
@@@ -787,7 -953,7 +787,7 @@@ void __init tcp_tasklet_init(void
   * We can't xmit new skbs from this context, as we might already
   * hold qdisc lock.
   */
 -static void tcp_wfree(struct sk_buff *skb)
 +void tcp_wfree(struct sk_buff *skb)
  {
        struct sock *sk = skb->sk;
        struct tcp_sock *tp = tcp_sk(sk);
@@@ -846,13 -1012,6 +846,13 @@@ static int tcp_transmit_skb(struct soc
                __net_timestamp(skb);
  
        if (likely(clone_it)) {
 +              const struct sk_buff *fclone = skb + 1;
 +
 +              if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
 +                           fclone->fclone == SKB_FCLONE_CLONE))
 +                      NET_INC_STATS_BH(sock_net(sk),
 +                                       LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 +
                if (unlikely(skb_cloned(skb)))
                        skb = pskb_copy(skb, gfp_mask);
                else
@@@ -1473,8 -1632,11 +1473,8 @@@ static inline bool tcp_nagle_test(cons
        if (nonagle & TCP_NAGLE_PUSH)
                return true;
  
 -      /* Don't use the nagle rule for urgent data (or for the final FIN).
 -       * Nagle can be ignored during F-RTO too (see RFC4138).
 -       */
 -      if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
 -          (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
 +      /* Don't use the nagle rule for urgent data (or for the final FIN). */
 +      if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
                return true;
  
        if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@@ -1799,9 -1961,6 +1799,9 @@@ static int tcp_mtu_probe(struct sock *s
   * snd_up-64k-mss .. snd_up cannot be large. However, taking into
   * account rare use of URG, this is not a big flaw.
   *
 + * Send at most one packet when push_one > 0. Temporarily ignore
 + * cwnd limit to force at most one packet out when push_one == 2.
 + *
   * Returns true, if no segments are in flight and we have queued segments,
   * but cannot send anything now because of SWS or another problem.
   */
@@@ -1837,13 -1996,8 +1837,13 @@@ static bool tcp_write_xmit(struct sock 
                        goto repair; /* Skip network transmission */
  
                cwnd_quota = tcp_cwnd_test(tp, skb);
 -              if (!cwnd_quota)
 -                      break;
 +              if (!cwnd_quota) {
 +                      if (push_one == 2)
 +                              /* Force out a loss probe pkt. */
 +                              cwnd_quota = 1;
 +                      else
 +                              break;
 +              }
  
                if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
                        break;
@@@ -1897,129 -2051,10 +1897,129 @@@ repair
        if (likely(sent_pkts)) {
                if (tcp_in_cwnd_reduction(sk))
                        tp->prr_out += sent_pkts;
 +
 +              /* Send one loss probe per tail loss episode. */
 +              if (push_one != 2)
 +                      tcp_schedule_loss_probe(sk);
                tcp_cwnd_validate(sk);
                return false;
        }
 -      return !tp->packets_out && tcp_send_head(sk);
 +      return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
 +}
 +
 +bool tcp_schedule_loss_probe(struct sock *sk)
 +{
 +      struct inet_connection_sock *icsk = inet_csk(sk);
 +      struct tcp_sock *tp = tcp_sk(sk);
 +      u32 timeout, tlp_time_stamp, rto_time_stamp;
 +      u32 rtt = tp->srtt >> 3;
 +
 +      if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
 +              return false;
 +      /* No consecutive loss probes. */
 +      if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
 +              tcp_rearm_rto(sk);
 +              return false;
 +      }
 +      /* Don't do any loss probe on a Fast Open connection before 3WHS
 +       * finishes.
 +       */
 +      if (sk->sk_state == TCP_SYN_RECV)
 +              return false;
 +
 +      /* TLP is only scheduled when next timer event is RTO. */
 +      if (icsk->icsk_pending != ICSK_TIME_RETRANS)
 +              return false;
 +
 +      /* Schedule a loss probe in 2*RTT for SACK capable connections
 +       * in Open state, that are either limited by cwnd or application.
 +       */
 +      if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
 +          !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
 +              return false;
 +
 +      if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
 +           tcp_send_head(sk))
 +              return false;
 +
 +      /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
 +       * for delayed ack when there's one outstanding packet.
 +       */
 +      timeout = rtt << 1;
 +      if (tp->packets_out == 1)
 +              timeout = max_t(u32, timeout,
 +                              (rtt + (rtt >> 1) + TCP_DELACK_MAX));
 +      timeout = max_t(u32, timeout, msecs_to_jiffies(10));
 +
 +      /* If RTO is shorter, just schedule TLP in its place. */
 +      tlp_time_stamp = tcp_time_stamp + timeout;
 +      rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
 +      if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
 +              s32 delta = rto_time_stamp - tcp_time_stamp;
 +              if (delta > 0)
 +                      timeout = delta;
 +      }
 +
 +      inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
 +                                TCP_RTO_MAX);
 +      return true;
 +}
 +
 +/* When probe timeout (PTO) fires, send a new segment if one exists, else
 + * retransmit the last segment.
 + */
 +void tcp_send_loss_probe(struct sock *sk)
 +{
 +      struct tcp_sock *tp = tcp_sk(sk);
 +      struct sk_buff *skb;
 +      int pcount;
 +      int mss = tcp_current_mss(sk);
 +      int err = -1;
 +
 +      if (tcp_send_head(sk) != NULL) {
 +              err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
 +              goto rearm_timer;
 +      }
 +
 +      /* At most one outstanding TLP retransmission. */
 +      if (tp->tlp_high_seq)
 +              goto rearm_timer;
 +
 +      /* Retransmit last segment. */
 +      skb = tcp_write_queue_tail(sk);
 +      if (WARN_ON(!skb))
 +              goto rearm_timer;
 +
 +      pcount = tcp_skb_pcount(skb);
 +      if (WARN_ON(!pcount))
 +              goto rearm_timer;
 +
 +      if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
 +              if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
 +                      goto rearm_timer;
 +              skb = tcp_write_queue_tail(sk);
 +      }
 +
 +      if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
 +              goto rearm_timer;
 +
 +      /* Probe with zero data doesn't trigger fast recovery. */
 +      if (skb->len > 0)
 +              err = __tcp_retransmit_skb(sk, skb);
 +
 +      /* Record snd_nxt for loss detection. */
 +      if (likely(!err))
 +              tp->tlp_high_seq = tp->snd_nxt;
 +
 +rearm_timer:
 +      inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 +                                inet_csk(sk)->icsk_rto,
 +                                TCP_RTO_MAX);
 +
 +      if (likely(!err))
 +              NET_INC_STATS_BH(sock_net(sk),
 +                               LINUX_MIB_TCPLOSSPROBES);
 +      return;
  }
  
  /* Push out any pending frames which were held back due to
@@@ -2353,8 -2388,12 +2353,12 @@@ int __tcp_retransmit_skb(struct sock *s
         */
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
  
-       /* make sure skb->data is aligned on arches that require it */
-       if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
+       /* make sure skb->data is aligned on arches that require it
+        * and check if ack-trimming & collapsing extended the headroom
+        * beyond what csum_start can cover.
+        */
+       if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
+                    skb_headroom(skb) >= 0xFFFF)) {
                struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
                                                   GFP_ATOMIC);
                return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
@@@ -2640,24 -2679,32 +2644,24 @@@ int tcp_send_synack(struct sock *sk
   * sk: listener socket
   * dst: dst entry attached to the SYNACK
   * req: request_sock pointer
 - * rvp: request_values pointer
   *
   * Allocate one skb and build a SYNACK packet.
   * @dst is consumed : Caller should not use it again.
   */
  struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                                struct request_sock *req,
 -                              struct request_values *rvp,
                                struct tcp_fastopen_cookie *foc)
  {
        struct tcp_out_options opts;
 -      struct tcp_extend_values *xvp = tcp_xv(rvp);
        struct inet_request_sock *ireq = inet_rsk(req);
        struct tcp_sock *tp = tcp_sk(sk);
 -      const struct tcp_cookie_values *cvp = tp->cookie_values;
        struct tcphdr *th;
        struct sk_buff *skb;
        struct tcp_md5sig_key *md5;
        int tcp_header_size;
        int mss;
 -      int s_data_desired = 0;
  
 -      if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
 -              s_data_desired = cvp->s_data_desired;
 -      skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired,
 -                      sk_gfp_atomic(sk, GFP_ATOMIC));
 +      skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC));
        if (unlikely(!skb)) {
                dst_release(dst);
                return NULL;
        skb_reserve(skb, MAX_TCP_HEADER);
  
        skb_dst_set(skb, dst);
+       security_skb_owned_by(skb, sk);
  
        mss = dst_metric_advmss(dst);
        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
        else
  #endif
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
 -      tcp_header_size = tcp_synack_options(sk, req, mss,
 -                                           skb, &opts, &md5, xvp, foc)
 -                      + sizeof(*th);
 +      tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
 +                                           foc) + sizeof(*th);
  
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
        tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
                             TCPHDR_SYN | TCPHDR_ACK);
  
 -      if (OPTION_COOKIE_EXTENSION & opts.options) {
 -              if (s_data_desired) {
 -                      u8 *buf = skb_put(skb, s_data_desired);
 -
 -                      /* copy data directly from the listening socket. */
 -                      memcpy(buf, cvp->s_data_payload, s_data_desired);
 -                      TCP_SKB_CB(skb)->end_seq += s_data_desired;
 -              }
 -
 -              if (opts.hash_size > 0) {
 -                      __u32 workspace[SHA_WORKSPACE_WORDS];
 -                      u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS];
 -                      u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1];
 -
 -                      /* Secret recipe depends on the Timestamp, (future)
 -                       * Sequence and Acknowledgment Numbers, Initiator
 -                       * Cookie, and others handled by IP variant caller.
 -                       */
 -                      *tail-- ^= opts.tsval;
 -                      *tail-- ^= tcp_rsk(req)->rcv_isn + 1;
 -                      *tail-- ^= TCP_SKB_CB(skb)->seq + 1;
 -
 -                      /* recommended */
 -                      *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
 -                      *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
 -
 -                      sha_transform((__u32 *)&xvp->cookie_bakery[0],
 -                                    (char *)mess,
 -                                    &workspace[0]);
 -                      opts.hash_location =
 -                              (__u8 *)&xvp->cookie_bakery[0];
 -              }
 -      }
 -
        th->seq = htonl(TCP_SKB_CB(skb)->seq);
        /* XXX data is queued and acked as is. No buffer/window check */
        th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
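
The tcp_output.c changes above add tail loss probes (TLP): when the next
pending timer would be the RTO, tcp_schedule_loss_probe() instead arms a
probe timeout (PTO) of about 2*SRTT, stretched to 1.5*SRTT plus the
delayed-ACK timeout when exactly one packet is in flight, floored at
10 ms, and never set to fire later than the pending RTO. A rough
userspace sketch of that arithmetic, using milliseconds in place of
jiffies and illustrative constants in place of the kernel's:

#include <stdint.h>

#define DELACK_MAX_MS 200       /* stand-in for TCP_DELACK_MAX */
#define PTO_FLOOR_MS  10

static uint32_t tlp_timeout_ms(uint32_t srtt_ms, uint32_t packets_out,
                               uint32_t time_until_rto_ms)
{
        uint32_t timeout = 2 * srtt_ms;                 /* base: 2*RTT */
        uint32_t one_pkt = srtt_ms + srtt_ms / 2 + DELACK_MAX_MS;

        /* With a single packet outstanding, wait out a delayed ACK. */
        if (packets_out == 1 && timeout < one_pkt)
                timeout = one_pkt;
        if (timeout < PTO_FLOOR_MS)
                timeout = PTO_FLOOR_MS;

        /* If the RTO would fire sooner, schedule the probe in its place. */
        if (timeout > time_until_rto_ms)
                timeout = time_until_rto_ms;
        return timeout;
}

On expiry, tcp_send_loss_probe() transmits new data if any is queued and
otherwise retransmits the last segment, recording tp->tlp_high_seq so
that later loss detection can recognize the episode.
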
diff --combined net/ipv6/addrconf.c
index 28b61e89bbb81cdbd6b25b1d6e660574090881a9,dae802c0af7c002df341d3dfe4ed94db6584118e..d1ab6ab29a55c11e0570bb9854c524d9f21369fc
@@@ -70,7 -70,6 +70,7 @@@
  #include <net/snmp.h>
  
  #include <net/af_ieee802154.h>
 +#include <net/firewire.h>
  #include <net/ipv6.h>
  #include <net/protocol.h>
  #include <net/ndisc.h>
@@@ -169,8 -168,6 +169,6 @@@ static void inet6_prefix_notify(int eve
  static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
                               struct net_device *dev);
  
- static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
  static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .forwarding             = 0,
        .hop_limit              = IPV6_DEFAULT_HOPLIMIT,
@@@ -422,7 -419,6 +420,7 @@@ static struct inet6_dev *ipv6_add_dev(s
                ipv6_regen_rndid((unsigned long) ndev);
        }
  #endif
 +      ndev->token = in6addr_any;
  
        if (netif_running(dev) && addrconf_qdisc_ok(dev))
                ndev->if_flags |= IF_READY;
@@@ -546,7 -542,8 +544,7 @@@ static const struct nla_policy devconf_
  };
  
  static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
 -                                   struct nlmsghdr *nlh,
 -                                   void *arg)
 +                                   struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(in_skb->sk);
        struct nlattr *tb[NETCONFA_MAX+1];
@@@ -606,77 -603,6 +604,77 @@@ errout
        return err;
  }
  
 +static int inet6_netconf_dump_devconf(struct sk_buff *skb,
 +                                    struct netlink_callback *cb)
 +{
 +      struct net *net = sock_net(skb->sk);
 +      int h, s_h;
 +      int idx, s_idx;
 +      struct net_device *dev;
 +      struct inet6_dev *idev;
 +      struct hlist_head *head;
 +
 +      s_h = cb->args[0];
 +      s_idx = idx = cb->args[1];
 +
 +      for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 +              idx = 0;
 +              head = &net->dev_index_head[h];
 +              rcu_read_lock();
 +              cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
 +                        net->dev_base_seq;
 +              hlist_for_each_entry_rcu(dev, head, index_hlist) {
 +                      if (idx < s_idx)
 +                              goto cont;
 +                      idev = __in6_dev_get(dev);
 +                      if (!idev)
 +                              goto cont;
 +
 +                      if (inet6_netconf_fill_devconf(skb, dev->ifindex,
 +                                                     &idev->cnf,
 +                                                     NETLINK_CB(cb->skb).portid,
 +                                                     cb->nlh->nlmsg_seq,
 +                                                     RTM_NEWNETCONF,
 +                                                     NLM_F_MULTI,
 +                                                     -1) <= 0) {
 +                              rcu_read_unlock();
 +                              goto done;
 +                      }
 +                      nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 +cont:
 +                      idx++;
 +              }
 +              rcu_read_unlock();
 +      }
 +      if (h == NETDEV_HASHENTRIES) {
 +              if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
 +                                             net->ipv6.devconf_all,
 +                                             NETLINK_CB(cb->skb).portid,
 +                                             cb->nlh->nlmsg_seq,
 +                                             RTM_NEWNETCONF, NLM_F_MULTI,
 +                                             -1) <= 0)
 +                      goto done;
 +              else
 +                      h++;
 +      }
 +      if (h == NETDEV_HASHENTRIES + 1) {
 +              if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
 +                                             net->ipv6.devconf_dflt,
 +                                             NETLINK_CB(cb->skb).portid,
 +                                             cb->nlh->nlmsg_seq,
 +                                             RTM_NEWNETCONF, NLM_F_MULTI,
 +                                             -1) <= 0)
 +                      goto done;
 +              else
 +                      h++;
 +      }
 +done:
 +      cb->args[0] = h;
 +      cb->args[1] = idx;
 +
 +      return skb->len;
 +}
 +
  #ifdef CONFIG_SYSCTL
  static void dev_forward_change(struct inet6_dev *idev)
  {
@@@ -878,7 -804,6 +876,7 @@@ ipv6_add_addr(struct inet6_dev *idev, c
        ifa->prefix_len = pfxlen;
        ifa->flags = flags | IFA_F_TENTATIVE;
        ifa->cstamp = ifa->tstamp = jiffies;
 +      ifa->tokenized = false;
  
        ifa->rt = rt;
  
@@@ -910,7 -835,7 +908,7 @@@ out2
        rcu_read_unlock_bh();
  
        if (likely(err == 0))
-               atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
+               inet6addr_notifier_call_chain(NETDEV_UP, ifa);
        else {
                kfree(ifa);
                ifa = ERR_PTR(err);
@@@ -1000,7 -925,7 +998,7 @@@ static void ipv6_del_addr(struct inet6_
  
        ipv6_ifa_notify(RTM_DELADDR, ifp);
  
-       atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp);
+       inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
  
        /*
         * Purge or update corresponding prefix
@@@ -1741,20 -1666,6 +1739,20 @@@ static int addrconf_ifid_eui64(u8 *eui
        return 0;
  }
  
 +static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
 +{
 +      union fwnet_hwaddr *ha;
 +
 +      if (dev->addr_len != FWNET_ALEN)
 +              return -1;
 +
 +      ha = (union fwnet_hwaddr *)dev->dev_addr;
 +
 +      memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
 +      eui[0] ^= 2;
 +      return 0;
 +}
 +
  static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
  {
        /* XXX: inherit EUI-64 from other interface -- yoshfuji */
@@@ -1819,8 -1730,6 +1817,8 @@@ static int ipv6_generate_eui64(u8 *eui
                return addrconf_ifid_gre(eui, dev);
        case ARPHRD_IEEE802154:
                return addrconf_ifid_eui64(eui, dev);
 +      case ARPHRD_IEEE1394:
 +              return addrconf_ifid_ieee1394(eui, dev);
        }
        return -1;
  }
@@@ -2135,19 -2044,11 +2133,19 @@@ void addrconf_prefix_rcv(struct net_dev
                struct inet6_ifaddr *ifp;
                struct in6_addr addr;
                int create = 0, update_lft = 0;
 +              bool tokenized = false;
  
                if (pinfo->prefix_len == 64) {
                        memcpy(&addr, &pinfo->prefix, 8);
 -                      if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
 -                          ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
 +
 +                      if (!ipv6_addr_any(&in6_dev->token)) {
 +                              read_lock_bh(&in6_dev->lock);
 +                              memcpy(addr.s6_addr + 8,
 +                                     in6_dev->token.s6_addr + 8, 8);
 +                              read_unlock_bh(&in6_dev->lock);
 +                              tokenized = true;
 +                      } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
 +                                 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
                                in6_dev_put(in6_dev);
                                return;
                        }
@@@ -2188,7 -2089,6 +2186,7 @@@ ok
  
                        update_lft = create = 1;
                        ifp->cstamp = jiffies;
 +                      ifp->tokenized = tokenized;
                        addrconf_dad_start(ifp);
                }
  
@@@ -2698,8 -2598,7 +2696,8 @@@ static void addrconf_dev_config(struct 
            (dev->type != ARPHRD_FDDI) &&
            (dev->type != ARPHRD_ARCNET) &&
            (dev->type != ARPHRD_INFINIBAND) &&
 -          (dev->type != ARPHRD_IEEE802154)) {
 +          (dev->type != ARPHRD_IEEE802154) &&
 +          (dev->type != ARPHRD_IEEE1394)) {
                /* Alas, we support only Ethernet autoconfiguration. */
                return;
        }
@@@ -3087,7 -2986,7 +3085,7 @@@ static int addrconf_ifdown(struct net_d
  
                if (state != INET6_IFADDR_STATE_DEAD) {
                        __ipv6_ifa_notify(RTM_DELADDR, ifa);
-                       atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
+                       inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
                }
                in6_ifa_put(ifa);
  
@@@ -3636,7 -3535,7 +3634,7 @@@ static const struct nla_policy ifa_ipv6
  };
  
  static int
 -inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 +inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(skb->sk);
        struct ifaddrmsg *ifm;
@@@ -3702,7 -3601,7 +3700,7 @@@ static int inet6_addr_modify(struct ine
  }
  
  static int
 -inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 +inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(skb->sk);
        struct ifaddrmsg *ifm;
@@@ -3933,7 -3832,6 +3931,7 @@@ static int in6_dump_addrs(struct inet6_
                                                NLM_F_MULTI);
                        if (err <= 0)
                                break;
 +                      nl_dump_check_consistent(cb, nlmsg_hdr(skb));
                }
                break;
        }
@@@ -3991,7 -3889,6 +3989,7 @@@ static int inet6_dump_addr(struct sk_bu
        s_ip_idx = ip_idx = cb->args[2];
  
        rcu_read_lock();
 +      cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &net->dev_index_head[h];
@@@ -4043,7 -3940,8 +4041,7 @@@ static int inet6_dump_ifacaddr(struct s
        return inet6_dump_addr(skb, cb, type);
  }
  
 -static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 -                           void *arg)
 +static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh)
  {
        struct net *net = sock_net(in_skb->sk);
        struct ifaddrmsg *ifm;
@@@ -4176,8 -4074,7 +4174,8 @@@ static inline size_t inet6_ifla6_size(v
             + nla_total_size(sizeof(struct ifla_cacheinfo))
             + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
             + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
 -           + nla_total_size(ICMP6_MIB_MAX * 8); /* IFLA_INET6_ICMP6STATS */
 +           + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
 +           + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */
  }
  
  static inline size_t inet6_if_nlmsg_size(void)
@@@ -4264,13 -4161,6 +4262,13 @@@ static int inet6_fill_ifla6_attrs(struc
                goto nla_put_failure;
        snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
  
 +      nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
 +      if (nla == NULL)
 +              goto nla_put_failure;
 +      read_lock_bh(&idev->lock);
 +      memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
 +      read_unlock_bh(&idev->lock);
 +
        return 0;
  
  nla_put_failure:
@@@ -4298,80 -4188,6 +4296,80 @@@ static int inet6_fill_link_af(struct sk
        return 0;
  }
  
 +static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
 +{
 +      struct inet6_ifaddr *ifp;
 +      struct net_device *dev = idev->dev;
 +      bool update_rs = false;
 +
 +      if (token == NULL)
 +              return -EINVAL;
 +      if (ipv6_addr_any(token))
 +              return -EINVAL;
 +      if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
 +              return -EINVAL;
 +      if (!ipv6_accept_ra(idev))
 +              return -EINVAL;
 +      if (idev->cnf.rtr_solicits <= 0)
 +              return -EINVAL;
 +
 +      write_lock_bh(&idev->lock);
 +
 +      BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
 +      memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
 +
 +      write_unlock_bh(&idev->lock);
 +
 +      if (!idev->dead && (idev->if_flags & IF_READY)) {
 +              struct in6_addr ll_addr;
 +
 +              ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
 +                              IFA_F_OPTIMISTIC);
 +
 +              /* If we're not ready, then normal ifup will take care
 +               * of this. Otherwise, we need to request our rs here.
 +               */
 +              ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
 +              update_rs = true;
 +      }
 +
 +      write_lock_bh(&idev->lock);
 +
 +      if (update_rs)
 +              idev->if_flags |= IF_RS_SENT;
 +
 +      /* Well, that's kinda nasty ... */
 +      list_for_each_entry(ifp, &idev->addr_list, if_list) {
 +              spin_lock(&ifp->lock);
 +              if (ifp->tokenized) {
 +                      ifp->valid_lft = 0;
 +                      ifp->prefered_lft = 0;
 +              }
 +              spin_unlock(&ifp->lock);
 +      }
 +
 +      write_unlock_bh(&idev->lock);
 +      return 0;
 +}
 +
 +static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
 +{
 +      int err = -EINVAL;
 +      struct inet6_dev *idev = __in6_dev_get(dev);
 +      struct nlattr *tb[IFLA_INET6_MAX + 1];
 +
 +      if (!idev)
 +              return -EAFNOSUPPORT;
 +
 +      if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL) < 0)
 +              BUG();
 +
 +      if (tb[IFLA_INET6_TOKEN])
 +              err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
 +
 +      return err;
 +}
 +
  static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
                             u32 portid, u32 seq, int event, unsigned int flags)
  {
@@@ -4550,8 -4366,6 +4548,8 @@@ errout
  
  static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
  {
 +      struct net *net = dev_net(ifp->idev->dev);
 +
        inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
  
        switch (event) {
                        dst_free(&ifp->rt->dst);
                break;
        }
 +      atomic_inc(&net->ipv6.dev_addr_genid);
  }
  
  static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
@@@ -5054,27 -4867,10 +5052,11 @@@ static struct pernet_operations addrcon
        .exit = addrconf_exit_net,
  };
  
- /*
-  *      Device notifier
-  */
- int register_inet6addr_notifier(struct notifier_block *nb)
- {
-       return atomic_notifier_chain_register(&inet6addr_chain, nb);
- }
- EXPORT_SYMBOL(register_inet6addr_notifier);
-
- int unregister_inet6addr_notifier(struct notifier_block *nb)
- {
-       return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
- }
- EXPORT_SYMBOL(unregister_inet6addr_notifier);
-
  static struct rtnl_af_ops inet6_ops = {
        .family           = AF_INET6,
        .fill_link_af     = inet6_fill_link_af,
        .get_link_af_size = inet6_get_link_af_size,
 +      .set_link_af      = inet6_set_link_af,
  };
  
  /*
@@@ -5147,7 -4943,7 +5129,7 @@@ int __init addrconf_init(void
        __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
                        inet6_dump_ifacaddr, NULL);
        __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
 -                      NULL, NULL);
 +                      inet6_netconf_dump_devconf, NULL);
  
        ipv6_addr_label_rtnl_register();
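
Among the addrconf.c changes above is support for tokenized interface
identifiers: when a token is configured, addrconf_prefix_rcv() builds the
address from the router-advertised /64 prefix and the fixed lower 64 bits
of the token, instead of an EUI-64 derived identifier. A sketch of that
combination, with plain byte arrays standing in for struct in6_addr:

#include <stdint.h>
#include <string.h>

static void build_tokenized_addr(uint8_t addr[16],
                                 const uint8_t prefix[16],
                                 const uint8_t token[16])
{
        memcpy(addr, prefix, 8);        /* upper 64 bits: RA prefix */
        memcpy(addr + 8, token + 8, 8); /* lower 64 bits: stable token */
}

Such addresses are flagged via ifp->tokenized, so a later token change
can expire them by zeroing their lifetimes, as inet6_set_iftoken() above
does.
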
  
diff --combined net/ipv6/reassembly.c
index e6e44cef8db238ecc0fa1f0df6353c6c527f2e33,0ba10e53a6298f07cb64193681ccbfb419d40f36..790d9f4b8b0b21c1d4dd4577ee6a472bd96fd729
@@@ -58,7 -58,6 +58,7 @@@
  #include <net/ndisc.h>
  #include <net/addrconf.h>
  #include <net/inet_frag.h>
 +#include <net/inet_ecn.h>
  
  struct ip6frag_skb_cb
  {
  
  #define FRAG6_CB(skb) ((struct ip6frag_skb_cb*)((skb)->cb))
  
 +static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
 +{
 +      return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
 +}
  
  static struct inet_frags ip6_frags;
  
@@@ -124,7 -119,6 +124,7 @@@ void ip6_frag_init(struct inet_frag_que
        fq->user = arg->user;
        fq->saddr = *arg->src;
        fq->daddr = *arg->dst;
 +      fq->ecn = arg->ecn;
  }
  EXPORT_SYMBOL(ip6_frag_init);
  
@@@ -179,8 -173,7 +179,8 @@@ static void ip6_frag_expire(unsigned lo
  }
  
  static __inline__ struct frag_queue *
 -fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
 +fq_find(struct net *net, __be32 id, const struct in6_addr *src,
 +      const struct in6_addr *dst, u8 ecn)
  {
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
        arg.user = IP6_DEFRAG_LOCAL_DELIVER;
        arg.src = src;
        arg.dst = dst;
 +      arg.ecn = ecn;
  
        read_lock(&ip6_frags.lock);
        hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
@@@ -210,7 -202,6 +210,7 @@@ static int ip6_frag_queue(struct frag_q
        struct net_device *dev;
        int offset, end;
        struct net *net = dev_net(skb_dst(skb)->dev);
 +      u8 ecn;
  
        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto err;
                return -1;
        }
  
 +      ecn = ip6_frag_ecn(ipv6_hdr(skb));
 +
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                const unsigned char *nh = skb_network_header(skb);
                skb->csum = csum_sub(skb->csum,
@@@ -330,7 -319,6 +330,7 @@@ found
        }
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
 +      fq->ecn |= ecn;
        add_frag_mem_limit(&fq->q, skb->truesize);
  
        /* The first fragment.
        }
  
        if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-           fq->q.meat == fq->q.len)
-               return ip6_frag_reasm(fq, prev, dev);
+           fq->q.meat == fq->q.len) {
+               int res;
+               unsigned long orefdst = skb->_skb_refdst;
+               skb->_skb_refdst = 0UL;
+               res = ip6_frag_reasm(fq, prev, dev);
+               skb->_skb_refdst = orefdst;
+               return res;
+       }
  
+       skb_dst_drop(skb);
        inet_frag_lru_move(&fq->q);
        return -1;
  
@@@ -374,14 -370,9 +382,14 @@@ static int ip6_frag_reasm(struct frag_q
        int    payload_len;
        unsigned int nhoff;
        int sum_truesize;
 +      u8 ecn;
  
        inet_frag_kill(&fq->q, &ip6_frags);
  
 +      ecn = ip_frag_ecn_table[fq->ecn];
 +      if (unlikely(ecn == 0xff))
 +              goto out_fail;
 +
        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
        ipv6_hdr(head)->payload_len = htons(payload_len);
 +      ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
        IP6CB(head)->nhoff = nhoff;
  
        /* Yes, and fold redundant checksum back. 8) */
@@@ -544,8 -534,7 +552,8 @@@ static int ipv6_frag_rcv(struct sk_buf
                IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                                 IPSTATS_MIB_REASMFAILS, evicted);
  
 -      fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
 +      fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
 +                   ip6_frag_ecn(hdr));
        if (fq != NULL) {
                int ret;
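
The reassembly.c side of the merge adds ECN bookkeeping to IPv6
fragments: ip6_frag_ecn() turns the two ECN bits of each fragment's
traffic class into a single bit, ip6_frag_queue() ORs those bits into
fq->ecn, and ip6_frag_reasm() consults ip_frag_ecn_table, treating 0xff
as "inconsistent codepoints, drop the datagram". A sketch of the
accumulation step; the names are illustrative stand-ins:

#include <stdint.h>

/* One bit per codepoint seen: 0 = Not-ECT, 1 = ECT(1), 2 = ECT(0),
 * 3 = CE, mirroring 1 << (dsfield & INET_ECN_MASK) above.
 */
static uint8_t frag_ecn_bit(uint8_t dsfield)
{
        return (uint8_t)(1u << (dsfield & 0x3));
}

static uint8_t fold_fragment_ecn(const uint8_t *dsfields, int n)
{
        uint8_t seen = 0;
        int i;

        for (i = 0; i < n; i++)
                seen |= frag_ecn_bit(dsfields[i]);
        return seen;    /* index into the 16-entry decision table */
}
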
  
diff --combined net/iucv/af_iucv.c
index e165e8dc962e7ea98ab22312709022f572ca4a89,206ce6db2c36a2ae44d694614f8be29317f5b74d..ae691651b72141d649a4cca1aaaedc8920db5192
@@@ -49,12 -49,6 +49,6 @@@ static const u8 iprm_shutdown[8] 
  
  #define TRGCLS_SIZE   (sizeof(((struct iucv_message *)0)->class))
  
- /* macros to set/get socket control buffer at correct offset */
- #define CB_TAG(skb)   ((skb)->cb)             /* iucv message tag */
- #define CB_TAG_LEN    (sizeof(((struct iucv_message *) 0)->tag))
- #define CB_TRGCLS(skb)        ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
- #define CB_TRGCLS_LEN (TRGCLS_SIZE)
-
  #define __iucv_sock_wait(sk, condition, timeo, ret)                   \
  do {                                                                  \
        DEFINE_WAIT(__wait);                                            \
@@@ -1141,7 -1135,7 +1135,7 @@@ static int iucv_sock_sendmsg(struct kio
  
        /* increment and save iucv message tag for msg_completion cbk */
        txmsg.tag = iucv->send_tag++;
-       memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+       IUCV_SKB_CB(skb)->tag = txmsg.tag;
  
        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                atomic_inc(&iucv->msg_sent);
@@@ -1224,7 -1218,7 +1218,7 @@@ static int iucv_fragment_skb(struct soc
                        return -ENOMEM;
  
                /* copy target class to control buffer of new skb */
-               memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
+               IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
  
                /* copy data fragment */
                memcpy(nskb->data, skb->data + copied, size);
@@@ -1256,7 -1250,7 +1250,7 @@@ static void iucv_process_message(struc
  
        /* store msg target class in the second 4 bytes of skb ctrl buffer */
        /* Note: the first 4 bytes are reserved for msg tag */
-       memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
+       IUCV_SKB_CB(skb)->class = msg->class;
  
        /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
        if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
                }
        }
  
+       IUCV_SKB_CB(skb)->offset = 0;
        if (sock_queue_rcv_skb(sk, skb))
                skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
  }
@@@ -1327,6 -1322,7 +1322,7 @@@ static int iucv_sock_recvmsg(struct kio
        unsigned int copied, rlen;
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;
+       u32 offset;
  
        msg->msg_namelen = 0;
  
                return err;
        }
  
-       rlen   = skb->len;              /* real length of skb */
+       offset = IUCV_SKB_CB(skb)->offset;
+       rlen   = skb->len - offset;             /* real length of skb */
        copied = min_t(unsigned int, rlen, len);
        if (!rlen)
                sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
  
        cskb = skb;
-       if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
+       if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return -EFAULT;
         * get the trgcls from the control buffer of the skb due to
         * fragmentation of original iucv message. */
        err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
-                       CB_TRGCLS_LEN, CB_TRGCLS(skb));
+                      sizeof(IUCV_SKB_CB(skb)->class),
+                      (void *)&IUCV_SKB_CB(skb)->class);
        if (err) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
  
                /* SOCK_STREAM: re-queue skb if it contains unreceived data */
                if (sk->sk_type == SOCK_STREAM) {
-                       skb_pull(skb, copied);
-                       if (skb->len) {
-                               skb_queue_head(&sk->sk_receive_queue, skb);
+                       if (copied < rlen) {
+                               IUCV_SKB_CB(skb)->offset = offset + copied;
                                goto done;
                        }
                }
                spin_lock_bh(&iucv->message_q.lock);
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
+                       IUCV_SKB_CB(rskb)->offset = 0;
                        if (sock_queue_rcv_skb(sk, rskb)) {
                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
@@@ -1463,8 -1461,7 +1461,8 @@@ unsigned int iucv_sock_poll(struct fil
                return iucv_accept_poll(sk);
  
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 -              mask |= POLLERR;
 +              mask |= POLLERR |
 +                      (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
  
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;
@@@ -1833,7 -1830,7 +1831,7 @@@ static void iucv_callback_txdone(struc
                spin_lock_irqsave(&list->lock, flags);
  
                while (list_skb != (struct sk_buff *)list) {
-                       if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
+                       if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
                                this = list_skb;
                                break;
                        }
@@@ -2094,6 -2091,7 +2092,7 @@@ static int afiucv_hs_callback_rx(struc
        skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
+       IUCV_SKB_CB(skb)->offset = 0;
        spin_lock(&iucv->message_q.lock);
        if (skb_queue_empty(&iucv->backlog_skb_q)) {
                if (sock_queue_rcv_skb(sk, skb)) {
@@@ -2198,8 -2196,7 +2197,7 @@@ static int afiucv_hs_rcv(struct sk_buf
                /* fall through and receive zero length data */
        case 0:
                /* plain data frame */
-               memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
-                      CB_TRGCLS_LEN);
+               IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
                err = afiucv_hs_callback_rx(sk, skb);
                break;
        default:
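
The af_iucv.c changes above stop mutating queued skbs with skb_pull()
during partial reads; instead, the consumed byte count is kept in the
skb's control block (IUCV_SKB_CB(skb)->offset) and the skb stays at the
head of the receive queue. A simplified userspace sketch of the same
pattern; the struct is a stand-in, not the kernel's skb:

#include <stddef.h>
#include <string.h>

struct msg_buf {
        const char *data;
        size_t len;
        size_t offset;  /* plays the role of IUCV_SKB_CB(skb)->offset */
};

/* Copy out up to 'want' bytes and remember how far we got, so the
 * next read resumes where this one stopped.
 */
static size_t read_some(struct msg_buf *m, char *dst, size_t want)
{
        size_t avail = m->len - m->offset;
        size_t n = want < avail ? want : avail;

        memcpy(dst, m->data + m->offset, n);
        m->offset += n;
        return n;
}
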
diff --combined net/mac80211/iface.c
index 69aaba79a9f782ac337c0dfc2ae221f2f8d4962e,9ed49ad0380f151fc5ad5448e953f6497d800291..e8a260f53c16d98614a660b72d56a43dc0dd8ab9
@@@ -78,7 -78,7 +78,7 @@@ void ieee80211_recalc_txpower(struct ie
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
  }
  
- u32 ieee80211_idle_off(struct ieee80211_local *local)
+ static u32 __ieee80211_idle_off(struct ieee80211_local *local)
  {
        if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
                return 0;
        return IEEE80211_CONF_CHANGE_IDLE;
  }
  
- static u32 ieee80211_idle_on(struct ieee80211_local *local)
+ static u32 __ieee80211_idle_on(struct ieee80211_local *local)
  {
        if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
                return 0;
  
 -      drv_flush(local, false);
 +      ieee80211_flush_queues(local, NULL);
  
        local->hw.conf.flags |= IEEE80211_CONF_IDLE;
        return IEEE80211_CONF_CHANGE_IDLE;
  }
  
- void ieee80211_recalc_idle(struct ieee80211_local *local)
+ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local,
+                                  bool force_active)
  {
        bool working = false, scanning, active;
        unsigned int led_trig_start = 0, led_trig_stop = 0;
        struct ieee80211_roc_work *roc;
-       u32 change;
  
        lockdep_assert_held(&local->mtx);
  
-       active = !list_empty(&local->chanctx_list) || local->monitors;
+       active = force_active ||
+                !list_empty(&local->chanctx_list) ||
+                local->monitors;
  
        if (!local->ops->remain_on_channel) {
                list_for_each_entry(roc, &local->roc_list, list) {
        ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
  
        if (working || scanning || active)
-               change = ieee80211_idle_off(local);
-       else
-               change = ieee80211_idle_on(local);
+               return __ieee80211_idle_off(local);
+       return __ieee80211_idle_on(local);
+ }
+
+ u32 ieee80211_idle_off(struct ieee80211_local *local)
+ {
+       return __ieee80211_recalc_idle(local, true);
+ }
+
+ void ieee80211_recalc_idle(struct ieee80211_local *local)
+ {
+       u32 change = __ieee80211_recalc_idle(local, false);
+
        if (change)
                ieee80211_hw_config(local, change);
  }
@@@ -488,6 -499,8 +499,6 @@@ int ieee80211_do_open(struct wireless_d
                res = drv_start(local);
                if (res)
                        goto err_del_bss;
 -              if (local->ops->napi_poll)
 -                      napi_enable(&local->napi);
                /* we're brought up, everything changes */
                hw_reconf_flags = ~0;
                ieee80211_led_radio(local, true);
                                goto err_del_interface;
                }
  
 -              drv_add_interface_debugfs(local, sdata);
 -
                if (sdata->vif.type == NL80211_IFTYPE_AP) {
                        local->fif_pspoll++;
                        local->fif_probe_req++;
@@@ -837,15 -852,15 +848,15 @@@ static void ieee80211_do_stop(struct ie
                rcu_barrier();
                sta_info_flush_cleanup(sdata);
  
 -              skb_queue_purge(&sdata->skb_queue);
 -
                /*
                 * Free all remaining keys, there shouldn't be any,
 -               * except maybe group keys in AP more or WDS?
 +               * except maybe in WDS mode?
                 */
                ieee80211_free_keys(sdata);
  
 -              drv_remove_interface_debugfs(local, sdata);
 +              /* fall through */
 +      case NL80211_IFTYPE_AP:
 +              skb_queue_purge(&sdata->skb_queue);
  
                if (going_down)
                        drv_remove_interface(local, sdata);
        ieee80211_recalc_ps(local, -1);
  
        if (local->open_count == 0) {
 -              if (local->ops->napi_poll)
 -                      napi_disable(&local->napi);
                ieee80211_clear_tx_pending(local);
                ieee80211_stop_device(local);
  
@@@ -918,17 -935,6 +929,17 @@@ static void ieee80211_set_multicast_lis
                        atomic_dec(&local->iff_promiscs);
                sdata->flags ^= IEEE80211_SDATA_PROMISC;
        }
 +
 +      /*
 +       * TODO: If somebody needs this on AP interfaces,
 +       *       it can be enabled easily but multicast
 +       *       addresses from VLANs need to be synced.
 +       */
 +      if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
 +          sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
 +          sdata->vif.type != NL80211_IFTYPE_AP)
 +              drv_set_multicast_list(local, sdata, &dev->mc);
 +
        spin_lock_bh(&local->filter_lock);
        __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
        spin_unlock_bh(&local->filter_lock);
@@@ -1555,8 -1561,6 +1566,8 @@@ int ieee80211_if_add(struct ieee80211_l
        INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
        INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work,
                          ieee80211_dfs_cac_timer_work);
 +      INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk,
 +                        ieee80211_delayed_tailroom_dec);
  
        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
                struct ieee80211_supported_band *sband;
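
The iface.c resolution above keeps the restructured idle logic:
ieee80211_idle_off() and ieee80211_recalc_idle() both funnel into a
single __ieee80211_recalc_idle(local, force_active) helper instead of
duplicating the decision. A condensed sketch of that pattern, with a
boolean standing in for the hardware IDLE flag:

#include <stdbool.h>
#include <stdint.h>

#define CONF_CHANGE_IDLE 0x1u

static bool hw_idle;    /* stand-in for IEEE80211_CONF_IDLE */

static uint32_t __recalc_idle(bool force_active, bool working,
                              bool scanning)
{
        bool active = force_active || working || scanning;

        if (hw_idle != active)
                return 0;       /* flag already matches, nothing to do */
        hw_idle = !active;
        return CONF_CHANGE_IDLE;        /* caller reprograms the hardware */
}

/* Like ieee80211_idle_off(): force the active path regardless of
 * interface state.
 */
static uint32_t idle_off(void)
{
        return __recalc_idle(true, false, false);
}
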
diff --combined net/mac80211/mlme.c
index e06dbbf8cb4c2bfe5e88855c21b473b38618fadc,346ad4cfb01323cf471758dd5a6fcbdb12b338a6..dec42ab1fa916a75a4aff2b77690710adb2ba5c9
@@@ -87,6 -87,9 +87,6 @@@ MODULE_PARM_DESC(probe_wait_ms
   */
  #define IEEE80211_SIGNAL_AVE_MIN_COUNT        4
  
 -#define TMR_RUNNING_TIMER     0
 -#define TMR_RUNNING_CHANSW    1
 -
  /*
   * All cfg80211 functions have to be called outside a locked
   * section so that they can acquire a lock themselves... This
@@@ -606,7 -609,6 +606,7 @@@ static void ieee80211_add_vht_ie(struc
        BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
  
        memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
 +      ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
  
        /* determine capability flags */
        cap = vht_cap.cap;
@@@ -1009,7 -1011,6 +1009,7 @@@ static void ieee80211_chswitch_work(str
  
        /* XXX: wait for a beacon first? */
        ieee80211_wake_queues_by_reason(&sdata->local->hw,
 +                                      IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
   out:
        ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
@@@ -1037,8 -1038,14 +1037,8 @@@ static void ieee80211_chswitch_timer(un
  {
        struct ieee80211_sub_if_data *sdata =
                (struct ieee80211_sub_if_data *) data;
 -      struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 -
 -      if (sdata->local->quiescing) {
 -              set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
 -              return;
 -      }
  
 -      ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
 +      ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
  }
  
  void
@@@ -1109,7 -1116,6 +1109,7 @@@ ieee80211_sta_process_chanswitch(struc
  
        if (sw_elem->mode)
                ieee80211_stop_queues_by_reason(&sdata->local->hw,
 +                              IEEE80211_MAX_QUEUE_MAP,
                                IEEE80211_QUEUE_STOP_REASON_CSA);
  
        if (sdata->local->ops->channel_switch) {
@@@ -1377,7 -1383,6 +1377,7 @@@ void ieee80211_dynamic_ps_disable_work(
        }
  
        ieee80211_wake_queues_by_reason(&local->hw,
 +                                      IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_PS);
  }
  
@@@ -1439,7 -1444,7 +1439,7 @@@ void ieee80211_dynamic_ps_enable_work(s
                else {
                        ieee80211_send_nullfunc(local, sdata, 1);
                        /* Flush to get the tx status of nullfunc frame */
 -                      drv_flush(local, false);
 +                      ieee80211_flush_queues(local, sdata);
                }
        }
  
@@@ -1770,7 -1775,7 +1770,7 @@@ static void ieee80211_set_disassoc(stru
  
        /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
        if (tx)
 -              drv_flush(local, false);
 +              ieee80211_flush_queues(local, sdata);
  
        /* deauthenticate/disassociate now */
        if (tx || frame_buf)
                ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
                                               reason, tx, frame_buf);
  
        /* flush out frame */
        if (tx)
 -              drv_flush(local, false);
 +              ieee80211_flush_queues(local, sdata);
  
        /* clear bssid only after building the needed mgmt frames */
        memset(ifmgd->bssid, 0, ETH_ALEN);
        sdata->vif.bss_conf.p2p_ctwindow = 0;
        sdata->vif.bss_conf.p2p_oppps = false;
  
 -      /* on the next assoc, re-program HT parameters */
 +      /* on the next assoc, re-program HT/VHT parameters */
        memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
        memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
 +      memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
 +      memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
  
        sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
  
        del_timer_sync(&sdata->u.mgd.timer);
        del_timer_sync(&sdata->u.mgd.chswitch_timer);
  
 -      sdata->u.mgd.timers_running = 0;
 -
        sdata->vif.bss_conf.dtim_period = 0;
  
        ifmgd->flags = 0;
@@@ -1951,7 -1956,7 +1951,7 @@@ static void ieee80211_mgd_probe_ap_send
        ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
        run_again(ifmgd, ifmgd->probe_timeout);
        if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
 -              drv_flush(sdata->local, false);
 +              ieee80211_flush_queues(sdata->local, sdata);
  }
  
  static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
@@@ -2074,7 -2079,6 +2074,7 @@@ static void __ieee80211_disconnect(stru
                               true, frame_buf);
        ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
        ieee80211_wake_queues_by_reason(&sdata->local->hw,
 +                                      IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
        mutex_unlock(&ifmgd->mtx);
  
@@@ -3136,8 -3140,15 +3136,8 @@@ static void ieee80211_sta_timer(unsigne
  {
        struct ieee80211_sub_if_data *sdata =
                (struct ieee80211_sub_if_data *) data;
 -      struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 -      struct ieee80211_local *local = sdata->local;
 -
 -      if (local->quiescing) {
 -              set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
 -              return;
 -      }
  
 -      ieee80211_queue_work(&local->hw, &sdata->work);
 +      ieee80211_queue_work(&sdata->local->hw, &sdata->work);
  }
  
  static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
@@@ -3489,6 -3500,72 +3489,6 @@@ static void ieee80211_restart_sta_timer
        }
  }
  
 -#ifdef CONFIG_PM
 -void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
 -{
 -      struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 -
 -      /*
 -       * Stop timers before deleting work items, as timers
 -       * could race and re-add the work-items. They will be
 -       * re-established on connection.
 -       */
 -      del_timer_sync(&ifmgd->conn_mon_timer);
 -      del_timer_sync(&ifmgd->bcn_mon_timer);
 -
 -      /*
 -       * we need to use atomic bitops for the running bits
 -       * only because both timers might fire at the same
 -       * time -- the code here is properly synchronised.
 -       */
 -
 -      cancel_work_sync(&ifmgd->request_smps_work);
 -
 -      cancel_work_sync(&ifmgd->monitor_work);
 -      cancel_work_sync(&ifmgd->beacon_connection_loss_work);
 -      cancel_work_sync(&ifmgd->csa_connection_drop_work);
 -      if (del_timer_sync(&ifmgd->timer))
 -              set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
 -
 -      if (del_timer_sync(&ifmgd->chswitch_timer))
 -              set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
 -      cancel_work_sync(&ifmgd->chswitch_work);
 -}
 -
 -void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
 -{
 -      struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 -
 -      mutex_lock(&ifmgd->mtx);
 -      if (!ifmgd->associated) {
 -              mutex_unlock(&ifmgd->mtx);
 -              return;
 -      }
 -
 -      if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
 -              sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
 -              mlme_dbg(sdata, "driver requested disconnect after resume\n");
 -              ieee80211_sta_connection_lost(sdata,
 -                                            ifmgd->associated->bssid,
 -                                            WLAN_REASON_UNSPECIFIED,
 -                                            true);
 -              mutex_unlock(&ifmgd->mtx);
 -              return;
 -      }
 -      mutex_unlock(&ifmgd->mtx);
 -
 -      if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
 -              add_timer(&ifmgd->timer);
 -      if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
 -              add_timer(&ifmgd->chswitch_timer);
 -      ieee80211_sta_reset_beacon_monitor(sdata);
 -
 -      mutex_lock(&sdata->local->mtx);
 -      ieee80211_restart_sta_timer(sdata);
 -      mutex_unlock(&sdata->local->mtx);
 -}
 -#endif
 -
  /* interface setup */
  void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
  {
@@@ -3887,8 -3964,16 +3887,16 @@@ int ieee80211_mgd_auth(struct ieee80211
        /* prep auth_data so we don't go into idle on disassoc */
        ifmgd->auth_data = auth_data;
  
-       if (ifmgd->associated)
-               ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
+       if (ifmgd->associated) {
+               u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+               ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
+                                      WLAN_REASON_UNSPECIFIED,
+                                      false, frame_buf);
+               __cfg80211_send_deauth(sdata->dev, frame_buf,
+                                      sizeof(frame_buf));
+       }
  
        sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
  
@@@ -3948,8 -4033,16 +3956,16 @@@ int ieee80211_mgd_assoc(struct ieee8021
  
        mutex_lock(&ifmgd->mtx);
  
-       if (ifmgd->associated)
-               ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
+       if (ifmgd->associated) {
+               u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+               ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
+                                      WLAN_REASON_UNSPECIFIED,
+                                      false, frame_buf);
+               __cfg80211_send_deauth(sdata->dev, frame_buf,
+                                      sizeof(frame_buf));
+       }
  
        if (ifmgd->auth_data && !ifmgd->auth_data->done) {
                err = -EBUSY;
                ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
        }
  
 +      if (req->flags & ASSOC_REQ_DISABLE_VHT)
 +              ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
 +
        /* Also disable HT if we don't support it or the AP doesn't use WMM */
        sband = local->hw.wiphy->bands[req->bss->channel->band];
        if (!sband->ht_cap.ht_supported ||
        memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
               sizeof(ifmgd->ht_capa_mask));
  
 +      memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
 +      memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
 +             sizeof(ifmgd->vht_capa_mask));
 +
        if (req->ie && req->ie_len) {
                memcpy(assoc_data->ie, req->ie, req->ie_len);
                assoc_data->ie_len = req->ie_len;
diff --combined net/netfilter/nf_nat_core.c
index 346f871cf096489e5eb63f339435043d8b29b7a2,ad24be070e53c0fe8c27e49a6e2cb93a39c32062..2e469ca2ca553acd9076c0ee3b9c35fc3b561262
@@@ -87,10 -87,9 +87,10 @@@ int nf_xfrm_me_harder(struct sk_buff *s
        struct flowi fl;
        unsigned int hh_len;
        struct dst_entry *dst;
 +      int err;
  
 -      if (xfrm_decode_session(skb, &fl, family) < 0)
 -              return -1;
 +      err = xfrm_decode_session(skb, &fl, family);
 +      if (err < 0)
 +              return err;
  
        dst = skb_dst(skb);
        if (dst->xfrm)
@@@ -99,7 -98,7 +99,7 @@@
  
        dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
        if (IS_ERR(dst))
 -              return -1;
 +              return PTR_ERR(dst);
  
        skb_dst_drop(skb);
        skb_dst_set(skb, dst);
        hh_len = skb_dst(skb)->dev->hard_header_len;
        if (skb_headroom(skb) < hh_len &&
            pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
 -              return -1;
 +              return -ENOMEM;
        return 0;
  }
  EXPORT_SYMBOL(nf_xfrm_me_harder);
@@@ -468,33 -467,22 +468,22 @@@ EXPORT_SYMBOL_GPL(nf_nat_packet)
  struct nf_nat_proto_clean {
        u8      l3proto;
        u8      l4proto;
-       bool    hash;
  };
  
- /* Clear NAT section of all conntracks, in case we're loaded again. */
- static int nf_nat_proto_clean(struct nf_conn *i, void *data)
+ /* kill conntracks with affected NAT section */
+ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
  {
        const struct nf_nat_proto_clean *clean = data;
        struct nf_conn_nat *nat = nfct_nat(i);
  
        if (!nat)
                return 0;
-       if (!(i->status & IPS_SRC_NAT_DONE))
-               return 0;
        if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
            (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
                return 0;
  
-       if (clean->hash) {
-               spin_lock_bh(&nf_nat_lock);
-               hlist_del_rcu(&nat->bysource);
-               spin_unlock_bh(&nf_nat_lock);
-       } else {
-               memset(nat, 0, sizeof(*nat));
-               i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
-                              IPS_SEQ_ADJUST);
-       }
-       return 0;
+       return i->status & IPS_NAT_MASK ? 1 : 0;
  }
  
  static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
        struct net *net;
  
        rtnl_lock();
-       /* Step 1 - remove from bysource hash */
-       clean.hash = true;
        for_each_net(net)
-               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
-       synchronize_rcu();
-       /* Step 2 - clean NAT section */
-       clean.hash = false;
-       for_each_net(net)
-               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+               nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
        rtnl_unlock();
  }
  
@@@ -527,16 -507,9 +508,9 @@@ static void nf_nat_l3proto_clean(u8 l3p
        struct net *net;
  
        rtnl_lock();
-       /* Step 1 - remove from bysource hash */
-       clean.hash = true;
-       for_each_net(net)
-               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
-       synchronize_rcu();
  
-       /* Step 2 - clean NAT section */
-       clean.hash = false;
        for_each_net(net)
-               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+               nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
        rtnl_unlock();
  }
  
@@@ -774,7 -747,7 +748,7 @@@ static void __net_exit nf_nat_net_exit(
  {
        struct nf_nat_proto_clean clean = {};
  
-       nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean);
+       nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
  }
diff --combined net/openvswitch/datapath.c
index 7bb5d4f6bb9097c1ad2d2eb877790fdac1c3e39b,6980c3e6f0667b0ed22098098b66da7cf929a47e..d2f9f2e572989c1fe19034b9aac37e094cf2917c
@@@ -44,7 -44,6 +44,7 @@@
  #include <linux/netfilter_ipv4.h>
  #include <linux/inetdevice.h>
  #include <linux/list.h>
 +#include <linux/lockdep.h>
  #include <linux/openvswitch.h>
  #include <linux/rculist.h>
  #include <linux/dmi.h>
  #include "flow.h"
  #include "vport-internal_dev.h"
  
 -/**
 - * struct ovs_net - Per net-namespace data for ovs.
 - * @dps: List of datapaths to enable dumping them all out.
 - * Protected by genl_mutex.
 - */
 -struct ovs_net {
 -      struct list_head dps;
 -};
 -
 -static int ovs_net_id __read_mostly;
  
  #define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
  static void rehash_flow_table(struct work_struct *work);
  static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
  
 +int ovs_net_id __read_mostly;
 +
 +static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
 +                     struct genl_multicast_group *grp)
 +{
 +      genl_notify(skb, genl_info_net(info), info->snd_portid,
 +                  grp->id, info->nlhdr, GFP_KERNEL);
 +}
 +
  /**
   * DOC: Locking:
   *
 - * Writes to device state (add/remove datapath, port, set operations on vports,
 - * etc.) are protected by RTNL.
 - *
 - * Writes to other state (flow table modifications, set miscellaneous datapath
 - * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
 - * genl_mutex.
 + * All writes, e.g. writes to device state (add/remove datapath, port, set
 + * operations on vports, etc.) and writes to other state (flow table
 + * modifications, miscellaneous datapath parameters, etc.), are protected
 + * by ovs_lock.
   *
   * Reads are protected by RCU.
   *
   * There are a few special cases (mostly stats) that have their own
   * synchronization but they nest under all of above and don't interact with
   * each other.
 + *
 + * The RTNL lock nests inside ovs_mutex.
   */
  
 +static DEFINE_MUTEX(ovs_mutex);
 +
 +void ovs_lock(void)
 +{
 +      mutex_lock(&ovs_mutex);
 +}
 +
 +void ovs_unlock(void)
 +{
 +      mutex_unlock(&ovs_mutex);
 +}
 +
 +#ifdef CONFIG_LOCKDEP
 +int lockdep_ovsl_is_held(void)
 +{
 +      if (debug_locks)
 +              return lockdep_is_held(&ovs_mutex);
 +      else
 +              return 1;
 +}
 +#endif
 +
  static struct vport *new_vport(const struct vport_parms *);
  static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
                             const struct dp_upcall_info *);
@@@ -117,7 -95,7 +117,7 @@@ static int queue_userspace_packet(struc
                                  struct sk_buff *,
                                  const struct dp_upcall_info *);
  
 -/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
 +/* Must be called with rcu_read_lock or ovs_mutex. */
  static struct datapath *get_dp(struct net *net, int dp_ifindex)
  {
        struct datapath *dp = NULL;
        return dp;
  }
  
 -/* Must be called with rcu_read_lock or RTNL lock. */
 +/* Must be called with rcu_read_lock or ovs_mutex. */
  const char *ovs_dp_name(const struct datapath *dp)
  {
 -      struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
 +      struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
  }
  
@@@ -190,7 -168,7 +190,7 @@@ struct vport *ovs_lookup_vport(const st
        return NULL;
  }
  
 -/* Called with RTNL lock and genl_lock. */
 +/* Called with ovs_mutex. */
  static struct vport *new_vport(const struct vport_parms *parms)
  {
        struct vport *vport;
  
                hlist_add_head_rcu(&vport->dp_hash_node, head);
        }
 -
        return vport;
  }
  
 -/* Called with RTNL lock. */
  void ovs_dp_detach_port(struct vport *p)
  {
 -      ASSERT_RTNL();
 +      ASSERT_OVSL();
  
        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);
@@@ -357,35 -337,6 +357,35 @@@ static int queue_gso_packets(struct ne
        return err;
  }
  
 +static size_t key_attr_size(void)
 +{
 +      return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
 +              + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
 +              + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
 +              + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
 +              + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
 +              + nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
 +              + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
 +              + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
 +              + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
 +              + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
 +              + nla_total_size(28); /* OVS_KEY_ATTR_ND */
 +}
 +
 +static size_t upcall_msg_size(const struct sk_buff *skb,
 +                            const struct nlattr *userdata)
 +{
 +      size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
 +              + nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */
 +              + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
 +
 +      /* OVS_PACKET_ATTR_USERDATA */
 +      if (userdata)
 +              size += NLA_ALIGN(userdata->nla_len);
 +
 +      return size;
 +}
 +
  static int queue_userspace_packet(struct net *net, int dp_ifindex,
                                  struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb; /* to be queued to userspace */
        struct nlattr *nla;
 -      unsigned int len;
        int err;
  
        if (vlan_tx_tag_present(skb)) {
                if (!nskb)
                        return -ENOMEM;
  
 -              nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
 +              nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
                if (!nskb)
                        return -ENOMEM;
  
                goto out;
        }
  
 -      len = sizeof(struct ovs_header);
 -      len += nla_total_size(skb->len);
 -      len += nla_total_size(FLOW_BUFSIZE);
 -      if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
 -              len += nla_total_size(8);
 -
 -      user_skb = genlmsg_new(len, GFP_ATOMIC);
 +      user_skb = genlmsg_new(upcall_msg_size(skb, upcall_info->userdata), GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        nla_nest_end(user_skb, nla);
  
        if (upcall_info->userdata)
 -              nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
 -                          nla_get_u64(upcall_info->userdata));
 +              __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
 +                        nla_len(upcall_info->userdata),
 +                        nla_data(upcall_info->userdata));
  
        nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
  
@@@ -445,13 -402,13 +445,13 @@@ out
        return err;
  }
  
 -/* Called with genl_mutex. */
 +/* Called with ovs_mutex. */
  static int flush_flows(struct datapath *dp)
  {
        struct flow_table *old_table;
        struct flow_table *new_table;
  
 -      old_table = genl_dereference(dp->table);
 +      old_table = ovsl_dereference(dp->table);
        new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
        if (!new_table)
                return -ENOMEM;
@@@ -587,7 -544,7 +587,7 @@@ static int validate_userspace(const str
  {
        static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =   {
                [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
 -              [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
 +              [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
        };
        struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
        int error;
@@@ -704,7 -661,8 +704,7 @@@ static int ovs_packet_cmd_execute(struc
  
        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
 -          !a[OVS_PACKET_ATTR_ACTIONS] ||
 -          nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
 +          !a[OVS_PACKET_ATTR_ACTIONS])
                goto err;
  
        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);
  
 -      memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
 +      nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
  
        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);
        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
 -      if (ntohs(eth->h_proto) >= 1536)
 +      if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);
@@@ -785,7 -743,7 +785,7 @@@ err
  }
  
  static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
 -      [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
 +      [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
  };
@@@ -801,7 -759,7 +801,7 @@@ static struct genl_ops dp_packet_genl_o
  static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
  {
        int i;
 -      struct flow_table *table = genl_dereference(dp->table);
 +      struct flow_table *table = ovsl_dereference(dp->table);
  
        stats->n_flows = ovs_flow_tbl_count(table);
  
@@@ -843,17 -801,7 +843,17 @@@ static struct genl_multicast_group ovs_
        .name = OVS_FLOW_MCGROUP
  };
  
 -/* Called with genl_lock. */
 +static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 +{
 +      return NLMSG_ALIGN(sizeof(struct ovs_header))
 +              + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
 +              + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
 +              + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
 +              + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
 +              + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
 +}
 +
 +/* Called with ovs_mutex. */
  static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                                  struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd)
        u8 tcp_flags;
        int err;
  
 -      sf_acts = rcu_dereference_protected(flow->sf_acts,
 -                                          lockdep_genl_is_held());
 +      sf_acts = ovsl_dereference(flow->sf_acts);
  
        ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
@@@ -930,10 -879,25 +930,10 @@@ error
  static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
  {
        const struct sw_flow_actions *sf_acts;
 -      int len;
  
 -      sf_acts = rcu_dereference_protected(flow->sf_acts,
 -                                          lockdep_genl_is_held());
 +      sf_acts = ovsl_dereference(flow->sf_acts);
  
 -      /* OVS_FLOW_ATTR_KEY */
 -      len = nla_total_size(FLOW_BUFSIZE);
 -      /* OVS_FLOW_ATTR_ACTIONS */
 -      len += nla_total_size(sf_acts->actions_len);
 -      /* OVS_FLOW_ATTR_STATS */
 -      len += nla_total_size(sizeof(struct ovs_flow_stats));
 -      /* OVS_FLOW_ATTR_TCP_FLAGS */
 -      len += nla_total_size(1);
 -      /* OVS_FLOW_ATTR_USED */
 -      len += nla_total_size(8);
 -
 -      len += NLMSG_ALIGN(sizeof(struct ovs_header));
 -
 -      return genlmsg_new(len, GFP_KERNEL);
 +      return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
  }
  
  static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
@@@ -982,13 -946,12 +982,13 @@@ static int ovs_flow_cmd_new_or_set(stru
                goto error;
        }
  
 +      ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
 -              goto error;
 +              goto err_unlock_ovs;
  
 -      table = genl_dereference(dp->table);
 +      table = ovsl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow) {
                struct sw_flow_actions *acts;
                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
 -                      goto error;
 +                      goto err_unlock_ovs;
  
                /* Expand table, if necessary, to make room. */
                if (ovs_flow_tbl_need_to_expand(table)) {
                        if (!IS_ERR(new_table)) {
                                rcu_assign_pointer(dp->table, new_table);
                                ovs_flow_tbl_deferred_destroy(table);
 -                              table = genl_dereference(dp->table);
 +                              table = ovsl_dereference(dp->table);
                        }
                }
  
                flow = ovs_flow_alloc();
                if (IS_ERR(flow)) {
                        error = PTR_ERR(flow);
 -                      goto error;
 +                      goto err_unlock_ovs;
                }
                flow->key = key;
                clear_stats(flow);
                error = -EEXIST;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
                    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
 -                      goto error;
 +                      goto err_unlock_ovs;
  
                /* Update actions. */
 -              old_acts = rcu_dereference_protected(flow->sf_acts,
 -                                                   lockdep_genl_is_held());
 +              old_acts = ovsl_dereference(flow->sf_acts);
                acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
                if (acts_attrs &&
                   (old_acts->actions_len != nla_len(acts_attrs) ||
                        new_acts = ovs_flow_actions_alloc(acts_attrs);
                        error = PTR_ERR(new_acts);
                        if (IS_ERR(new_acts))
 -                              goto error;
 +                              goto err_unlock_ovs;
  
                        rcu_assign_pointer(flow->sf_acts, new_acts);
                        ovs_flow_deferred_free_acts(old_acts);
                        spin_unlock_bh(&flow->lock);
                }
        }
 +      ovs_unlock();
  
        if (!IS_ERR(reply))
 -              genl_notify(reply, genl_info_net(info), info->snd_portid,
 -                         ovs_dp_flow_multicast_group.id, info->nlhdr,
 -                         GFP_KERNEL);
 +              ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
        else
                netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
  
  error_free_flow:
        ovs_flow_free(flow);
 +err_unlock_ovs:
 +      ovs_unlock();
  error:
        return error;
  }
@@@ -1112,32 -1075,21 +1112,32 @@@ static int ovs_flow_cmd_get(struct sk_b
        if (err)
                return err;
  
 +      ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 -      if (!dp)
 -              return -ENODEV;
 +      if (!dp) {
 +              err = -ENODEV;
 +              goto unlock;
 +      }
  
 -      table = genl_dereference(dp->table);
 +      table = ovsl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
 -      if (!flow)
 -              return -ENOENT;
 +      if (!flow) {
 +              err = -ENOENT;
 +              goto unlock;
 +      }
  
        reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                        info->snd_seq, OVS_FLOW_CMD_NEW);
 -      if (IS_ERR(reply))
 -              return PTR_ERR(reply);
 +      if (IS_ERR(reply)) {
 +              err = PTR_ERR(reply);
 +              goto unlock;
 +      }
  
 +      ovs_unlock();
        return genlmsg_reply(reply, info);
 +unlock:
 +      ovs_unlock();
 +      return err;
  }
  
  static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
        int err;
        int key_len;
  
 +      ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 -      if (!dp)
 -              return -ENODEV;
 -
 -      if (!a[OVS_FLOW_ATTR_KEY])
 -              return flush_flows(dp);
 +      if (!dp) {
 +              err = -ENODEV;
 +              goto unlock;
 +      }
  
 +      if (!a[OVS_FLOW_ATTR_KEY]) {
 +              err = flush_flows(dp);
 +              goto unlock;
 +      }
        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
 -              return err;
 +              goto unlock;
  
 -      table = genl_dereference(dp->table);
 +      table = ovsl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
 -      if (!flow)
 -              return -ENOENT;
 +      if (!flow) {
 +              err = -ENOENT;
 +              goto unlock;
 +      }
  
        reply = ovs_flow_cmd_alloc_info(flow);
 -      if (!reply)
 -              return -ENOMEM;
 +      if (!reply) {
 +              err = -ENOMEM;
 +              goto unlock;
 +      }
  
        ovs_flow_tbl_remove(table, flow);
  
        BUG_ON(err < 0);
  
        ovs_flow_deferred_free(flow);
 +      ovs_unlock();
  
 -      genl_notify(reply, genl_info_net(info), info->snd_portid,
 -                  ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
 +      ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
        return 0;
 +unlock:
 +      ovs_unlock();
 +      return err;
  }
  
  static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        struct datapath *dp;
        struct flow_table *table;
  
 +      ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 -      if (!dp)
 +      if (!dp) {
 +              ovs_unlock();
                return -ENODEV;
 +      }
  
 -      table = genl_dereference(dp->table);
 +      table = ovsl_dereference(dp->table);
  
        for (;;) {
                struct sw_flow *flow;
                cb->args[0] = bucket;
                cb->args[1] = obj;
        }
 +      ovs_unlock();
        return skb->len;
  }
  
@@@ -1276,16 -1213,6 +1276,16 @@@ static struct genl_multicast_group ovs_
        .name = OVS_DATAPATH_MCGROUP
  };
  
 +static size_t ovs_dp_cmd_msg_size(void)
 +{
 +      size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
 +
 +      msgsize += nla_total_size(IFNAMSIZ);
 +      msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
 +
 +      return msgsize;
 +}
 +
  static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                                u32 portid, u32 seq, u32 flags, u8 cmd)
  {
@@@ -1324,7 -1251,7 +1324,7 @@@ static struct sk_buff *ovs_dp_cmd_build
        struct sk_buff *skb;
        int retval;
  
 -      skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 +      skb = genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);
  
        return skb;
  }
  
 -/* Called with genl_mutex and optionally with RTNL lock also. */
 +/* Called with ovs_mutex. */
  static struct datapath *lookup_datapath(struct net *net,
                                        struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
@@@ -1370,12 -1297,12 +1370,12 @@@ static int ovs_dp_cmd_new(struct sk_buf
        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
                goto err;
  
 -      rtnl_lock();
 +      ovs_lock();
  
        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (dp == NULL)
 -              goto err_unlock_rtnl;
 +              goto err_unlock_ovs;
  
        ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
  
  
        ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
        list_add_tail(&dp->list_node, &ovs_net->dps);
 -      rtnl_unlock();
  
 -      genl_notify(reply, genl_info_net(info), info->snd_portid,
 -                  ovs_dp_datapath_multicast_group.id, info->nlhdr,
 -                  GFP_KERNEL);
 +      ovs_unlock();
 +
 +      ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
        return 0;
  
  err_destroy_local_port:
 -      ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
 +      ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
  err_destroy_ports_array:
        kfree(dp->ports);
  err_destroy_percpu:
        free_percpu(dp->stats_percpu);
  err_destroy_table:
 -      ovs_flow_tbl_destroy(genl_dereference(dp->table));
 +      ovs_flow_tbl_destroy(ovsl_dereference(dp->table));
  err_free_dp:
        release_net(ovs_dp_get_net(dp));
        kfree(dp);
 -err_unlock_rtnl:
 -      rtnl_unlock();
 +err_unlock_ovs:
 +      ovs_unlock();
  err:
        return err;
  }
  
 -/* Called with genl_mutex. */
 +/* Called with ovs_mutex. */
  static void __dp_destroy(struct datapath *dp)
  {
        int i;
  
 -      rtnl_lock();
 -
        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
                struct hlist_node *n;
        }
  
        list_del(&dp->list_node);
 -      ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
  
 -      /* rtnl_unlock() will wait until all the references to devices that
 -       * are pending unregistration have been dropped.  We do it here to
 -       * ensure that any internal devices (which contain DP pointers) are
 -       * fully destroyed before freeing the datapath.
 +      /* OVSP_LOCAL is the datapath's internal port. Make sure that all
 +       * ports in the datapath are destroyed before freeing the datapath.
         */
 -      rtnl_unlock();
 +      ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
  
        call_rcu(&dp->rcu, destroy_dp_rcu);
  }
@@@ -1479,27 -1412,24 +1479,27 @@@ static int ovs_dp_cmd_del(struct sk_buf
        struct datapath *dp;
        int err;
  
 +      ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
 -              return err;
 +              goto unlock;
  
        reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
 -              return err;
 +              goto unlock;
  
        __dp_destroy(dp);
 +      ovs_unlock();
  
 -      genl_notify(reply, genl_info_net(info), info->snd_portid,
 -                  ovs_dp_datapath_multicast_group.id, info->nlhdr,
 -                  GFP_KERNEL);
 +      ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
  
        return 0;
 +unlock:
 +      ovs_unlock();
 +      return err;
  }
  
  static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
        struct datapath *dp;
        int err;
  
 +      ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
 +      err = PTR_ERR(dp);
        if (IS_ERR(dp))
 -              return PTR_ERR(dp);
 +              goto unlock;
  
        reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
                err = PTR_ERR(reply);
                netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_datapath_multicast_group.id, err);
 -              return 0;
 +              err = 0;
 +              goto unlock;
        }
  
 -      genl_notify(reply, genl_info_net(info), info->snd_portid,
 -                  ovs_dp_datapath_multicast_group.id, info->nlhdr,
 -                  GFP_KERNEL);
 +      ovs_unlock();
 +      ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
  
        return 0;
 +unlock:
 +      ovs_unlock();
 +      return err;
  }
  
  static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
  {
        struct sk_buff *reply;
        struct datapath *dp;
 +      int err;
  
 +      ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
 -      if (IS_ERR(dp))
 -              return PTR_ERR(dp);
 +      if (IS_ERR(dp)) {
 +              err = PTR_ERR(dp);
 +              goto unlock;
 +      }
  
        reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
 -      if (IS_ERR(reply))
 -              return PTR_ERR(reply);
 +      if (IS_ERR(reply)) {
 +              err = PTR_ERR(reply);
 +              goto unlock;
 +      }
  
 +      ovs_unlock();
        return genlmsg_reply(reply, info);
 +
 +unlock:
 +      ovs_unlock();
 +      return err;
  }
  
  static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int skip = cb->args[0];
        int i = 0;
  
 +      ovs_lock();
        list_for_each_entry(dp, &ovs_net->dps, list_node) {
                if (i >= skip &&
                    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
                        break;
                i++;
        }
 +      ovs_unlock();
  
        cb->args[0] = i;
  
@@@ -1630,7 -1542,7 +1630,7 @@@ struct genl_multicast_group ovs_dp_vpor
        .name = OVS_VPORT_MCGROUP
  };
  
 -/* Called with RTNL lock or RCU read lock. */
 +/* Called with ovs_mutex or RCU read lock. */
  static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   u32 portid, u32 seq, u32 flags, u8 cmd)
  {
@@@ -1669,7 -1581,7 +1669,7 @@@ error
        return err;
  }
  
 -/* Called with RTNL lock or RCU read lock. */
 +/* Called with ovs_mutex or RCU read lock. */
  struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
                                         u32 seq, u8 cmd)
  {
                return ERR_PTR(-ENOMEM);
  
        retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
-       if (retval < 0) {
-               kfree_skb(skb);
-               return ERR_PTR(retval);
-       }
+       BUG_ON(retval < 0);
        return skb;
  }
  
 -/* Called with RTNL lock or RCU read lock. */
 +/* Called with ovs_mutex or RCU read lock. */
  static struct vport *lookup_vport(struct net *net,
                                  struct ovs_header *ovs_header,
                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
                if (!dp)
                        return ERR_PTR(-ENODEV);
  
 -              vport = ovs_vport_rtnl_rcu(dp, port_no);
 +              vport = ovs_vport_ovsl_rcu(dp, port_no);
                if (!vport)
 -                      return ERR_PTR(-ENOENT);
 +                      return ERR_PTR(-ENODEV);
                return vport;
        } else
                return ERR_PTR(-EINVAL);
@@@ -1738,7 -1648,7 +1736,7 @@@ static int ovs_vport_cmd_new(struct sk_
            !a[OVS_VPORT_ATTR_UPCALL_PID])
                goto exit;
  
 -      rtnl_lock();
 +      ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                if (port_no >= DP_MAX_PORTS)
                        goto exit_unlock;
  
 -              vport = ovs_vport_rtnl_rcu(dp, port_no);
 +              vport = ovs_vport_ovsl(dp, port_no);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock;
                                err = -EFBIG;
                                goto exit_unlock;
                        }
 -                      vport = ovs_vport_rtnl(dp, port_no);
 +                      vport = ovs_vport_ovsl(dp, port_no);
                        if (!vport)
                                break;
                }
                ovs_dp_detach_port(vport);
                goto exit_unlock;
        }
 -      genl_notify(reply, genl_info_net(info), info->snd_portid,
 -                  ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 +
 +      ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
  
  exit_unlock:
 -      rtnl_unlock();
 +      ovs_unlock();
  exit:
        return err;
  }
@@@ -1803,7 -1713,7 +1801,7 @@@ static int ovs_vport_cmd_set(struct sk_
        struct vport *vport;
        int err;
  
 -      rtnl_lock();
 +      ovs_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
            nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
                err = -EINVAL;
  
+       reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!reply) {
+               err = -ENOMEM;
+               goto exit_unlock;
+       }
        if (!err && a[OVS_VPORT_ATTR_OPTIONS])
                err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
        if (err)
-               goto exit_unlock;
+               goto exit_free;
        if (a[OVS_VPORT_ATTR_UPCALL_PID])
                vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
  
-       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
-                                        OVS_VPORT_CMD_NEW);
-       if (IS_ERR(reply)) {
-               netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
-                               ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
-               goto exit_unlock;
-       }
+       err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
+                                     info->snd_seq, 0, OVS_VPORT_CMD_NEW);
+       BUG_ON(err < 0);
  
 -      genl_notify(reply, genl_info_net(info), info->snd_portid,
 -                  ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 +      ovs_unlock();
 +      ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
 +      return 0;
  
+ exit_free:
+       kfree_skb(reply);
  exit_unlock:
 -      rtnl_unlock();
 +      ovs_unlock();
        return err;
  }
  
@@@ -1845,7 -1762,7 +1851,7 @@@ static int ovs_vport_cmd_del(struct sk_
        struct vport *vport;
        int err;
  
 -      rtnl_lock();
 +      ovs_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
        err = 0;
        ovs_dp_detach_port(vport);
  
 -      genl_notify(reply, genl_info_net(info), info->snd_portid,
 -                  ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 +      ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
  
  exit_unlock:
 -      rtnl_unlock();
 +      ovs_unlock();
        return err;
  }
  
@@@ -2028,13 -1946,13 +2034,13 @@@ static void rehash_flow_table(struct wo
        struct datapath *dp;
        struct net *net;
  
 -      genl_lock();
 +      ovs_lock();
        rtnl_lock();
        for_each_net(net) {
                struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
  
                list_for_each_entry(dp, &ovs_net->dps, list_node) {
 -                      struct flow_table *old_table = genl_dereference(dp->table);
 +                      struct flow_table *old_table = ovsl_dereference(dp->table);
                        struct flow_table *new_table;
  
                        new_table = ovs_flow_tbl_rehash(old_table);
                }
        }
        rtnl_unlock();
 -      genl_unlock();
 -
 +      ovs_unlock();
        schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
  }
  
@@@ -2054,21 -1973,18 +2060,21 @@@ static int __net_init ovs_init_net(stru
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
  
        INIT_LIST_HEAD(&ovs_net->dps);
 +      INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
        return 0;
  }
  
  static void __net_exit ovs_exit_net(struct net *net)
  {
 -      struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
        struct datapath *dp, *dp_next;
 +      struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
  
 -      genl_lock();
 +      ovs_lock();
        list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
                __dp_destroy(dp);
 -      genl_unlock();
 +      ovs_unlock();
 +
 +      cancel_work_sync(&ovs_net->dp_notify_work);
  }
  
  static struct pernet_operations ovs_net_ops = {
diff --combined net/openvswitch/flow.c
index cf9328be75e92a730d6d8b32a5a63510e117ee5d,67a2b783fe70257cb6c33542806d3f3ca17f3d76..b15321a2228c879a6a2bc56e950e7335be2a9507
@@@ -211,7 -211,7 +211,7 @@@ struct sw_flow_actions *ovs_flow_action
                return ERR_PTR(-ENOMEM);
  
        sfa->actions_len = actions_len;
 -      memcpy(sfa->actions, nla_data(actions), actions_len);
 +      nla_memcpy(sfa->actions, actions, actions_len);
        return sfa;
  }
  
@@@ -466,7 -466,7 +466,7 @@@ static __be16 parse_ethertype(struct sk
        proto = *(__be16 *) skb->data;
        __skb_pull(skb, sizeof(__be16));
  
 -      if (ntohs(proto) >= 1536)
 +      if (ntohs(proto) >= ETH_P_802_3_MIN)
                return proto;
  
        if (skb->len < sizeof(struct llc_snap_hdr))
  
        __skb_pull(skb, sizeof(struct llc_snap_hdr));
  
 -      if (ntohs(llc->ethertype) >= 1536)
 +      if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
                return llc->ethertype;
  
        return htons(ETH_P_802_2);
@@@ -795,9 -795,9 +795,9 @@@ void ovs_flow_tbl_insert(struct flow_ta
  
  void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
  {
+       BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->hash_node[table->node_ver]);
        table->count--;
-       BUG_ON(table->count < 0);
  }
  
  /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
@@@ -1038,7 -1038,7 +1038,7 @@@ int ovs_flow_from_nlattrs(struct sw_flo
  
        if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
                swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
 -              if (ntohs(swkey->eth.type) < 1536)
 +              if (ntohs(swkey->eth.type) < ETH_P_802_3_MIN)
                        return -EINVAL;
                attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
        } else {
diff --combined net/unix/af_unix.c
index 5ca1631de7ef365da21ff689210e69c74047c079,2db702d82e7d16fea7a27a82cc7c5dc3d3d95726..9efe01113c5c5bc898dd6b26a4e0ced9733e1c01
@@@ -1340,6 -1340,7 +1340,6 @@@ static void unix_destruct_scm(struct sk
        struct scm_cookie scm;
        memset(&scm, 0, sizeof(scm));
        scm.pid  = UNIXCB(skb).pid;
 -      scm.cred = UNIXCB(skb).cred;
        if (UNIXCB(skb).fp)
                unix_detach_fds(&scm, skb);
  
@@@ -1390,8 -1391,8 +1390,8 @@@ static int unix_scm_to_skb(struct scm_c
        int err = 0;
  
        UNIXCB(skb).pid  = get_pid(scm->pid);
 -      if (scm->cred)
 -              UNIXCB(skb).cred = get_cred(scm->cred);
 +      UNIXCB(skb).uid = scm->creds.uid;
 +      UNIXCB(skb).gid = scm->creds.gid;
        UNIXCB(skb).fp = NULL;
        if (scm->fp && send_fds)
                err = unix_attach_fds(scm, skb);
  static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
                            const struct sock *other)
  {
 -      if (UNIXCB(skb).cred)
 +      if (UNIXCB(skb).pid)
                return;
        if (test_bit(SOCK_PASSCRED, &sock->flags) ||
            !other->sk_socket ||
            test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
                UNIXCB(skb).pid  = get_pid(task_tgid(current));
-               current_euid_egid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
 -              UNIXCB(skb).cred = get_current_cred();
++              current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
        }
  }
  
@@@ -1818,7 -1819,7 +1818,7 @@@ static int unix_dgram_recvmsg(struct ki
                siocb->scm = &tmp_scm;
                memset(&tmp_scm, 0, sizeof(tmp_scm));
        }
 -      scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
 +      scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
        unix_set_secdata(siocb->scm, skb);
  
        if (!(flags & MSG_PEEK)) {
@@@ -1990,12 -1991,11 +1990,12 @@@ again
                if (check_creds) {
                        /* Never glue messages from different writers */
                        if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
 -                          (UNIXCB(skb).cred != siocb->scm->cred))
 +                          !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
 +                          !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
                                break;
                } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
                        /* Copy credentials */
 -                      scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
 +                      scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
                        check_creds = 1;
                }
  
@@@ -2196,9 -2196,7 +2196,9 @@@ static unsigned int unix_dgram_poll(str
  
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 -              mask |= POLLERR;
 +              mask |= POLLERR |
 +                      (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
 +
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
diff --combined security/selinux/hooks.c
index 0a0609fce28b66067155ae2da5111a300efaafa6,7171a957b9335694c1bbd1c5c9a41e2ec13bbda9..bf889ee51509654f1ac6550a54eec7cfa6be861d
@@@ -51,6 -51,7 +51,7 @@@
  #include <linux/tty.h>
  #include <net/icmp.h>
  #include <net/ip.h>           /* for local_port_range[] */
+ #include <net/sock.h>
  #include <net/tcp.h>          /* struct or_callable used in sock_rcv_skb */
  #include <net/net_namespace.h>
  #include <net/netlabel.h>
@@@ -60,7 -61,7 +61,7 @@@
  #include <linux/bitops.h>
  #include <linux/interrupt.h>
  #include <linux/netdevice.h>  /* for network interface checks */
 -#include <linux/netlink.h>
 +#include <net/netlink.h>
  #include <linux/tcp.h>
  #include <linux/udp.h>
  #include <linux/dccp.h>
@@@ -4363,6 -4364,11 +4364,11 @@@ static void selinux_inet_conn_establish
        selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
  }
  
+ static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+ {
+       skb_set_owner_w(skb, sk);
+ }
+ 
  static int selinux_secmark_relabel_packet(u32 sid)
  {
        const struct task_security_struct *__tsec;
@@@ -4475,7 -4481,7 +4481,7 @@@ static int selinux_nlmsg_perm(struct so
        struct nlmsghdr *nlh;
        struct sk_security_struct *sksec = sk->sk_security;
  
 -      if (skb->len < NLMSG_SPACE(0)) {
 +      if (skb->len < NLMSG_HDRLEN) {
                err = -EINVAL;
                goto out;
        }
@@@ -5664,6 -5670,7 +5670,7 @@@ static struct security_operations selin
        .tun_dev_attach_queue =         selinux_tun_dev_attach_queue,
        .tun_dev_attach =               selinux_tun_dev_attach,
        .tun_dev_open =                 selinux_tun_dev_open,
+       .skb_owned_by =                 selinux_skb_owned_by,
  
  #ifdef CONFIG_SECURITY_NETWORK_XFRM
        .xfrm_policy_alloc_security =   selinux_xfrm_policy_alloc,