net: WARN if skb_checksum_help() is called on skb requiring segmentation
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ba50a1e404c4bac04cc7d56718865d5a1749c2f..17db2f2e5236436b219118a0486ae7df36eaf69e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
-#include <linux/if_tunnel.h>
-#include <linux/if_pppox.h>
-#include <linux/ppp_defs.h>
 #include <linux/net_tstamp.h>
+#include <linux/jump_label.h>
+#include <net/flow_keys.h>
 
 #include "net-sysfs.h"
 
@@ -1320,8 +1319,6 @@ EXPORT_SYMBOL(dev_close);
  */
 void dev_disable_lro(struct net_device *dev)
 {
-       u32 flags;
-
        /*
         * If we're trying to disable lro on a vlan device
         * use the underlying physical device instead
@@ -1329,15 +1326,9 @@ void dev_disable_lro(struct net_device *dev)
        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
 
-       if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
-               flags = dev->ethtool_ops->get_flags(dev);
-       else
-               flags = ethtool_op_get_flags(dev);
-
-       if (!(flags & ETH_FLAG_LRO))
-               return;
+       dev->wanted_features &= ~NETIF_F_LRO;
+       netdev_update_features(dev);
 
-       __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
        if (unlikely(dev->features & NETIF_F_LRO))
                netdev_WARN(dev, "failed to disable LRO!\n");
 }
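
dev_disable_lro() now goes through the unified features machinery: clearing
NETIF_F_LRO in wanted_features and calling netdev_update_features() lands in
the driver's ndo_set_features hook, replacing the legacy ethtool
get_flags/set_flags pair. A minimal sketch of the driver side
(foo_set_features, foo_priv and foo_hw_set_lro are hypothetical, not part of
this patch):

	static int foo_set_features(struct net_device *dev,
				    netdev_features_t features)
	{
		struct foo_priv *priv = netdev_priv(dev);
		netdev_features_t changed = dev->features ^ features;

		/* netdev_update_features() hands us the fixed-up set;
		 * react only to bits that actually changed.
		 */
		if (changed & NETIF_F_LRO)
			foo_hw_set_lro(priv, !!(features & NETIF_F_LRO));

		return 0;
	}
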
@@ -1396,7 +1387,7 @@ rollback:
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
-                               break;
+                               goto outroll;
 
                        if (dev->flags & IFF_UP) {
                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1407,6 +1398,7 @@ rollback:
                }
        }
 
+outroll:
        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
 }
@@ -1449,34 +1441,55 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-/* When > 0 there are consumers of rx skb time stamps */
-static atomic_t netstamp_needed = ATOMIC_INIT(0);
+static struct jump_label_key netstamp_needed __read_mostly;
+#ifdef HAVE_JUMP_LABEL
+/* We are not allowed to call jump_label_dec() from irq context
+ * If net_disable_timestamp() is called from irq context, defer the
+ * jump_label_dec() calls.
+ */
+static atomic_t netstamp_needed_deferred;
+#endif
 
 void net_enable_timestamp(void)
 {
-       atomic_inc(&netstamp_needed);
+#ifdef HAVE_JUMP_LABEL
+       int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+
+       if (deferred) {
+               while (--deferred)
+                       jump_label_dec(&netstamp_needed);
+               return;
+       }
+#endif
+       WARN_ON(in_interrupt());
+       jump_label_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
 void net_disable_timestamp(void)
 {
-       atomic_dec(&netstamp_needed);
+#ifdef HAVE_JUMP_LABEL
+       if (in_interrupt()) {
+               atomic_inc(&netstamp_needed_deferred);
+               return;
+       }
+#endif
+       jump_label_dec(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
-       if (atomic_read(&netstamp_needed))
+       skb->tstamp.tv64 = 0;
+       if (static_branch(&netstamp_needed))
                __net_timestamp(skb);
-       else
-               skb->tstamp.tv64 = 0;
 }
 
-static inline void net_timestamp_check(struct sk_buff *skb)
-{
-       if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
-               __net_timestamp(skb);
-}
+#define net_timestamp_check(COND, SKB)                 \
+       if (static_branch(&netstamp_needed)) {          \
+               if ((COND) && !(SKB)->tstamp.tv64)      \
+                       __net_timestamp(SKB);           \
+       }                                               \
 
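The atomic counter becomes a jump label: static_branch() compiles down to a
run-time patched branch, so when no timestamp consumer is registered the
RX/TX paths pay almost nothing. The general pattern, using the same pre-3.4
jump label API this patch relies on (my_feature and the helpers around it
are illustrative names only):

	#include <linux/jump_label.h>

	static struct jump_label_key my_feature __read_mostly;

	static void do_rare_work(void);		/* illustrative */

	void my_feature_enable(void)		/* process context only */
	{
		jump_label_inc(&my_feature);	/* patches the branch in */
	}

	void my_feature_disable(void)
	{
		/* may sleep: not callable from irq context, hence the
		 * deferred count in net_disable_timestamp() above
		 */
		jump_label_dec(&my_feature);
	}

	static void hot_path(void)
	{
		if (static_branch(&my_feature))	/* near-free when disabled */
			do_rare_work();
	}
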
 static int net_hwtstamp_validate(struct ifreq *ifr)
 {
@@ -1874,6 +1887,22 @@ void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
 EXPORT_SYMBOL(skb_set_dev);
 #endif /* CONFIG_NET_NS */
 
+static void skb_warn_bad_offload(const struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       const char *driver = "";
+
+       if (dev && dev->dev.parent)
+               driver = dev_driver_string(dev->dev.parent);
+
+       WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
+            "gso_type=%d ip_summed=%d\n",
+            driver, dev ? &dev->features : NULL,
+            skb->sk ? &skb->sk->sk_route_caps : NULL,
+            skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
+            skb_shinfo(skb)->gso_type, skb->ip_summed);
+}
+
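The helper prints feature masks with the %pNF printk extension introduced
alongside netdev_features_t, which formats the mask through a pointer. A
usage sketch (foo_show_features is hypothetical):

	static void foo_show_features(struct net_device *dev)
	{
		/* %pNF dereferences a pointer to the netdev_features_t */
		netdev_dbg(dev, "features=%pNF wanted=%pNF\n",
			   &dev->features, &dev->wanted_features);
	}
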
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
@@ -1887,8 +1916,8 @@ int skb_checksum_help(struct sk_buff *skb)
                goto out_set_summed;
 
        if (unlikely(skb_shinfo(skb)->gso_size)) {
-               /* Let GSO fix up the checksum. */
-               goto out_set_summed;
+               skb_warn_bad_offload(skb);
+               return -EINVAL;
        }
 
        offset = skb_checksum_start_offset(skb);
@@ -1923,7 +1952,8 @@ EXPORT_SYMBOL(skb_checksum_help);
  *     It may return NULL if the skb requires no segmentation.  This is
  *     only possible when GSO is used for verifying header integrity.
  */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
@@ -1947,16 +1977,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
        __skb_pull(skb, skb->mac_len);
 
        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
-               struct net_device *dev = skb->dev;
-               struct ethtool_drvinfo info = {};
-
-               if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
-                       dev->ethtool_ops->get_drvinfo(dev, &info);
-
-               WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
-                    info.driver, dev ? dev->features : 0L,
-                    skb->sk ? skb->sk->sk_route_caps : 0L,
-                    skb->len, skb->data_len, skb->ip_summed);
+               skb_warn_bad_offload(skb);
 
                if (skb_header_cloned(skb) &&
                    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
@@ -2064,7 +2085,7 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
  *     This function segments the given skb and stores the list of segments
  *     in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb, int features)
+static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 {
        struct sk_buff *segs;
 
@@ -2103,7 +2124,7 @@ static inline void skb_orphan_try(struct sk_buff *skb)
        }
 }
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 {
        return ((features & NETIF_F_GEN_CSUM) ||
                ((features & NETIF_F_V4_CSUM) &&
@@ -2114,7 +2135,8 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
                 protocol == htons(ETH_P_FCOE)));
 }
 
-static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
+static netdev_features_t harmonize_features(struct sk_buff *skb,
+       __be16 protocol, netdev_features_t features)
 {
        if (!can_checksum_protocol(features, protocol)) {
                features &= ~NETIF_F_ALL_CSUM;
@@ -2126,10 +2148,10 @@ static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features
        return features;
 }
 
-u32 netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        __be16 protocol = skb->protocol;
-       u32 features = skb->dev->features;
+       netdev_features_t features = skb->dev->features;
 
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2175,7 +2197,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
        unsigned int skb_len;
 
        if (likely(!skb->next)) {
-               u32 features;
+               netdev_features_t features;
 
                /*
                 * If device doesn't need skb->dst, release it right now while
@@ -2256,7 +2278,7 @@ gso:
                        return rc;
                }
                txq_trans_update(txq);
-               if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
+               if (unlikely(netif_xmit_stopped(txq) && skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);
 
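The netif_tx_queue_stopped() calls become netif_xmit_stopped(), which also
observes the new BQL stack-XOFF state. Its shape at the time was roughly
(paraphrased from include/linux/netdevice.h):

	static inline bool netif_xmit_stopped(const struct netdev_queue *q)
	{
		/* QUEUE_STATE_ANY_XOFF = driver XOFF | BQL stack XOFF */
		return q->state & QUEUE_STATE_ANY_XOFF;
	}
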
@@ -2456,6 +2478,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        return rc;
 }
 
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+static void skb_update_prio(struct sk_buff *skb)
+{
+       struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+
+       if ((!skb->priority) && (skb->sk) && map)
+               skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+}
+#else
+#define skb_update_prio(skb)
+#endif
+
 static DEFINE_PER_CPU(int, xmit_recursion);
 #define RECURSION_LIMIT 10
 
@@ -2496,6 +2530,8 @@ int dev_queue_xmit(struct sk_buff *skb)
         */
        rcu_read_lock_bh();
 
+       skb_update_prio(skb);
+
        txq = dev_pick_tx(dev, skb);
        q = rcu_dereference_bh(txq->qdisc);
 
@@ -2530,7 +2566,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
                        HARD_TX_LOCK(dev, txq, cpu);
 
-                       if (!netif_tx_queue_stopped(txq)) {
+                       if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
                                rc = dev_hard_start_xmit(skb, dev, txq);
                                __this_cpu_dec(xmit_recursion);
@@ -2591,123 +2627,28 @@ static inline void ____napi_schedule(struct softnet_data *sd,
  */
 void __skb_get_rxhash(struct sk_buff *skb)
 {
-       int nhoff, hash = 0, poff;
-       const struct ipv6hdr *ip6;
-       const struct iphdr *ip;
-       const struct vlan_hdr *vlan;
-       u8 ip_proto;
-       u32 addr1, addr2;
-       u16 proto;
-       union {
-               u32 v32;
-               u16 v16[2];
-       } ports;
-
-       nhoff = skb_network_offset(skb);
-       proto = skb->protocol;
-
-again:
-       switch (proto) {
-       case __constant_htons(ETH_P_IP):
-ip:
-               if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
-                       goto done;
-
-               ip = (const struct iphdr *) (skb->data + nhoff);
-               if (ip_is_fragment(ip))
-                       ip_proto = 0;
-               else
-                       ip_proto = ip->protocol;
-               addr1 = (__force u32) ip->saddr;
-               addr2 = (__force u32) ip->daddr;
-               nhoff += ip->ihl * 4;
-               break;
-       case __constant_htons(ETH_P_IPV6):
-ipv6:
-               if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
-                       goto done;
-
-               ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
-               ip_proto = ip6->nexthdr;
-               addr1 = (__force u32) ip6->saddr.s6_addr32[3];
-               addr2 = (__force u32) ip6->daddr.s6_addr32[3];
-               nhoff += 40;
-               break;
-       case __constant_htons(ETH_P_8021Q):
-               if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
-                       goto done;
-               vlan = (const struct vlan_hdr *) (skb->data + nhoff);
-               proto = vlan->h_vlan_encapsulated_proto;
-               nhoff += sizeof(*vlan);
-               goto again;
-       case __constant_htons(ETH_P_PPP_SES):
-               if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
-                       goto done;
-               proto = *((__be16 *) (skb->data + nhoff +
-                                     sizeof(struct pppoe_hdr)));
-               nhoff += PPPOE_SES_HLEN;
-               switch (proto) {
-               case __constant_htons(PPP_IP):
-                       goto ip;
-               case __constant_htons(PPP_IPV6):
-                       goto ipv6;
-               default:
-                       goto done;
-               }
-       default:
-               goto done;
-       }
-
-       switch (ip_proto) {
-       case IPPROTO_GRE:
-               if (pskb_may_pull(skb, nhoff + 16)) {
-                       u8 *h = skb->data + nhoff;
-                       __be16 flags = *(__be16 *)h;
+       struct flow_keys keys;
+       u32 hash;
 
-                       /*
-                        * Only look inside GRE if version zero and no
-                        * routing
-                        */
-                       if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
-                               proto = *(__be16 *)(h + 2);
-                               nhoff += 4;
-                               if (flags & GRE_CSUM)
-                                       nhoff += 4;
-                               if (flags & GRE_KEY)
-                                       nhoff += 4;
-                               if (flags & GRE_SEQ)
-                                       nhoff += 4;
-                               goto again;
-                       }
-               }
-               break;
-       case IPPROTO_IPIP:
-               goto again;
-       default:
-               break;
-       }
+       if (!skb_flow_dissect(skb, &keys))
+               return;
 
-       ports.v32 = 0;
-       poff = proto_ports_offset(ip_proto);
-       if (poff >= 0) {
-               nhoff += poff;
-               if (pskb_may_pull(skb, nhoff + 4)) {
-                       ports.v32 = * (__force u32 *) (skb->data + nhoff);
-                       if (ports.v16[1] < ports.v16[0])
-                               swap(ports.v16[0], ports.v16[1]);
-                       skb->l4_rxhash = 1;
-               }
+       if (keys.ports) {
+               if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
+                       swap(keys.port16[0], keys.port16[1]);
+               skb->l4_rxhash = 1;
        }
 
        /* get a consistent hash (same value on both flow directions) */
-       if (addr2 < addr1)
-               swap(addr1, addr2);
+       if ((__force u32)keys.dst < (__force u32)keys.src)
+               swap(keys.dst, keys.src);
 
-       hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+       hash = jhash_3words((__force u32)keys.dst,
+                           (__force u32)keys.src,
+                           (__force u32)keys.ports, hashrnd);
        if (!hash)
                hash = 1;
 
-done:
        skb->rxhash = hash;
 }
 EXPORT_SYMBOL(__skb_get_rxhash);
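
The hand-rolled IP/IPv6/VLAN/PPPoE/GRE walk moves into the shared dissector:
skb_flow_dissect() fills a struct flow_keys, which at the time of this patch
looks roughly like this (see include/net/flow_keys.h):

	struct flow_keys {
		/* (src, dst) are grouped as in the IP header */
		__be32 src;		/* IPv4 saddr, or folded IPv6 saddr */
		__be32 dst;
		union {
			__be32 ports;	/* both L4 ports, packed */
			__be16 port16[2];
		};
		u8 ip_proto;
	};

Only these three 32-bit words feed the hash, which is why the jhash_3words()
call survives the rewrite unchanged.
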
@@ -2718,6 +2659,8 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
+struct jump_label_key rps_needed __read_mostly;
+
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
            struct rps_dev_flow *rflow, u16 next_cpu)
@@ -2997,12 +2940,11 @@ int netif_rx(struct sk_buff *skb)
        if (netpoll_rx(skb))
                return NET_RX_DROP;
 
-       if (netdev_tstamp_prequeue)
-               net_timestamp_check(skb);
+       net_timestamp_check(netdev_tstamp_prequeue, skb);
 
        trace_netif_rx(skb);
 #ifdef CONFIG_RPS
-       {
+       if (static_branch(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu;
 
@@ -3017,14 +2959,13 @@ int netif_rx(struct sk_buff *skb)
 
                rcu_read_unlock();
                preempt_enable();
-       }
-#else
+       } else
+#endif
        {
                unsigned int qtail;
                ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
                put_cpu();
        }
-#endif
        return ret;
 }
 EXPORT_SYMBOL(netif_rx);
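
RPS dispatch now sits behind the rps_needed jump label, so hosts that never
install an rps_cpus mask skip get_rps_cpu() entirely; note how the
"} else ... #endif { ... }" construction shares one backlog fallback between
the RPS-miss case and !CONFIG_RPS builds. The key is expected to be flipped
wherever an RPS map is installed or torn down, e.g. (a sketch, assuming the
companion net-sysfs/sysctl change; rps_map_changed is hypothetical):

	static void rps_map_changed(bool had_map, bool has_map)
	{
		/* count 0 <-> non-zero transitions of installed maps */
		if (!had_map && has_map)
			jump_label_inc(&rps_needed);
		else if (had_map && !has_map)
			jump_label_dec(&rps_needed);
	}
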
@@ -3230,8 +3171,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
        int ret = NET_RX_DROP;
        __be16 type;
 
-       if (!netdev_tstamp_prequeue)
-               net_timestamp_check(skb);
+       net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
        trace_netif_receive_skb(skb);
 
@@ -3362,14 +3302,13 @@ out:
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
-       if (netdev_tstamp_prequeue)
-               net_timestamp_check(skb);
+       net_timestamp_check(netdev_tstamp_prequeue, skb);
 
        if (skb_defer_rx_timestamp(skb))
                return NET_RX_SUCCESS;
 
 #ifdef CONFIG_RPS
-       {
+       if (static_branch(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu, ret;
 
@@ -3380,16 +3319,12 @@ int netif_receive_skb(struct sk_buff *skb)
                if (cpu >= 0) {
                        ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
                        rcu_read_unlock();
-               } else {
-                       rcu_read_unlock();
-                       ret = __netif_receive_skb(skb);
+                       return ret;
                }
-
-               return ret;
+               rcu_read_unlock();
        }
-#else
-       return __netif_receive_skb(skb);
 #endif
+       return __netif_receive_skb(skb);
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
@@ -4282,6 +4217,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
                            sizeof(struct dev_iter_state));
 }
 
+int dev_seq_open_ops(struct inode *inode, struct file *file,
+                    const struct seq_operations *ops)
+{
+       return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
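dev_seq_open_ops() exposes the /proc/net/dev iterator plumbing, so sibling
seq files can reuse the same dev_iter_state sizing. A caller would look
something like this (sketched for /proc/net/dev_mcast; dev_mc_seq_ops stands
in for that file's seq_operations table):

	static int dev_mc_seq_open(struct inode *inode, struct file *file)
	{
		return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
	}
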
 static const struct file_operations dev_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = dev_seq_open,
@@ -4532,7 +4473,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
 
 static int __dev_set_promiscuity(struct net_device *dev, int inc)
 {
-       unsigned short old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
        uid_t uid;
        gid_t gid;
 
@@ -4589,7 +4530,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
  */
 int dev_set_promiscuity(struct net_device *dev, int inc)
 {
-       unsigned short old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
        int err;
 
        err = __dev_set_promiscuity(dev, inc);
@@ -4616,7 +4557,7 @@ EXPORT_SYMBOL(dev_set_promiscuity);
 
 int dev_set_allmulti(struct net_device *dev, int inc)
 {
-       unsigned short old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
 
        ASSERT_RTNL();
 
@@ -4719,7 +4660,7 @@ EXPORT_SYMBOL(dev_get_flags);
 
 int __dev_change_flags(struct net_device *dev, unsigned int flags)
 {
-       int old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
        int ret;
 
        ASSERT_RTNL();
@@ -4802,10 +4743,10 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
  *     Change settings on device based state flags. The flags are
  *     in the userspace exported format.
  */
-int dev_change_flags(struct net_device *dev, unsigned flags)
+int dev_change_flags(struct net_device *dev, unsigned int flags)
 {
-       int ret, changes;
-       int old_flags = dev->flags;
+       int ret;
+       unsigned int changes, old_flags = dev->flags;
 
        ret = __dev_change_flags(dev, flags);
        if (ret < 0)
@@ -5362,7 +5303,8 @@ static void rollback_registered(struct net_device *dev)
        list_del(&single);
 }
 
-static u32 netdev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netdev_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /* Fix illegal checksum combinations */
        if ((features & NETIF_F_HW_CSUM) &&
@@ -5371,12 +5313,6 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
                features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
        }
 
-       if ((features & NETIF_F_NO_CSUM) &&
-           (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-               netdev_warn(dev, "mixed no checksumming and other settings.\n");
-               features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
-       }
-
        /* Fix illegal SG+CSUM combinations. */
        if ((features & NETIF_F_SG) &&
            !(features & NETIF_F_ALL_CSUM)) {
@@ -5424,7 +5360,7 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
 
 int __netdev_update_features(struct net_device *dev)
 {
-       u32 features;
+       netdev_features_t features;
        int err = 0;
 
        ASSERT_RTNL();
@@ -5440,16 +5376,16 @@ int __netdev_update_features(struct net_device *dev)
        if (dev->features == features)
                return 0;
 
-       netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
-               dev->features, features);
+       netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
+               &dev->features, &features);
 
        if (dev->netdev_ops->ndo_set_features)
                err = dev->netdev_ops->ndo_set_features(dev, features);
 
        if (unlikely(err < 0)) {
                netdev_err(dev,
-                       "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
-                       err, features, dev->features);
+                       "set_features() failed (%d); wanted %pNF, left %pNF\n",
+                       err, &features, &dev->features);
                return -1;
        }
 
@@ -5548,6 +5484,9 @@ static void netdev_init_one_queue(struct net_device *dev,
        queue->xmit_lock_owner = -1;
        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
+#ifdef CONFIG_BQL
+       dql_init(&queue->dql, HZ);
+#endif
 }
 
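dql_init() seeds each TX queue's dynamic queue limit with a HZ hold time;
this is the core state for byte queue limits (BQL). Drivers opt in by
accounting bytes at transmit and completion time, roughly like this (the
foo_* names are hypothetical):

	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* ...post skb to hardware... */
		netdev_tx_sent_queue(txq, skb->len);
		return NETDEV_TX_OK;
	}

	static void foo_tx_complete(struct net_device *dev,
				    unsigned int pkts, unsigned int bytes)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* may re-wake the queue once enough bytes drained */
		netdev_tx_completed_queue(txq, pkts, bytes);
	}
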
 static int netif_alloc_netdev_queues(struct net_device *dev)
@@ -5633,11 +5572,12 @@ int register_netdevice(struct net_device *dev)
        dev->wanted_features = dev->features & dev->hw_features;
 
        /* Turn on no cache copy if HW is doing checksum */
-       dev->hw_features |= NETIF_F_NOCACHE_COPY;
-       if ((dev->features & NETIF_F_ALL_CSUM) &&
-           !(dev->features & NETIF_F_NO_CSUM)) {
-               dev->wanted_features |= NETIF_F_NOCACHE_COPY;
-               dev->features |= NETIF_F_NOCACHE_COPY;
+       if (!(dev->flags & IFF_LOOPBACK)) {
+               dev->hw_features |= NETIF_F_NOCACHE_COPY;
+               if (dev->features & NETIF_F_ALL_CSUM) {
+                       dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+                       dev->features |= NETIF_F_NOCACHE_COPY;
+               }
        }
 
        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
@@ -6373,7 +6313,8 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  *     @one to the master device with current feature set @all.  Will not
  *     enable anything that is off in @mask. Returns the new feature set.
  */
-u32 netdev_increment_features(u32 all, u32 one, u32 mask)
+netdev_features_t netdev_increment_features(netdev_features_t all,
+       netdev_features_t one, netdev_features_t mask)
 {
        if (mask & NETIF_F_GEN_CSUM)
                mask |= NETIF_F_ALL_CSUM;
@@ -6382,10 +6323,6 @@ u32 netdev_increment_features(u32 all, u32 one, u32 mask)
        all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
        all &= one | ~NETIF_F_ALL_FOR_ALL;
 
-       /* If device needs checksumming, downgrade to it. */
-       if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
-               all &= ~NETIF_F_NO_CSUM;
-
        /* If one device supports hw checksumming, set for all. */
        if (all & NETIF_F_GEN_CSUM)
                all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);