git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
authorDavid S. Miller <davem@davemloft.net>
Wed, 11 Jun 2014 23:02:55 +0000 (16:02 -0700)
committerDavid S. Miller <davem@davemloft.net>
Wed, 11 Jun 2014 23:02:55 +0000 (16:02 -0700)
Conflicts:
net/core/rtnetlink.c
net/core/skbuff.c

Both conflicts were very simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
1  2 
arch/sparc/net/bpf_jit_comp.c
drivers/net/macvlan.c
net/core/dev.c
net/core/rtnetlink.c
net/core/skbuff.c
net/dns_resolver/dns_query.c
net/ipv4/ip_tunnel.c
net/ipv4/ipip.c
net/ipv6/output_core.c
net/ipv6/sit.c
net/mac80211/iface.c

index c88cf147deed8916ae46b0bd1d2e0faa7e478003,49cee4af16f437d17af35e725d56cce426185671..892a102671adafc03950f907612f23b6902f934a
@@@ -83,9 -83,9 +83,9 @@@ static void bpf_flush_icache(void *star
  #define BNE           (F2(0, 2) | CONDNE)
  
  #ifdef CONFIG_SPARC64
- #define BNE_PTR               (F2(0, 1) | CONDNE | (2 << 20))
+ #define BE_PTR                (F2(0, 1) | CONDE | (2 << 20))
  #else
- #define BNE_PTR               BNE
+ #define BE_PTR                BE
  #endif
  
  #define SETHI(K, REG) \
@@@ -415,11 -415,20 +415,11 @@@ void bpf_jit_compile(struct sk_filter *
                emit_reg_move(O7, r_saved_O7);
  
                switch (filter[0].code) {
 -              case BPF_S_RET_K:
 -              case BPF_S_LD_W_LEN:
 -              case BPF_S_ANC_PROTOCOL:
 -              case BPF_S_ANC_PKTTYPE:
 -              case BPF_S_ANC_IFINDEX:
 -              case BPF_S_ANC_MARK:
 -              case BPF_S_ANC_RXHASH:
 -              case BPF_S_ANC_VLAN_TAG:
 -              case BPF_S_ANC_VLAN_TAG_PRESENT:
 -              case BPF_S_ANC_CPU:
 -              case BPF_S_ANC_QUEUE:
 -              case BPF_S_LD_W_ABS:
 -              case BPF_S_LD_H_ABS:
 -              case BPF_S_LD_B_ABS:
 +              case BPF_RET | BPF_K:
 +              case BPF_LD | BPF_W | BPF_LEN:
 +              case BPF_LD | BPF_W | BPF_ABS:
 +              case BPF_LD | BPF_H | BPF_ABS:
 +              case BPF_LD | BPF_B | BPF_ABS:
                        /* The first instruction sets the A register (or is
                         * a "RET 'constant'")
                         */
                        unsigned int t_offset;
                        unsigned int f_offset;
                        u32 t_op, f_op;
 +                      u16 code = bpf_anc_helper(&filter[i]);
                        int ilen;
  
 -                      switch (filter[i].code) {
 -                      case BPF_S_ALU_ADD_X:   /* A += X; */
 +                      switch (code) {
 +                      case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
                                emit_alu_X(ADD);
                                break;
 -                      case BPF_S_ALU_ADD_K:   /* A += K; */
 +                      case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
                                emit_alu_K(ADD, K);
                                break;
 -                      case BPF_S_ALU_SUB_X:   /* A -= X; */
 +                      case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
                                emit_alu_X(SUB);
                                break;
 -                      case BPF_S_ALU_SUB_K:   /* A -= K */
 +                      case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
                                emit_alu_K(SUB, K);
                                break;
 -                      case BPF_S_ALU_AND_X:   /* A &= X */
 +                      case BPF_ALU | BPF_AND | BPF_X: /* A &= X */
                                emit_alu_X(AND);
                                break;
 -                      case BPF_S_ALU_AND_K:   /* A &= K */
 +                      case BPF_ALU | BPF_AND | BPF_K: /* A &= K */
                                emit_alu_K(AND, K);
                                break;
 -                      case BPF_S_ALU_OR_X:    /* A |= X */
 +                      case BPF_ALU | BPF_OR | BPF_X:  /* A |= X */
                                emit_alu_X(OR);
                                break;
 -                      case BPF_S_ALU_OR_K:    /* A |= K */
 +                      case BPF_ALU | BPF_OR | BPF_K:  /* A |= K */
                                emit_alu_K(OR, K);
                                break;
 -                      case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
 -                      case BPF_S_ALU_XOR_X:
 +                      case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
 +                      case BPF_ALU | BPF_XOR | BPF_X:
                                emit_alu_X(XOR);
                                break;
 -                      case BPF_S_ALU_XOR_K:   /* A ^= K */
 +                      case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
                                emit_alu_K(XOR, K);
                                break;
 -                      case BPF_S_ALU_LSH_X:   /* A <<= X */
 +                      case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X */
                                emit_alu_X(SLL);
                                break;
 -                      case BPF_S_ALU_LSH_K:   /* A <<= K */
 +                      case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */
                                emit_alu_K(SLL, K);
                                break;
 -                      case BPF_S_ALU_RSH_X:   /* A >>= X */
 +                      case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X */
                                emit_alu_X(SRL);
                                break;
 -                      case BPF_S_ALU_RSH_K:   /* A >>= K */
 +                      case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K */
                                emit_alu_K(SRL, K);
                                break;
 -                      case BPF_S_ALU_MUL_X:   /* A *= X; */
 +                      case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
                                emit_alu_X(MUL);
                                break;
 -                      case BPF_S_ALU_MUL_K:   /* A *= K */
 +                      case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
                                emit_alu_K(MUL, K);
                                break;
 -                      case BPF_S_ALU_DIV_K:   /* A /= K with K != 0*/
 +                      case BPF_ALU | BPF_DIV | BPF_K: /* A /= K with K != 0*/
                                if (K == 1)
                                        break;
                                emit_write_y(G0);
  #endif
                                emit_alu_K(DIV, K);
                                break;
 -                      case BPF_S_ALU_DIV_X:   /* A /= X; */
 +                      case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
                                emit_cmpi(r_X, 0);
                                if (pc_ret0 > 0) {
                                        t_offset = addrs[pc_ret0 - 1];
  #endif
                                emit_alu_X(DIV);
                                break;
 -                      case BPF_S_ALU_NEG:
 +                      case BPF_ALU | BPF_NEG:
                                emit_neg();
                                break;
 -                      case BPF_S_RET_K:
 +                      case BPF_RET | BPF_K:
                                if (!K) {
                                        if (pc_ret0 == -1)
                                                pc_ret0 = i;
                                        emit_loadimm(K, r_A);
                                }
                                /* Fallthrough */
 -                      case BPF_S_RET_A:
 +                      case BPF_RET | BPF_A:
                                if (seen_or_pass0) {
                                        if (i != flen - 1) {
                                                emit_jump(cleanup_addr);
                                emit_jmpl(r_saved_O7, 8, G0);
                                emit_reg_move(r_A, O0); /* delay slot */
                                break;
 -                      case BPF_S_MISC_TAX:
 +                      case BPF_MISC | BPF_TAX:
                                seen |= SEEN_XREG;
                                emit_reg_move(r_A, r_X);
                                break;
 -                      case BPF_S_MISC_TXA:
 +                      case BPF_MISC | BPF_TXA:
                                seen |= SEEN_XREG;
                                emit_reg_move(r_X, r_A);
                                break;
 -                      case BPF_S_ANC_CPU:
 +                      case BPF_ANC | SKF_AD_CPU:
                                emit_load_cpu(r_A);
                                break;
 -                      case BPF_S_ANC_PROTOCOL:
 +                      case BPF_ANC | SKF_AD_PROTOCOL:
                                emit_skb_load16(protocol, r_A);
                                break;
  #if 0
                                 * a bit field even though we very much
                                 * know what we are doing here.
                                 */
 -                      case BPF_S_ANC_PKTTYPE:
 +                      case BPF_ANC | SKF_AD_PKTTYPE:
                                __emit_skb_load8(pkt_type, r_A);
                                emit_alu_K(SRL, 5);
                                break;
  #endif
 -                      case BPF_S_ANC_IFINDEX:
 +                      case BPF_ANC | SKF_AD_IFINDEX:
                                emit_skb_loadptr(dev, r_A);
                                emit_cmpi(r_A, 0);
-                               emit_branch(BNE_PTR, cleanup_addr + 4);
+                               emit_branch(BE_PTR, cleanup_addr + 4);
                                emit_nop();
                                emit_load32(r_A, struct net_device, ifindex, r_A);
                                break;
 -                      case BPF_S_ANC_MARK:
 +                      case BPF_ANC | SKF_AD_MARK:
                                emit_skb_load32(mark, r_A);
                                break;
 -                      case BPF_S_ANC_QUEUE:
 +                      case BPF_ANC | SKF_AD_QUEUE:
                                emit_skb_load16(queue_mapping, r_A);
                                break;
 -                      case BPF_S_ANC_HATYPE:
 +                      case BPF_ANC | SKF_AD_HATYPE:
                                emit_skb_loadptr(dev, r_A);
                                emit_cmpi(r_A, 0);
-                               emit_branch(BNE_PTR, cleanup_addr + 4);
+                               emit_branch(BE_PTR, cleanup_addr + 4);
                                emit_nop();
                                emit_load16(r_A, struct net_device, type, r_A);
                                break;
 -                      case BPF_S_ANC_RXHASH:
 +                      case BPF_ANC | SKF_AD_RXHASH:
                                emit_skb_load32(hash, r_A);
                                break;
 -                      case BPF_S_ANC_VLAN_TAG:
 -                      case BPF_S_ANC_VLAN_TAG_PRESENT:
 +                      case BPF_ANC | SKF_AD_VLAN_TAG:
 +                      case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                                emit_skb_load16(vlan_tci, r_A);
 -                              if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
 +                              if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
                                        emit_andi(r_A, VLAN_VID_MASK, r_A);
                                } else {
                                        emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
                                }
                                break;
  
 -                      case BPF_S_LD_IMM:
 +                      case BPF_LD | BPF_IMM:
                                emit_loadimm(K, r_A);
                                break;
 -                      case BPF_S_LDX_IMM:
 +                      case BPF_LDX | BPF_IMM:
                                emit_loadimm(K, r_X);
                                break;
 -                      case BPF_S_LD_MEM:
 +                      case BPF_LD | BPF_MEM:
                                emit_ldmem(K * 4, r_A);
                                break;
 -                      case BPF_S_LDX_MEM:
 +                      case BPF_LDX | BPF_MEM:
                                emit_ldmem(K * 4, r_X);
                                break;
 -                      case BPF_S_ST:
 +                      case BPF_ST:
                                emit_stmem(K * 4, r_A);
                                break;
 -                      case BPF_S_STX:
 +                      case BPF_STX:
                                emit_stmem(K * 4, r_X);
                                break;
  
  #define CHOOSE_LOAD_FUNC(K, func) \
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
  
 -                      case BPF_S_LD_W_ABS:
 +                      case BPF_LD | BPF_W | BPF_ABS:
                                func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
  common_load:                  seen |= SEEN_DATAREF;
                                emit_loadimm(K, r_OFF);
                                emit_call(func);
                                break;
 -                      case BPF_S_LD_H_ABS:
 +                      case BPF_LD | BPF_H | BPF_ABS:
                                func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
                                goto common_load;
 -                      case BPF_S_LD_B_ABS:
 +                      case BPF_LD | BPF_B | BPF_ABS:
                                func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
                                goto common_load;
 -                      case BPF_S_LDX_B_MSH:
 +                      case BPF_LDX | BPF_B | BPF_MSH:
                                func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
                                goto common_load;
 -                      case BPF_S_LD_W_IND:
 +                      case BPF_LD | BPF_W | BPF_IND:
                                func = bpf_jit_load_word;
  common_load_ind:              seen |= SEEN_DATAREF | SEEN_XREG;
                                if (K) {
                                }
                                emit_call(func);
                                break;
 -                      case BPF_S_LD_H_IND:
 +                      case BPF_LD | BPF_H | BPF_IND:
                                func = bpf_jit_load_half;
                                goto common_load_ind;
 -                      case BPF_S_LD_B_IND:
 +                      case BPF_LD | BPF_B | BPF_IND:
                                func = bpf_jit_load_byte;
                                goto common_load_ind;
 -                      case BPF_S_JMP_JA:
 +                      case BPF_JMP | BPF_JA:
                                emit_jump(addrs[i + K]);
                                emit_nop();
                                break;
                f_op = FOP;             \
                goto cond_branch
  
 -                      COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU);
 -                      COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU);
 -                      COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE);
 -                      COND_SEL(BPF_S_JMP_JSET_K, BNE, BE);
 -                      COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU);
 -                      COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU);
 -                      COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE);
 -                      COND_SEL(BPF_S_JMP_JSET_X, BNE, BE);
 +                      COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
 +                      COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
 +                      COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
 +                      COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
 +                      COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
 +                      COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
 +                      COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
 +                      COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);
  
  cond_branch:                  f_offset = addrs[i + filter[i].jf];
                                t_offset = addrs[i + filter[i].jt];
                                        break;
                                }
  
 -                              switch (filter[i].code) {
 -                              case BPF_S_JMP_JGT_X:
 -                              case BPF_S_JMP_JGE_X:
 -                              case BPF_S_JMP_JEQ_X:
 +                              switch (code) {
 +                              case BPF_JMP | BPF_JGT | BPF_X:
 +                              case BPF_JMP | BPF_JGE | BPF_X:
 +                              case BPF_JMP | BPF_JEQ | BPF_X:
                                        seen |= SEEN_XREG;
                                        emit_cmp(r_A, r_X);
                                        break;
 -                              case BPF_S_JMP_JSET_X:
 +                              case BPF_JMP | BPF_JSET | BPF_X:
                                        seen |= SEEN_XREG;
                                        emit_btst(r_A, r_X);
                                        break;
 -                              case BPF_S_JMP_JEQ_K:
 -                              case BPF_S_JMP_JGT_K:
 -                              case BPF_S_JMP_JGE_K:
 +                              case BPF_JMP | BPF_JEQ | BPF_K:
 +                              case BPF_JMP | BPF_JGT | BPF_K:
 +                              case BPF_JMP | BPF_JGE | BPF_K:
                                        if (is_simm13(K)) {
                                                emit_cmpi(r_A, K);
                                        } else {
                                                emit_cmp(r_A, r_TMP);
                                        }
                                        break;
 -                              case BPF_S_JMP_JSET_K:
 +                              case BPF_JMP | BPF_JSET | BPF_K:
                                        if (is_simm13(K)) {
                                                emit_btsti(r_A, K);
                                        } else {
diff --combined drivers/net/macvlan.c
index 453d55a02492cde364edd9efb267e5587ad0fa13,7eec598c5cb620b412c6f85295f77ec354577d0e..958df383068a534d01bf9632463a22689184ea50
  #include <linux/if_link.h>
  #include <linux/if_macvlan.h>
  #include <linux/hash.h>
 +#include <linux/workqueue.h>
  #include <net/rtnetlink.h>
  #include <net/xfrm.h>
 +#include <linux/netpoll.h>
  
  #define MACVLAN_HASH_SIZE     (1 << BITS_PER_BYTE)
  
@@@ -42,19 -40,10 +42,19 @@@ struct macvlan_port 
        struct hlist_head       vlan_hash[MACVLAN_HASH_SIZE];
        struct list_head        vlans;
        struct rcu_head         rcu;
 +      struct sk_buff_head     bc_queue;
 +      struct work_struct      bc_work;
        bool                    passthru;
 -      int                     count;
  };
  
 +#define MACVLAN_PORT_IS_EMPTY(port)    list_empty(&port->vlans)
 +
 +struct macvlan_skb_cb {
 +      const struct macvlan_dev *src;
 +};
 +
 +#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
 +
  static void macvlan_port_destroy(struct net_device *dev);
  
  static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
@@@ -131,7 -120,7 +131,7 @@@ static int macvlan_broadcast_one(struc
        struct net_device *dev = vlan->dev;
  
        if (local)
 -              return dev_forward_skb(dev, skb);
 +              return __dev_forward_skb(dev, skb);
  
        skb->dev = dev;
        if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
        else
                skb->pkt_type = PACKET_MULTICAST;
  
 -      return netif_rx(skb);
 +      return 0;
  }
  
  static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@@ -186,32 -175,32 +186,32 @@@ static void macvlan_broadcast(struct sk
                        if (likely(nskb))
                                err = macvlan_broadcast_one(
                                        nskb, vlan, eth,
 -                                      mode == MACVLAN_MODE_BRIDGE);
 +                                      mode == MACVLAN_MODE_BRIDGE) ?:
 +                                    netif_rx_ni(nskb);
                        macvlan_count_rx(vlan, skb->len + ETH_HLEN,
                                         err == NET_RX_SUCCESS, 1);
                }
        }
  }
  
 -/* called under rcu_read_lock() from netif_receive_skb */
 -static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 +static void macvlan_process_broadcast(struct work_struct *w)
  {
 -      struct macvlan_port *port;
 -      struct sk_buff *skb = *pskb;
 -      const struct ethhdr *eth = eth_hdr(skb);
 -      const struct macvlan_dev *vlan;
 -      const struct macvlan_dev *src;
 -      struct net_device *dev;
 -      unsigned int len = 0;
 -      int ret = NET_RX_DROP;
 +      struct macvlan_port *port = container_of(w, struct macvlan_port,
 +                                               bc_work);
 +      struct sk_buff *skb;
 +      struct sk_buff_head list;
 +
 +      skb_queue_head_init(&list);
 +
 +      spin_lock_bh(&port->bc_queue.lock);
 +      skb_queue_splice_tail_init(&port->bc_queue, &list);
 +      spin_unlock_bh(&port->bc_queue.lock);
 +
 +      while ((skb = __skb_dequeue(&list))) {
 +              const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
 +
 +              rcu_read_lock();
  
 -      port = macvlan_port_get_rcu(skb->dev);
 -      if (is_multicast_ether_addr(eth->h_dest)) {
 -              skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
 -              if (!skb)
 -                      return RX_HANDLER_CONSUMED;
 -              eth = eth_hdr(skb);
 -              src = macvlan_hash_lookup(port, eth->h_source);
                if (!src)
                        /* frame comes from an external address */
                        macvlan_broadcast(skb, port, NULL,
                        macvlan_broadcast(skb, port, src->dev,
                                          MACVLAN_MODE_VEPA |
                                          MACVLAN_MODE_BRIDGE);
 -              else if (src->mode == MACVLAN_MODE_BRIDGE)
 +              else
                        /*
                         * flood only to VEPA ports, bridge ports
                         * already saw the frame on the way out.
                         */
                        macvlan_broadcast(skb, port, src->dev,
                                          MACVLAN_MODE_VEPA);
 -              else {
 +
 +              rcu_read_unlock();
 +
 +              kfree_skb(skb);
 +      }
 +}
 +
 +static void macvlan_broadcast_enqueue(struct macvlan_port *port,
 +                                    struct sk_buff *skb)
 +{
 +      struct sk_buff *nskb;
 +      int err = -ENOMEM;
 +
 +      nskb = skb_clone(skb, GFP_ATOMIC);
 +      if (!nskb)
 +              goto err;
 +
 +      spin_lock(&port->bc_queue.lock);
 +      if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
 +              __skb_queue_tail(&port->bc_queue, nskb);
 +              err = 0;
 +      }
 +      spin_unlock(&port->bc_queue.lock);
 +
 +      if (err)
 +              goto free_nskb;
 +
 +      schedule_work(&port->bc_work);
 +      return;
 +
 +free_nskb:
 +      kfree_skb(nskb);
 +err:
 +      atomic_long_inc(&skb->dev->rx_dropped);
 +}
 +
 +/* called under rcu_read_lock() from netif_receive_skb */
 +static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 +{
 +      struct macvlan_port *port;
 +      struct sk_buff *skb = *pskb;
 +      const struct ethhdr *eth = eth_hdr(skb);
 +      const struct macvlan_dev *vlan;
 +      const struct macvlan_dev *src;
 +      struct net_device *dev;
 +      unsigned int len = 0;
 +      int ret = NET_RX_DROP;
 +
 +      port = macvlan_port_get_rcu(skb->dev);
 +      if (is_multicast_ether_addr(eth->h_dest)) {
 +              skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
 +              if (!skb)
 +                      return RX_HANDLER_CONSUMED;
 +              eth = eth_hdr(skb);
 +              src = macvlan_hash_lookup(port, eth->h_source);
 +              if (src && src->mode != MACVLAN_MODE_VEPA &&
 +                  src->mode != MACVLAN_MODE_BRIDGE) {
                        /* forward to original port. */
                        vlan = src;
 -                      ret = macvlan_broadcast_one(skb, vlan, eth, 0);
 +                      ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
 +                            netif_rx(skb);
                        goto out;
                }
  
 +              MACVLAN_SKB_CB(skb)->src = src;
 +              macvlan_broadcast_enqueue(port, skb);
 +
                return RX_HANDLER_PASS;
        }
  
@@@ -358,26 -287,12 +358,26 @@@ xmit_world
        return dev_queue_xmit(skb);
  }
  
 +static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
 +{
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      if (vlan->netpoll)
 +              netpoll_send_skb(vlan->netpoll, skb);
 +#else
 +      BUG();
 +#endif
 +      return NETDEV_TX_OK;
 +}
 +
  static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
  {
        unsigned int len = skb->len;
        int ret;
 -      const struct macvlan_dev *vlan = netdev_priv(dev);
 +      struct macvlan_dev *vlan = netdev_priv(dev);
 +
 +      if (unlikely(netpoll_tx_running(dev)))
 +              return macvlan_netpoll_send_skb(vlan, skb);
  
        if (vlan->fwd_priv) {
                skb->dev = vlan->lowerdev;
@@@ -509,49 -424,35 +509,49 @@@ hash_del
        return 0;
  }
  
 -static int macvlan_set_mac_address(struct net_device *dev, void *p)
 +static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
  {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
 -      struct sockaddr *addr = p;
        int err;
  
 -      if (!is_valid_ether_addr(addr->sa_data))
 -              return -EADDRNOTAVAIL;
 -
        if (!(dev->flags & IFF_UP)) {
                /* Just copy in the new address */
 -              memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 +              ether_addr_copy(dev->dev_addr, addr);
        } else {
                /* Rehash and update the device filters */
 -              if (macvlan_addr_busy(vlan->port, addr->sa_data))
 +              if (macvlan_addr_busy(vlan->port, addr))
                        return -EBUSY;
  
 -              err = dev_uc_add(lowerdev, addr->sa_data);
 -              if (err)
 -                      return err;
 +              if (!vlan->port->passthru) {
 +                      err = dev_uc_add(lowerdev, addr);
 +                      if (err)
 +                              return err;
  
 -              dev_uc_del(lowerdev, dev->dev_addr);
 +                      dev_uc_del(lowerdev, dev->dev_addr);
 +              }
  
 -              macvlan_hash_change_addr(vlan, addr->sa_data);
 +              macvlan_hash_change_addr(vlan, addr);
        }
        return 0;
  }
  
 +static int macvlan_set_mac_address(struct net_device *dev, void *p)
 +{
 +      struct macvlan_dev *vlan = netdev_priv(dev);
 +      struct sockaddr *addr = p;
 +
 +      if (!is_valid_ether_addr(addr->sa_data))
 +              return -EADDRNOTAVAIL;
 +
 +      if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
 +              dev_set_mac_address(vlan->lowerdev, addr);
 +              return 0;
 +      }
 +
 +      return macvlan_sync_address(dev, addr->sa_data);
 +}
 +
  static void macvlan_change_rx_flags(struct net_device *dev, int change)
  {
        struct macvlan_dev *vlan = netdev_priv(dev);
@@@ -666,7 -567,8 +666,7 @@@ static void macvlan_uninit(struct net_d
  
        free_percpu(vlan->pcpu_stats);
  
 -      port->count -= 1;
 -      if (!port->count)
 +      if (MACVLAN_PORT_IS_EMPTY(port))
                macvlan_port_destroy(port->dev);
  }
  
@@@ -803,50 -705,6 +803,50 @@@ static netdev_features_t macvlan_fix_fe
        return features;
  }
  
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void macvlan_dev_poll_controller(struct net_device *dev)
 +{
 +      return;
 +}
 +
 +static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
 +{
 +      struct macvlan_dev *vlan = netdev_priv(dev);
 +      struct net_device *real_dev = vlan->lowerdev;
 +      struct netpoll *netpoll;
 +      int err = 0;
 +
 +      netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
 +      err = -ENOMEM;
 +      if (!netpoll)
 +              goto out;
 +
 +      err = __netpoll_setup(netpoll, real_dev);
 +      if (err) {
 +              kfree(netpoll);
 +              goto out;
 +      }
 +
 +      vlan->netpoll = netpoll;
 +
 +out:
 +      return err;
 +}
 +
 +static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
 +{
 +      struct macvlan_dev *vlan = netdev_priv(dev);
 +      struct netpoll *netpoll = vlan->netpoll;
 +
 +      if (!netpoll)
 +              return;
 +
 +      vlan->netpoll = NULL;
 +
 +      __netpoll_free_async(netpoll);
 +}
 +#endif        /* CONFIG_NET_POLL_CONTROLLER */
 +
  static const struct ethtool_ops macvlan_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_settings           = macvlan_ethtool_get_settings,
@@@ -872,11 -730,6 +872,11 @@@ static const struct net_device_ops macv
        .ndo_fdb_del            = macvlan_fdb_del,
        .ndo_fdb_dump           = ndo_dflt_fdb_dump,
        .ndo_get_lock_subclass  = macvlan_get_nest_level,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller    = macvlan_dev_poll_controller,
 +      .ndo_netpoll_setup      = macvlan_dev_netpoll_setup,
 +      .ndo_netpoll_cleanup    = macvlan_dev_netpoll_cleanup,
 +#endif
  };
  
  void macvlan_common_setup(struct net_device *dev)
@@@ -917,9 -770,6 +917,9 @@@ static int macvlan_port_create(struct n
        for (i = 0; i < MACVLAN_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&port->vlan_hash[i]);
  
 +      skb_queue_head_init(&port->bc_queue);
 +      INIT_WORK(&port->bc_work, macvlan_process_broadcast);
 +
        err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
        if (err)
                kfree(port);
@@@ -932,7 -782,6 +932,7 @@@ static void macvlan_port_destroy(struc
  {
        struct macvlan_port *port = macvlan_port_get_rtnl(dev);
  
 +      cancel_work_sync(&port->bc_work);
        dev->priv_flags &= ~IFF_MACVLAN_PORT;
        netdev_rx_handler_unregister(dev);
        kfree_rcu(port, rcu);
@@@ -1019,12 -868,13 +1019,12 @@@ int macvlan_common_newlink(struct net *
                vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
  
        if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
 -              if (port->count)
 +              if (!MACVLAN_PORT_IS_EMPTY(port))
                        return -EINVAL;
                port->passthru = true;
                eth_hw_addr_inherit(dev, lowerdev);
        }
  
 -      port->count += 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto destroy_port;
  unregister_netdev:
        unregister_netdevice(dev);
  destroy_port:
 -      port->count -= 1;
 -      if (!port->count)
 +      if (MACVLAN_PORT_IS_EMPTY(port))
                macvlan_port_destroy(lowerdev);
  
        return err;
@@@ -1176,25 -1027,6 +1176,25 @@@ static int macvlan_device_event(struct 
                        vlan->dev->gso_max_size = dev->gso_max_size;
                        netdev_update_features(vlan->dev);
                }
 +              break;
 +      case NETDEV_CHANGEMTU:
 +              list_for_each_entry(vlan, &port->vlans, list) {
 +                      if (vlan->dev->mtu <= dev->mtu)
 +                              continue;
 +                      dev_set_mtu(vlan->dev, dev->mtu);
 +              }
 +              break;
 +      case NETDEV_CHANGEADDR:
 +              if (!port->passthru)
 +                      return NOTIFY_DONE;
 +
 +              vlan = list_first_entry_or_null(&port->vlans,
 +                                              struct macvlan_dev,
 +                                              list);
 +
 +              if (macvlan_sync_address(vlan->dev, dev->dev_addr))
 +                      return NOTIFY_BAD;
 +
                break;
        case NETDEV_UNREGISTER:
                /* twiddle thumbs on netns device moves */
                list_for_each_entry_safe(vlan, next, &port->vlans, list)
                        vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
                unregister_netdevice_many(&list_kill);
-               list_del(&list_kill);
                break;
        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid underlaying device to change its type. */
                return NOTIFY_BAD;
 +
 +      case NETDEV_NOTIFY_PEERS:
 +      case NETDEV_BONDING_FAILOVER:
 +      case NETDEV_RESEND_IGMP:
 +              /* Propagate to all vlans */
 +              list_for_each_entry(vlan, &port->vlans, list)
 +                      call_netdevice_notifiers(event, vlan->dev);
        }
        return NOTIFY_DONE;
  }
diff --combined net/core/dev.c
index ed8fe62d41afa5db363d82cad17e5aa87672f203,a30bef1882f5f1911eb08756742030deb79f149e..ab6c491bd2d31d2c91c8f15398e64a4d0d8f1632
@@@ -1661,29 -1661,6 +1661,29 @@@ bool is_skb_forwardable(struct net_devi
  }
  EXPORT_SYMBOL_GPL(is_skb_forwardable);
  
 +int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 +{
 +      if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 +              if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
 +                      atomic_long_inc(&dev->rx_dropped);
 +                      kfree_skb(skb);
 +                      return NET_RX_DROP;
 +              }
 +      }
 +
 +      if (unlikely(!is_skb_forwardable(dev, skb))) {
 +              atomic_long_inc(&dev->rx_dropped);
 +              kfree_skb(skb);
 +              return NET_RX_DROP;
 +      }
 +
 +      skb_scrub_packet(skb, true);
 +      skb->protocol = eth_type_trans(skb, dev);
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(__dev_forward_skb);
 +
  /**
   * dev_forward_skb - loopback an skb to another netif
   *
   */
  int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
  {
 -      if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 -              if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
 -                      atomic_long_inc(&dev->rx_dropped);
 -                      kfree_skb(skb);
 -                      return NET_RX_DROP;
 -              }
 -      }
 -
 -      if (unlikely(!is_skb_forwardable(dev, skb))) {
 -              atomic_long_inc(&dev->rx_dropped);
 -              kfree_skb(skb);
 -              return NET_RX_DROP;
 -      }
 -
 -      skb_scrub_packet(skb, true);
 -      skb->protocol = eth_type_trans(skb, dev);
 -
 -      return netif_rx_internal(skb);
 +      return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
  }
  EXPORT_SYMBOL_GPL(dev_forward_skb);
  
@@@ -2513,39 -2507,13 +2513,39 @@@ static int dev_gso_segment(struct sk_bu
        return 0;
  }
  
 +/* If MPLS offload request, verify we are testing hardware MPLS features
 + * instead of standard features for the netdev.
 + */
 +#ifdef CONFIG_NET_MPLS_GSO
 +static netdev_features_t net_mpls_features(struct sk_buff *skb,
 +                                         netdev_features_t features,
 +                                         __be16 type)
 +{
 +      if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
 +              features &= skb->dev->mpls_features;
 +
 +      return features;
 +}
 +#else
 +static netdev_features_t net_mpls_features(struct sk_buff *skb,
 +                                         netdev_features_t features,
 +                                         __be16 type)
 +{
 +      return features;
 +}
 +#endif
 +
  static netdev_features_t harmonize_features(struct sk_buff *skb,
        netdev_features_t features)
  {
        int tmp;
 +      __be16 type;
 +
 +      type = skb_network_protocol(skb, &tmp);
 +      features = net_mpls_features(skb, features, type);
  
        if (skb->ip_summed != CHECKSUM_NONE &&
 -          !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
 +          !can_checksum_protocol(features, type)) {
                features &= ~NETIF_F_ALL_CSUM;
        } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
@@@ -5721,6 -5689,10 +5721,6 @@@ static void rollback_registered_many(st
                */
                call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  
 -              if (!dev->rtnl_link_ops ||
 -                  dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
 -                      rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
 -
                /*
                 *      Flush the unicast and multicast chains
                 */
                if (dev->netdev_ops->ndo_uninit)
                        dev->netdev_ops->ndo_uninit(dev);
  
 +              if (!dev->rtnl_link_ops ||
 +                  dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
 +                      rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
 +
                /* Notifier chain MUST detach us all upper devices. */
                WARN_ON(netdev_has_any_upper_dev(dev));
  
@@@ -5959,7 -5927,10 +5959,7 @@@ static void netdev_init_one_queue(struc
  
  static void netif_free_tx_queues(struct net_device *dev)
  {
 -      if (is_vmalloc_addr(dev->_tx))
 -              vfree(dev->_tx);
 -      else
 -              kfree(dev->_tx);
 +      kvfree(dev->_tx);
  }
  
  static int netif_alloc_netdev_queues(struct net_device *dev)
@@@ -6433,7 -6404,10 +6433,7 @@@ void netdev_freemem(struct net_device *
  {
        char *addr = (char *)dev - dev->padded;
  
 -      if (is_vmalloc_addr(addr))
 -              vfree(addr);
 -      else
 -              kfree(addr);
 +      kvfree(addr);
  }
  
  /**
@@@ -6538,6 -6512,11 +6538,6 @@@ free_all
  
  free_pcpu:
        free_percpu(dev->pcpu_refcnt);
 -      netif_free_tx_queues(dev);
 -#ifdef CONFIG_SYSFS
 -      kfree(dev->_rx);
 -#endif
 -
  free_dev:
        netdev_freemem(dev);
        return NULL;
@@@ -6634,6 -6613,9 +6634,9 @@@ EXPORT_SYMBOL(unregister_netdevice_queu
  /**
   *    unregister_netdevice_many - unregister many devices
   *    @head: list of devices
+  *
+  *  Note: As most callers use a stack allocated list_head,
+  *  we force a list_del() to make sure stack wont be corrupted later.
   */
  void unregister_netdevice_many(struct list_head *head)
  {
                rollback_registered_many(head);
                list_for_each_entry(dev, head, unreg_list)
                        net_set_todo(dev);
+               list_del(head);
        }
  }
  EXPORT_SYMBOL(unregister_netdevice_many);
@@@ -7098,7 -7081,6 +7102,6 @@@ static void __net_exit default_device_e
                }
        }
        unregister_netdevice_many(&dev_kill_list);
-       list_del(&dev_kill_list);
        rtnl_unlock();
  }
  
diff --combined net/core/rtnetlink.c
index 741b22c62acfd68176e73492569084cd8ac5f1e4,d57d7bc22182180d01cb3f0b45a9a83fdd5499fa..233b5ae875834b27613c43d384d87eb00154e3e4
@@@ -798,8 -798,8 +798,8 @@@ static inline int rtnl_vfinfo_size(cons
                size += num_vfs *
                        (nla_total_size(sizeof(struct ifla_vf_mac)) +
                         nla_total_size(sizeof(struct ifla_vf_vlan)) +
 -                       nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
 -                       nla_total_size(sizeof(struct ifla_vf_spoofchk)));
 +                       nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
 +                       nla_total_size(sizeof(struct ifla_vf_rate)));
                return size;
        } else
                return 0;
@@@ -1065,7 -1065,6 +1065,7 @@@ static int rtnl_fill_ifinfo(struct sk_b
                        struct ifla_vf_info ivi;
                        struct ifla_vf_mac vf_mac;
                        struct ifla_vf_vlan vf_vlan;
 +                      struct ifla_vf_rate vf_rate;
                        struct ifla_vf_tx_rate vf_tx_rate;
                        struct ifla_vf_spoofchk vf_spoofchk;
                        struct ifla_vf_link_state vf_linkstate;
                                break;
                        vf_mac.vf =
                                vf_vlan.vf =
 +                              vf_rate.vf =
                                vf_tx_rate.vf =
                                vf_spoofchk.vf =
                                vf_linkstate.vf = ivi.vf;
                        memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
                        vf_vlan.vlan = ivi.vlan;
                        vf_vlan.qos = ivi.qos;
 -                      vf_tx_rate.rate = ivi.tx_rate;
 +                      vf_tx_rate.rate = ivi.max_tx_rate;
 +                      vf_rate.min_tx_rate = ivi.min_tx_rate;
 +                      vf_rate.max_tx_rate = ivi.max_tx_rate;
                        vf_spoofchk.setting = ivi.spoofchk;
                        vf_linkstate.link_state = ivi.linkstate;
                        vf = nla_nest_start(skb, IFLA_VF_INFO);
                        }
                        if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
                            nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
 +                          nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
 +                                  &vf_rate) ||
                            nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
                                    &vf_tx_rate) ||
                            nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
@@@ -1214,8 -1208,8 +1214,10 @@@ static const struct nla_policy ifla_vf_
                                    .len = sizeof(struct ifla_vf_tx_rate) },
        [IFLA_VF_SPOOFCHK]      = { .type = NLA_BINARY,
                                    .len = sizeof(struct ifla_vf_spoofchk) },
 +      [IFLA_VF_RATE]          = { .type = NLA_BINARY,
 +                                  .len = sizeof(struct ifla_vf_rate) },
+       [IFLA_VF_LINK_STATE]    = { .type = NLA_BINARY,
+                                   .len = sizeof(struct ifla_vf_link_state) },
  };
  
  static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
@@@ -1375,29 -1369,11 +1377,29 @@@ static int do_setvfinfo(struct net_devi
                }
                case IFLA_VF_TX_RATE: {
                        struct ifla_vf_tx_rate *ivt;
 +                      struct ifla_vf_info ivf;
                        ivt = nla_data(vf);
                        err = -EOPNOTSUPP;
 -                      if (ops->ndo_set_vf_tx_rate)
 -                              err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
 -                                                            ivt->rate);
 +                      if (ops->ndo_get_vf_config)
 +                              err = ops->ndo_get_vf_config(dev, ivt->vf,
 +                                                           &ivf);
 +                      if (err)
 +                              break;
 +                      err = -EOPNOTSUPP;
 +                      if (ops->ndo_set_vf_rate)
 +                              err = ops->ndo_set_vf_rate(dev, ivt->vf,
 +                                                         ivf.min_tx_rate,
 +                                                         ivt->rate);
 +                      break;
 +              }
 +              case IFLA_VF_RATE: {
 +                      struct ifla_vf_rate *ivt;
 +                      ivt = nla_data(vf);
 +                      err = -EOPNOTSUPP;
 +                      if (ops->ndo_set_vf_rate)
 +                              err = ops->ndo_set_vf_rate(dev, ivt->vf,
 +                                                         ivt->min_tx_rate,
 +                                                         ivt->max_tx_rate);
                        break;
                }
                case IFLA_VF_SPOOFCHK: {
@@@ -1770,7 -1746,6 +1772,6 @@@ static int rtnl_dellink(struct sk_buff 
  
        ops->dellink(dev, &list_kill);
        unregister_netdevice_many(&list_kill);
-       list_del(&list_kill);
        return 0;
  }
  
diff --combined net/core/skbuff.c
index b9e85e6cb26af61abcbf44ca97503301d01285bf,9433047b2453bff4e7accb0c7ceff9a5f9886acf..bf92824af3f77179e519f73085f6642c32ca61b6
@@@ -694,7 -694,7 +694,7 @@@ static void __copy_skb_header(struct sk
  #endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
        new->csum               = old->csum;
 -      new->local_df           = old->local_df;
 +      new->ignore_df          = old->ignore_df;
        new->pkt_type           = old->pkt_type;
        new->ip_summed          = old->ip_summed;
        skb_copy_queue_mapping(new, old);
@@@ -951,13 -951,10 +951,13 @@@ struct sk_buff *skb_copy(const struct s
  EXPORT_SYMBOL(skb_copy);
  
  /**
 - *    __pskb_copy     -       create copy of an sk_buff with private head.
 + *    __pskb_copy_fclone      -  create copy of an sk_buff with private head.
   *    @skb: buffer to copy
   *    @headroom: headroom of new skb
   *    @gfp_mask: allocation priority
 + *    @fclone: if true allocate the copy of the skb from the fclone
 + *    cache instead of the head cache; it is recommended to set this
 + *    to true for the cases where the copy will likely be cloned
   *
   *    Make a copy of both an &sk_buff and part of its data, located
   *    in header. Fragmented data remain shared. This is used when
   *    The returned buffer has a reference count of 1.
   */
  
 -struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
 +struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
 +                                 gfp_t gfp_mask, bool fclone)
  {
        unsigned int size = skb_headlen(skb) + headroom;
 -      struct sk_buff *n = __alloc_skb(size, gfp_mask,
 -                                      skb_alloc_rx_flag(skb), NUMA_NO_NODE);
 +      int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
 +      struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
  
        if (!n)
                goto out;
  out:
        return n;
  }
 -EXPORT_SYMBOL(__pskb_copy);
 +EXPORT_SYMBOL(__pskb_copy_fclone);
  
  /**
   *    pskb_expand_head - reallocate header of &sk_buff
@@@ -2885,14 -2881,13 +2885,14 @@@ struct sk_buff *skb_segment(struct sk_b
        int pos;
        int dummy;
  
+       __skb_push(head_skb, doffset);
        proto = skb_network_protocol(head_skb, &dummy);
        if (unlikely(!proto))
                return ERR_PTR(-EINVAL);
  
 -      csum = !!can_checksum_protocol(features, proto);
 +      csum = !head_skb->encap_hdr_csum &&
 +          !!can_checksum_protocol(features, proto);
  
-       __skb_push(head_skb, doffset);
        headroom = skb_headroom(head_skb);
        pos = skb_headlen(head_skb);
  
                        nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
                                                            skb_put(nskb, len),
                                                            len, 0);
 +                      SKB_GSO_CB(nskb)->csum_start =
 +                          skb_headroom(nskb) + offset;
                        continue;
                }
  
@@@ -3060,8 -3053,6 +3060,8 @@@ perform_csum_check
                        nskb->csum = skb_checksum(nskb, doffset,
                                                  nskb->len - doffset, 0);
                        nskb->ip_summed = CHECKSUM_NONE;
 +                      SKB_GSO_CB(nskb)->csum_start =
 +                          skb_headroom(nskb) + doffset;
                }
        } while ((offset += len) < head_skb->len);
  
@@@ -3923,7 -3914,7 +3923,7 @@@ void skb_scrub_packet(struct sk_buff *s
        skb->tstamp.tv64 = 0;
        skb->pkt_type = PACKET_HOST;
        skb->skb_iif = 0;
 -      skb->local_df = 0;
 +      skb->ignore_df = 0;
        skb_dst_drop(skb);
        skb->mark = 0;
        secpath_reset(skb);
index 5b499589613fb90c018973257eb9c6139243bb98,6853d22ebc071a4001663f7e3d40e9cee1fecb76..9acec61f54334f146d87711f145f1992dbf2c360
@@@ -93,8 -93,8 +93,8 @@@ int dns_query(const char *type, const c
        }
  
        if (!namelen)
 -              namelen = strlen(name);
 -      if (namelen < 3)
 +              namelen = strnlen(name, 256);
 +      if (namelen < 3 || namelen > 255)
                return -EINVAL;
        desclen += namelen + 1;
  
        if (!*_result)
                goto put;
  
-       memcpy(*_result, upayload->data, len + 1);
+       memcpy(*_result, upayload->data, len);
+       *_result[len] = '\0';
        if (_expiry)
                *_expiry = rkey->expiry;
  
diff --combined net/ipv4/ip_tunnel.c
index 86a00bd6684c94e4ec99a185262f8562dab7d5ad,9b553157e556ce2d80b13674790450433a5623a0..097b3e7c1e8f89052f6dd686d519a3a9f0624209
@@@ -268,6 -268,7 +268,7 @@@ static struct ip_tunnel *ip_tunnel_find
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        __be32 key = parms->i_key;
+       __be16 flags = parms->i_flags;
        int link = parms->link;
        struct ip_tunnel *t = NULL;
        struct hlist_head *head = ip_bucket(itn, parms);
        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr &&
-                   key == t->parms.i_key &&
                    link == t->parms.link &&
-                   type == t->dev->type)
+                   type == t->dev->type &&
+                   ip_tunnel_key_match(&t->parms, flags, key))
                        break;
        }
        return t;
@@@ -395,10 -396,11 +396,10 @@@ static struct ip_tunnel *ip_tunnel_crea
                                          struct ip_tunnel_net *itn,
                                          struct ip_tunnel_parm *parms)
  {
 -      struct ip_tunnel *nt, *fbt;
 +      struct ip_tunnel *nt;
        struct net_device *dev;
  
        BUG_ON(!itn->fb_tunnel_dev);
 -      fbt = netdev_priv(itn->fb_tunnel_dev);
        dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
        if (IS_ERR(dev))
                return ERR_CAST(dev);
@@@ -667,6 -669,7 +668,7 @@@ void ip_tunnel_xmit(struct sk_buff *skb
                dev->needed_headroom = max_headroom;
  
        if (skb_cow_head(skb, dev->needed_headroom)) {
+               ip_rt_put(rt);
                dev->stats.tx_dropped++;
                kfree_skb(skb);
                return;
@@@ -746,17 -749,21 +748,19 @@@ int ip_tunnel_ioctl(struct net_device *
                        goto done;
                if (p->iph.ttl)
                        p->iph.frag_off |= htons(IP_DF);
-               if (!(p->i_flags&TUNNEL_KEY))
-                       p->i_key = 0;
-               if (!(p->o_flags&TUNNEL_KEY))
-                       p->o_key = 0;
+               if (!(p->i_flags & VTI_ISVTI)) {
+                       if (!(p->i_flags & TUNNEL_KEY))
+                               p->i_key = 0;
+                       if (!(p->o_flags & TUNNEL_KEY))
+                               p->o_key = 0;
+               }
  
                t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
  
                if (!t && (cmd == SIOCADDTUNNEL)) {
                        t = ip_tunnel_create(net, itn, p);
 -                      if (IS_ERR(t)) {
 -                              err = PTR_ERR(t);
 -                              break;
 -                      }
 +                      err = PTR_ERR_OR_ZERO(t);
 +                      break;
                }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
diff --combined net/ipv4/ipip.c
index 4bc508f0db90287f09d6dcbaea1705ee8a9a1168,09680ddbc677f17dc96a54ac35cb0fddb55941e6..62eaa005e14610237f9e8a6016c366ba6d6b09cf
@@@ -149,13 -149,13 +149,13 @@@ static int ipip_err(struct sk_buff *skb
  
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->dev->ifindex, 0, IPPROTO_IPIP, 0);
+                                t->parms.link, 0, IPPROTO_IPIP, 0);
                err = 0;
                goto out;
        }
  
        if (type == ICMP_REDIRECT) {
-               ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
+               ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
                              IPPROTO_IPIP, 0);
                err = 0;
                goto out;
@@@ -486,5 -486,4 +486,5 @@@ static void __exit ipip_fini(void
  module_init(ipip_init);
  module_exit(ipip_fini);
  MODULE_LICENSE("GPL");
 +MODULE_ALIAS_RTNL_LINK("ipip");
  MODULE_ALIAS_NETDEV("tunl0");
diff --combined net/ipv6/output_core.c
index ffa029305a094371a2209919bd50a6b82ff7f190,a2bbc0d08d927791ed70e1a34f43e9353881bd39..5ec867e4a8b74fb23d8fdd2bcf9e0dce52f94455
@@@ -8,6 -8,32 +8,6 @@@
  #include <net/addrconf.h>
  #include <net/secure_seq.h>
  
 -void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 -{
 -      static atomic_t ipv6_fragmentation_id;
 -      struct in6_addr addr;
 -      int ident;
 -
 -#if IS_ENABLED(CONFIG_IPV6)
 -      struct inet_peer *peer;
 -      struct net *net;
 -
 -      net = dev_net(rt->dst.dev);
 -      peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
 -      if (peer) {
 -              fhdr->identification = htonl(inet_getid(peer, 0));
 -              inet_putpeer(peer);
 -              return;
 -      }
 -#endif
 -      ident = atomic_inc_return(&ipv6_fragmentation_id);
 -
 -      addr = rt->rt6i_dst.addr;
 -      addr.s6_addr32[0] ^= (__force __be32)ident;
 -      fhdr->identification = htonl(secure_ipv6_id(addr.s6_addr32));
 -}
 -EXPORT_SYMBOL(ipv6_select_ident);
 -
  int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
  {
        u16 offset = sizeof(struct ipv6hdr);
@@@ -78,6 -104,7 +78,7 @@@ int __ip6_local_out(struct sk_buff *skb
        if (len > IPV6_MAXPLEN)
                len = 0;
        ipv6_hdr(skb)->payload_len = htons(len);
+       IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
  
        return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
                       skb_dst(skb)->dev, dst_output);
diff --combined net/ipv6/sit.c
index f4380041f5e7b04211d7d73a05cb3989e8a9af94,45397b2a4a0b067e8b2b177944ac9a47b52ea926..4f408176dc64eeb306e25e5bde275581080fa523
@@@ -560,12 -560,12 +560,12 @@@ static int ipip6_err(struct sk_buff *sk
  
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->dev->ifindex, 0, IPPROTO_IPV6, 0);
+                                t->parms.link, 0, IPPROTO_IPV6, 0);
                err = 0;
                goto out;
        }
        if (type == ICMP_REDIRECT) {
-               ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
+               ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
                              IPPROTO_IPV6, 0);
                err = 0;
                goto out;
@@@ -1828,5 -1828,4 +1828,5 @@@ xfrm_tunnel_failed
  module_init(sit_init);
  module_exit(sit_cleanup);
  MODULE_LICENSE("GPL");
 +MODULE_ALIAS_RTNL_LINK("sit");
  MODULE_ALIAS_NETDEV("sit0");
diff --combined net/mac80211/iface.c
index 81a8e2a0b6aa0e3dac6d67b9c85fe2da863ac8a1,34799e06ee01165f78fd95affe57475876b222f3..388b863e821c6beedbcdb01a602c0d18db701d2f
@@@ -250,7 -250,6 +250,7 @@@ static int ieee80211_check_concurrent_i
  {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sub_if_data *nsdata;
 +      int ret;
  
        ASSERT_RTNL();
  
                }
        }
  
 -      return 0;
 +      mutex_lock(&local->chanctx_mtx);
 +      ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
 +      mutex_unlock(&local->chanctx_mtx);
 +      return ret;
  }
  
  static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
@@@ -399,7 -395,6 +399,7 @@@ int ieee80211_add_virtual_monitor(struc
        sdata->vif.type = NL80211_IFTYPE_MONITOR;
        snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
                 wiphy_name(local->hw.wiphy));
 +      sdata->wdev.iftype = NL80211_IFTYPE_MONITOR;
  
        sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
  
        mutex_unlock(&local->mtx);
        if (ret) {
                mutex_lock(&local->iflist_mtx);
 -              rcu_assign_pointer(local->monitor_sdata, NULL);
 +              RCU_INIT_POINTER(local->monitor_sdata, NULL);
                mutex_unlock(&local->iflist_mtx);
                synchronize_net();
                drv_remove_interface(local, sdata);
@@@ -457,7 -452,7 +457,7 @@@ void ieee80211_del_virtual_monitor(stru
                return;
        }
  
 -      rcu_assign_pointer(local->monitor_sdata, NULL);
 +      RCU_INIT_POINTER(local->monitor_sdata, NULL);
        mutex_unlock(&local->iflist_mtx);
  
        synchronize_net();
@@@ -497,9 -492,7 +497,9 @@@ int ieee80211_do_open(struct wireless_d
                if (!sdata->bss)
                        return -ENOLINK;
  
 +              mutex_lock(&local->mtx);
                list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
 +              mutex_unlock(&local->mtx);
  
                master = container_of(sdata->bss,
                                      struct ieee80211_sub_if_data, u.ap);
                drv_stop(local);
   err_del_bss:
        sdata->bss = NULL;
 -      if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 +      if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
 +              mutex_lock(&local->mtx);
                list_del(&sdata->u.vlan.list);
 +              mutex_unlock(&local->mtx);
 +      }
        /* might already be clear but that doesn't matter */
        clear_bit(SDATA_STATE_RUNNING, &sdata->state);
        return res;
@@@ -839,15 -829,8 +839,15 @@@ static void ieee80211_do_stop(struct ie
  
        cancel_work_sync(&sdata->recalc_smps);
        sdata_lock(sdata);
 +      mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
 +      if (!ieee80211_csa_needs_block_tx(local))
 +              ieee80211_wake_queues_by_reason(&local->hw,
 +                                      IEEE80211_MAX_QUEUE_MAP,
 +                                      IEEE80211_QUEUE_STOP_REASON_CSA);
 +      mutex_unlock(&local->mtx);
        sdata_unlock(sdata);
 +
        cancel_work_sync(&sdata->csa_finalize_work);
  
        cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
  
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP_VLAN:
 +              mutex_lock(&local->mtx);
                list_del(&sdata->u.vlan.list);
 -              rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
 +              mutex_unlock(&local->mtx);
 +              RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
                /* no need to tell driver */
                break;
        case NL80211_IFTYPE_MONITOR:
                break;
        case NL80211_IFTYPE_P2P_DEVICE:
                /* relies on synchronize_rcu() below */
 -              rcu_assign_pointer(local->p2p_sdata, NULL);
 +              RCU_INIT_POINTER(local->p2p_sdata, NULL);
                /* fall through */
        default:
                cancel_work_sync(&sdata->work);
@@@ -1286,7 -1267,6 +1286,7 @@@ static void ieee80211_setup_sdata(struc
        sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
        sdata->control_port_no_encrypt = false;
        sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
 +      sdata->vif.bss_conf.idle = true;
  
        sdata->noack_map = 0;
  
        INIT_WORK(&sdata->work, ieee80211_iface_work);
        INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
        INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work);
 +      INIT_LIST_HEAD(&sdata->assigned_chanctx_list);
 +      INIT_LIST_HEAD(&sdata->reserved_chanctx_list);
  
        switch (type) {
        case NL80211_IFTYPE_P2P_GO:
@@@ -1780,7 -1758,6 +1780,6 @@@ void ieee80211_remove_interfaces(struc
        }
        mutex_unlock(&local->iflist_mtx);
        unregister_netdevice_many(&unreg_list);
-       list_del(&unreg_list);
  
        list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
                list_del(&sdata->list);
@@@ -1796,19 -1773,20 +1795,19 @@@ static int netdev_notify(struct notifie
        struct ieee80211_sub_if_data *sdata;
  
        if (state != NETDEV_CHANGENAME)
 -              return 0;
 +              return NOTIFY_DONE;
  
        if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
 -              return 0;
 +              return NOTIFY_DONE;
  
        if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
 -              return 0;
 +              return NOTIFY_DONE;
  
        sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 -
        memcpy(sdata->name, dev->name, IFNAMSIZ);
 -
        ieee80211_debugfs_rename_netdev(sdata);
 -      return 0;
 +
 +      return NOTIFY_OK;
  }
  
  static struct notifier_block mac80211_netdev_notifier = {