Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 32e31c2996315770149b71b1e17837311d672a1e..92116dfe827c08b756a1d3662f360e91a414987a 100644
@@ -3002,7 +3002,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
                if (nskb->len == len + doffset)
                        goto perform_csum_check;
 
-               if (!sg) {
+               if (!sg && !nskb->remcsum_offload) {
                        nskb->ip_summed = CHECKSUM_NONE;
                        nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
                                                            skb_put(nskb, len),
@@ -3074,7 +3074,7 @@ skip_fraglist:
                nskb->truesize += nskb->data_len;
 
 perform_csum_check:
-               if (!csum) {
+               if (!csum && !nskb->remcsum_offload) {
                        nskb->csum = skb_checksum(nskb, doffset,
                                                  nskb->len - doffset, 0);
                        nskb->ip_summed = CHECKSUM_NONE;
@@ -3088,6 +3088,16 @@ perform_csum_check:
         * (see validate_xmit_skb_list() for example)
         */
        segs->prev = tail;
+
+       /* The following permits correct backpressure for protocols
+        * using skb_set_owner_w().
+        * The idea is to transfer ownership from head_skb to the last
+        * segment.
+        */
+       if (head_skb->destructor == sock_wfree) {
+               swap(tail->truesize, head_skb->truesize);
+               swap(tail->destructor, head_skb->destructor);
+               swap(tail->sk, head_skb->sk);
+       }
        return segs;
 
 err:
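
The ownership swap above is what keeps socket write-memory accounting, and
therefore backpressure, intact across segmentation. For reference, a
simplified sketch of the accounting that skb_set_owner_w() sets up
(illustration only; the real helper also orphans the skb and handles flow
hashing, and set_owner_w_sketch() is a made-up name):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch: charge skb->truesize to the socket's write budget and arm
 * sock_wfree() to uncharge it when the skb is freed.  Swapping sk,
 * destructor and truesize into the last segment keeps that charge
 * alive until the final segment is consumed, so the sender continues
 * to see backpressure for the whole train of segments.
 */
static void set_owner_w_sketch(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}
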
@@ -4130,6 +4140,113 @@ err_free:
 }
 EXPORT_SYMBOL(skb_vlan_untag);
 
+int skb_ensure_writable(struct sk_buff *skb, int write_len)
+{
+       if (!pskb_may_pull(skb, write_len))
+               return -ENOMEM;
+
+       if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
+               return 0;
+
+       return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(skb_ensure_writable);
+
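
A hypothetical caller, sketched to show the intended pattern: make the
headers private (unshared, and pulled into linear data) before mangling
them. decrement_ttl_sketch() is a made-up name; the helpers it calls are
the real kernel ones:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/ip.h>

/* Sketch: ensure the IPv4 header is writable, then rewrite the TTL. */
static int decrement_ttl_sketch(struct sk_buff *skb)
{
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				       sizeof(struct iphdr));
	if (err)
		return err;

	/* ip_decrease_ttl() also fixes up the header checksum */
	ip_decrease_ttl(ip_hdr(skb));
	return 0;
}
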
+/* remove VLAN header from packet and update csum accordingly. */
+static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
+{
+       struct vlan_hdr *vhdr;
+       unsigned int offset = skb->data - skb_mac_header(skb);
+       int err;
+
+       __skb_push(skb, offset);
+       err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
+       if (unlikely(err))
+               goto pull;
+
+       skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+
+       vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+       *vlan_tci = ntohs(vhdr->h_vlan_TCI);
+
+       memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+       __skb_pull(skb, VLAN_HLEN);
+
+       vlan_set_encap_proto(skb, vhdr);
+       skb->mac_header += VLAN_HLEN;
+
+       if (skb_network_offset(skb) < ETH_HLEN)
+               skb_set_network_header(skb, ETH_HLEN);
+
+       skb_reset_mac_len(skb);
+pull:
+       __skb_pull(skb, offset);
+
+       return err;
+}
+
+int skb_vlan_pop(struct sk_buff *skb)
+{
+       u16 vlan_tci;
+       __be16 vlan_proto;
+       int err;
+
+       if (likely(vlan_tx_tag_present(skb))) {
+               skb->vlan_tci = 0;
+       } else {
+               if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
+                             skb->protocol != htons(ETH_P_8021AD)) ||
+                            skb->len < VLAN_ETH_HLEN))
+                       return 0;
+
+               err = __skb_vlan_pop(skb, &vlan_tci);
+               if (err)
+                       return err;
+       }
+       /* move next vlan tag to hw accel tag */
+       if (likely((skb->protocol != htons(ETH_P_8021Q) &&
+                   skb->protocol != htons(ETH_P_8021AD)) ||
+                  skb->len < VLAN_ETH_HLEN))
+               return 0;
+
+       vlan_proto = skb->protocol;
+       err = __skb_vlan_pop(skb, &vlan_tci);
+       if (unlikely(err))
+               return err;
+
+       __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
+       return 0;
+}
+EXPORT_SYMBOL(skb_vlan_pop);
+
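
skb_vlan_pop() handles both placements of the outermost tag: a tag in the
hw-accel slot is simply cleared, a tag in the payload is stripped by
__skb_vlan_pop(), and in either case a remaining inner tag is promoted into
the hw-accel slot. A hypothetical caller (strip_all_vlans_sketch() is a
made-up name) that pops until the packet is untagged:

#include <linux/skbuff.h>
#include <linux/if_vlan.h>

/* Sketch: remove every VLAN tag.  Each skb_vlan_pop() call removes
 * exactly one tag, so the loop makes progress; the length check keeps
 * a truncated vlan frame from spinning forever.
 */
static int strip_all_vlans_sketch(struct sk_buff *skb)
{
	int err;

	while (vlan_tx_tag_present(skb) ||
	       ((skb->protocol == htons(ETH_P_8021Q) ||
		 skb->protocol == htons(ETH_P_8021AD)) &&
		skb->len >= VLAN_ETH_HLEN)) {
		err = skb_vlan_pop(skb);
		if (err)
			return err;
	}
	return 0;
}
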
+int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+{
+       if (vlan_tx_tag_present(skb)) {
+               unsigned int offset = skb->data - skb_mac_header(skb);
+               int err;
+
+               /* __vlan_insert_tag expects skb->data to point to the mac
+                * header. So move skb->data there before calling it, and
+                * move it back to its original position afterwards.
+                */
+               __skb_push(skb, offset);
+               err = __vlan_insert_tag(skb, skb->vlan_proto,
+                                       vlan_tx_tag_get(skb));
+               if (err)
+                       return err;
+               skb->protocol = skb->vlan_proto;
+               skb->mac_len += VLAN_HLEN;
+               __skb_pull(skb, offset);
+
+               if (skb->ip_summed == CHECKSUM_COMPLETE)
+                       skb->csum = csum_add(skb->csum, csum_partial(skb->data
+                                       + (2 * ETH_ALEN), VLAN_HLEN, 0));
+       }
+       __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
+       return 0;
+}
+EXPORT_SYMBOL(skb_vlan_push);
+
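
Together, the two helpers give a tag rewrite of the kind an Open
vSwitch-style action handler performs. A hypothetical sketch
(replace_outer_vlan_sketch() is a made-up name):

#include <linux/skbuff.h>
#include <linux/if_vlan.h>

/* Sketch: replace the outermost VLAN tag with (vlan_proto, vlan_tci).
 * skb_vlan_push() puts the new tag in the hw-accel slot, first pushing
 * any tag already there into the payload.
 */
static int replace_outer_vlan_sketch(struct sk_buff *skb,
				     __be16 vlan_proto, u16 vlan_tci)
{
	int err;

	err = skb_vlan_pop(skb);
	if (err)
		return err;

	return skb_vlan_push(skb, vlan_proto, vlan_tci);
}
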
 /**
  * alloc_skb_with_frags - allocate skb with page frags
  *