/* net/ipv4/gre_offload.c — from karo-tx-linux.git (merge of branch 'csums-next') */
1 /*
2  *      IPV4 GSO/GRO offload support
3  *      Linux INET implementation
4  *
5  *      This program is free software; you can redistribute it and/or
6  *      modify it under the terms of the GNU General Public License
7  *      as published by the Free Software Foundation; either version
8  *      2 of the License, or (at your option) any later version.
9  *
10  *      GRE GSO support
11  */
12
13 #include <linux/skbuff.h>
14 #include <linux/init.h>
15 #include <net/protocol.h>
16 #include <net/gre.h>
17
18 static int gre_gso_send_check(struct sk_buff *skb)
19 {
20         if (!skb->encapsulation)
21                 return -EINVAL;
22         return 0;
23 }
24
/* Segment a GRE-encapsulated GSO skb.
 *
 * Strips the GRE header, hands the inner frame to skb_mac_gso_segment(),
 * then pushes the tunnel headers back onto every resulting segment,
 * recomputing the GRE checksum per segment when GRE_CSUM is set.
 *
 * Returns the segment list, or an ERR_PTR() on failure; on segmentation
 * failure the skb's outer header layout is restored via
 * skb_gso_error_unwind().
 */
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t enc_features;
	int ghl;
	struct gre_base_hdr *greh;
	/* Outer-header state saved for the error-unwind path below. */
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	__be16 protocol = skb->protocol;
	int tnl_hlen;
	bool csum;

	/* Reject any gso_type bits this handler does not understand. */
	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_GRE |
				  SKB_GSO_GRE_CSUM |
				  SKB_GSO_IPIP)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
		goto out;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);

	/* Total GRE header length = base header + optional fields, as laid
	 * out between the transport header and the inner network header.
	 */
	ghl = skb_inner_network_header(skb) - skb_transport_header(skb);
	if (unlikely(ghl < sizeof(*greh)))
		goto out;

	csum = !!(greh->flags & GRE_CSUM);
	if (csum)
		skb->encap_hdr_csum = 1;

	if (unlikely(!pskb_may_pull(skb, ghl)))
		goto out;

	/* setup inner skb. */
	skb->protocol = greh->protocol;
	skb->encapsulation = 0;

	__skb_pull(skb, ghl);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (IS_ERR_OR_NULL(segs)) {
		/* Put the outer headers back so the caller sees the skb
		 * in its original shape.
		 */
		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
		goto out;
	}

	skb = segs;
	tnl_hlen = skb_tnl_header_len(skb);
	do {
		/* Re-expose the GRE header on this segment. */
		__skb_push(skb, ghl);
		if (csum) {
			__be32 *pcsum;

			/* NOTE(review): presumably linearized because the
			 * checksum pass cannot safely operate over frags
			 * shared with other users — confirm against
			 * skb_checksum_help()'s matching pattern.
			 */
			if (skb_has_shared_frag(skb)) {
				int err;

				err = __skb_linearize(skb);
				if (err) {
					kfree_skb_list(segs);
					segs = ERR_PTR(err);
					goto out;
				}
			}

			skb_reset_transport_header(skb);

			greh = (struct gre_base_hdr *)
			    skb_transport_header(skb);
			/* The checksum field is the first 32-bit word after
			 * the base header: zero the whole word (checksum +
			 * reserved half), then store the 16-bit checksum in
			 * its first half.
			 */
			pcsum = (__be32 *)(greh + 1);
			*pcsum = 0;
			*(__sum16 *)pcsum = gso_make_checksum(skb, 0);
		}
		/* Push the remaining tunnel headers (outer L2/L3). */
		__skb_push(skb, tnl_hlen - ghl);

		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

		/* Restore the outer header offsets saved on entry. */
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb->mac_len = mac_len;
		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}
121
/* GRO receive callback for GRE.
 *
 * Validates the GRE header (version 0, only C/K flags supported),
 * optionally verifies the GRE checksum, marks held packets from other
 * tunnels as not-same-flow, then hands the inner payload to the
 * protocol offload matching greh->protocol.
 *
 * Returns the value of the inner gro_receive callback, or NULL with
 * NAPI_GRO_CB(skb)->flush set when the packet cannot be aggregated.
 */
static struct sk_buff **gre_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

	/* Fast path: header in the linear area; fall back to the slow
	 * accessor when it is not.
	 */
	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and K (key), C (csum) flags. Note that
	 * although the support for the S (seq#) flag can be added easily
	 * for GRO, this is problematic for GSO hence can not be enabled
	 * here because a GRO pkt may end up in the forwarding path, thus
	 * requiring GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	type = greh->protocol;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype == NULL)
		goto out_unlock;

	/* Account for the optional fields present per the flag bits. */
	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	/* Re-pull with the full header length; greh may move. */
	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out_unlock;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (greh->flags & GRE_CSUM) {
		if (!NAPI_GRO_CB(skb)->flush &&
		    skb_gro_checksum_simple_validate(skb))
			goto out_unlock;
		NAPI_GRO_CB(skb)->encapsulation++;
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" includes:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

	skb_gro_pull(skb, grehlen);

	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
228
229 static int gre_gro_complete(struct sk_buff *skb, int nhoff)
230 {
231         struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
232         struct packet_offload *ptype;
233         unsigned int grehlen = sizeof(*greh);
234         int err = -ENOENT;
235         __be16 type;
236
237         skb->encapsulation = 1;
238         skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
239
240         type = greh->protocol;
241         if (greh->flags & GRE_KEY)
242                 grehlen += GRE_HEADER_SECTION;
243
244         if (greh->flags & GRE_CSUM)
245                 grehlen += GRE_HEADER_SECTION;
246
247         rcu_read_lock();
248         ptype = gro_find_complete_by_type(type);
249         if (ptype != NULL)
250                 err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
251
252         rcu_read_unlock();
253         return err;
254 }
255
/* Offload callback table registered for IPPROTO_GRE below. */
static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_send_check = gre_gso_send_check,
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};
264
/* Register the GRE GSO/GRO callbacks with the inet offload layer. */
static int __init gre_offload_init(void)
{
	return inet_add_offload(&gre_offload, IPPROTO_GRE);
}
device_initcall(gre_offload_init);