/*
 *      IPV4 GSO/GRO offload support
 *      Linux INET implementation
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

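/* tcp_gso_segment() splits a large GSO TCP packet into MSS-sized
 * segments.  Sequence numbers and checksums are fixed up incrementally
 * for every segment; FIN/PSH are kept only on the last segment and CWR
 * only on the first.
 */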
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
        __be32 delta;
        unsigned int oldlen;
        unsigned int mss;
        struct sk_buff *gso_skb = skb;
        __sum16 newcheck;
        bool ooo_okay, copy_destructor;

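        /* The full TCP header must be in the linear area before we can
         * read the header length and pull the header.
         */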
        if (!pskb_may_pull(skb, sizeof(*th)))
                goto out;

        th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        if (!pskb_may_pull(skb, thlen))
                goto out;

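        /* Save ~skb->len so the per-segment checksums can be updated
         * incrementally (RFC 1624) once the new segment lengths are known.
         */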
        oldlen = (u16)~skb->len;
        __skb_pull(skb, thlen);

        mss = tcp_skb_mss(skb);
        if (unlikely(skb->len <= mss))
                goto out;

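        /* Packet can be segmented by the device itself; just validate
         * gso_type and recompute gso_segs for untrusted (DODGY) sources.
         * segs == NULL tells the caller no software segmentation is needed.
         */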
        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;

                if (unlikely(type &
                             ~(SKB_GSO_TCPV4 |
                               SKB_GSO_DODGY |
                               SKB_GSO_TCP_ECN |
                               SKB_GSO_TCPV6 |
                               SKB_GSO_GRE |
                               SKB_GSO_IPIP |
                               SKB_GSO_SIT |
                               SKB_GSO_MPLS |
                               SKB_GSO_UDP_TUNNEL |
                               0) ||
                             !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
                        goto out;

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
        skb->ooo_okay = 0;

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;

        /* Only first segment might have ooo_okay set */
        segs->ooo_okay = ooo_okay;

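        /* Every segment except the last has length thlen + mss, so one
         * precomputed checksum fixup (newcheck) applies to all of them.
         */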
        delta = htonl(oldlen + (thlen + mss));

        skb = segs;
        th = tcp_hdr(skb);
        seq = ntohl(th->seq);

        newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
                                               (__force u32)delta));

        do {
                th->fin = th->psh = 0;
                th->check = newcheck;

                if (skb->ip_summed != CHECKSUM_PARTIAL)
                        th->check =
                             csum_fold(csum_partial(skb_transport_header(skb),
                                                    thlen, skb->csum));

                seq += mss;
                if (copy_destructor) {
                        skb->destructor = gso_skb->destructor;
                        skb->sk = gso_skb->sk;
                        /* {tcp|sock}_wfree() use exact truesize accounting:
                         * sum(skb->truesize) MUST be exactly gso_skb->truesize.
                         * So we account mss bytes of 'true size' for each
                         * segment; the last segment will carry the remainder.
                         */
                        skb->truesize = mss;
                        gso_skb->truesize -= mss;
                }
                skb = skb->next;
                th = tcp_hdr(skb);

                th->seq = htonl(seq);
                th->cwr = 0;
        } while (skb->next);

        /* The following permits TCP Small Queues to work well with GSO:
         * the callback to the TCP stack will run when the last fragment
         * is freed at TX completion, not right now when gso_skb is freed
         * by the GSO engine.
         */
        if (copy_destructor) {
                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
                swap(gso_skb->truesize, skb->truesize);
        }

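        /* The last segment may be shorter than mss, so its checksum delta
         * must be derived from its actual payload length.
         */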
        delta = htonl(oldlen + (skb_tail_pointer(skb) -
                                skb_transport_header(skb)) +
                      skb->data_len);
        th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                (__force u32)delta));
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                th->check = csum_fold(csum_partial(skb_transport_header(skb),
                                                   thlen, skb->csum));
out:
        return segs;
}
EXPORT_SYMBOL(tcp_gso_segment);

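/* tcp_gro_receive() tries to merge @skb into an already-held packet of
 * the same flow on the GRO list.  A mismatch in ports, flags, options,
 * ack number, or sequence continuity flushes the held packet instead.
 */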
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        struct sk_buff **pp = NULL;
        struct sk_buff *p;
        struct tcphdr *th;
        struct tcphdr *th2;
        unsigned int len;
        unsigned int thlen;
        __be32 flags;
        unsigned int mss = 1;
        unsigned int hlen;
        unsigned int off;
        int flush = 1;
        int i;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*th);
        th = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        hlen = off + thlen;
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        skb_gro_pull(skb, thlen);

        len = skb_gro_len(skb);
        flags = tcp_flag_word(th);

        for (; (p = *head); head = &p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                th2 = tcp_hdr(p);

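                /* source and dest ports are adjacent 16-bit fields in
                 * struct tcphdr; compare both with one 32-bit load.
                 */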
                if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                goto found;
        }

        goto out_check_final;

found:
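        /* Flush the held packet if CWR is set, if any flag other than
         * FIN/PSH changed, if the ack number or any TCP option differs,
         * if the new segment is larger than the flow's MSS, or if it is
         * not contiguous in sequence space with the held data.
         */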
        flush = NAPI_GRO_CB(p)->flush;
        flush |= (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
        flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
        for (i = sizeof(*th); i < thlen; i += 4)
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);

        mss = tcp_skb_mss(p);

        flush |= (len - 1) >= mss;
        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

        if (flush || skb_gro_receive(head, skb)) {
                mss = 1;
                goto out_check_final;
        }

        p = *head;
        th2 = tcp_hdr(p);
        tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
        flush = len < mss;
        flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
                                        TCP_FLAG_RST | TCP_FLAG_SYN |
                                        TCP_FLAG_FIN));

        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
                pp = head;

out:
        NAPI_GRO_CB(skb)->flush |= flush;

        return pp;
}
EXPORT_SYMBOL(tcp_gro_receive);

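/* tcp_gro_complete() finalizes a merged packet: checksum fields are set
 * up for CHECKSUM_PARTIAL and GSO metadata is restored so the packet
 * can be re-segmented later if needed.
 */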
int tcp_gro_complete(struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);

        skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

        if (th->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

        return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

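/* Prepare a TCPv4 packet for GSO: zero the checksum field and compute
 * the pseudo-header checksum that segmentation will complete.
 */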
static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
        const struct iphdr *iph;
        struct tcphdr *th;

        if (!pskb_may_pull(skb, sizeof(*th)))
                return -EINVAL;

        iph = ip_hdr(skb);
        th = tcp_hdr(skb);

        th->check = 0;
        skb->ip_summed = CHECKSUM_PARTIAL;
        __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
        return 0;
}

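/* IPv4 GRO entry point: verify the TCP checksum with whatever checksum
 * state the device handed us, then defer to the protocol-independent
 * tcp_gro_receive().
 */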
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        const struct iphdr *iph = skb_gro_network_header(skb);
        __wsum wsum;
        __sum16 sum;

        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
                if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
                                  skb->csum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        break;
                }
flush:
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;

        case CHECKSUM_NONE:
                wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                          skb_gro_len(skb), IPPROTO_TCP, 0);
                sum = csum_fold(skb_checksum(skb,
                                             skb_gro_offset(skb),
                                             skb_gro_len(skb),
                                             wsum));
                if (sum)
                        goto flush;

                skb->ip_summed = CHECKSUM_UNNECESSARY;
                break;
        }

        return tcp_gro_receive(head, skb);
}

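/* IPv4 GRO completion: seed th->check with the pseudo-header checksum
 * for the merged length and mark the packet as TCPv4 GSO before the
 * generic completion runs.
 */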
static int tcp4_gro_complete(struct sk_buff *skb)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
                                  iph->saddr, iph->daddr, 0);
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

        return tcp_gro_complete(skb);
}

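/* Offload callbacks registered with the inet layer for IPPROTO_TCP. */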
static const struct net_offload tcpv4_offload = {
        .callbacks = {
                .gso_send_check =       tcp_v4_gso_send_check,
                .gso_segment    =       tcp_gso_segment,
                .gro_receive    =       tcp4_gro_receive,
                .gro_complete   =       tcp4_gro_complete,
        },
};

int __init tcpv4_offload_init(void)
{
        return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}