]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - net/ipv4/ip_gre.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[karo-tx-linux.git] / net / ipv4 / ip_gre.c
1 /*
2  *      Linux NET3:     GRE over IP protocol decoder.
3  *
4  *      Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5  *
6  *      This program is free software; you can redistribute it and/or
7  *      modify it under the terms of the GNU General Public License
8  *      as published by the Free Software Foundation; either version
9  *      2 of the License, or (at your option) any later version.
10  *
11  */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <asm/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
23 #include <linux/in.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/mroute.h>
28 #include <linux/init.h>
29 #include <linux/in6.h>
30 #include <linux/inetdevice.h>
31 #include <linux/igmp.h>
32 #include <linux/netfilter_ipv4.h>
33 #include <linux/etherdevice.h>
34 #include <linux/if_ether.h>
35
36 #include <net/sock.h>
37 #include <net/ip.h>
38 #include <net/icmp.h>
39 #include <net/protocol.h>
40 #include <net/ipip.h>
41 #include <net/arp.h>
42 #include <net/checksum.h>
43 #include <net/dsfield.h>
44 #include <net/inet_ecn.h>
45 #include <net/xfrm.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/rtnetlink.h>
49 #include <net/gre.h>
50
51 #if IS_ENABLED(CONFIG_IPV6)
52 #include <net/ipv6.h>
53 #include <net/ip6_fib.h>
54 #include <net/ip6_route.h>
55 #endif
56
57 /*
58    Problems & solutions
59    --------------------
60
61    1. The most important issue is detecting local dead loops.
62    They would cause complete host lockup in transmit, which
63    would be "resolved" by stack overflow or, if queueing is enabled,
64    with infinite looping in net_bh.
65
66    We cannot track such dead loops during route installation,
67    it is infeasible task. The most general solutions would be
68    to keep skb->encapsulation counter (sort of local ttl),
69    and silently drop packet when it expires. It is a good
70    solution, but it supposes maintaining new variable in ALL
71    skb, even if no tunneling is used.
72
73    Current solution: xmit_recursion breaks dead loops. This is a percpu
74    counter, since when we enter the first ndo_xmit(), cpu migration is
75    forbidden. We force an exit if this counter reaches RECURSION_LIMIT
76
77    2. Networking dead loops would not kill routers, but would really
78    kill network. IP hop limit plays role of "t->recursion" in this case,
79    if we copy it from packet being encapsulated to upper header.
80    It is very good solution, but it introduces two problems:
81
82    - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
83      do not work over tunnels.
84    - traceroute does not work. I planned to relay ICMP from tunnel,
85      so that this problem would be solved and traceroute output
86      would be even more informative. This idea appeared to be wrong:
87      only Linux complies to rfc1812 now (yes, guys, Linux is the only
88      true router now :-)), all routers (at least, in neighbourhood of mine)
89      return only 8 bytes of payload. It is the end.
90
91    Hence, if we want that OSPF worked or traceroute said something reasonable,
92    we should search for another solution.
93
94    One of them is to parse packet trying to detect inner encapsulation
95    made by our node. It is difficult or even impossible, especially,
96    taking into account fragmentation. To be short, ttl is not a solution at all.
97
98    Current solution: The solution was UNEXPECTEDLY SIMPLE.
99    We force DF flag on tunnels with preconfigured hop limit,
100    that is ALL. :-) Well, it does not remove the problem completely,
101    but exponential growth of network traffic is changed to linear
102    (branches, that exceed pmtu are pruned) and tunnel mtu
103    rapidly degrades to value <68, where looping stops.
104    Yes, it is not good if there exists a router in the loop,
105    which does not force DF, even when encapsulating packets have DF set.
106    But it is not our problem! Nobody could accuse us, we made
107    all that we could make. Even if it is your gated who injected
108    fatal route to network, even if it were you who configured
109    fatal static route: you are innocent. :-)
110
111
112
113    3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
114    practically identical code. It would be good to glue them
115    together, but it is not very evident, how to make them modular.
116    sit is integral part of IPv6, ipip and gre are naturally modular.
117    We could extract common parts (hash table, ioctl etc)
118    to a separate module (ip_tunnel.c).
119
120    Alexey Kuznetsov.
121  */
122
/* When true, log (rate-limited) received packets whose outer header
 * carries a broken ECN marking; tunable at runtime (mode 0644).
 */
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
126
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);
static int ipgre_tunnel_bind_dev(struct net_device *dev);

/* Fallback tunnel: no source, no destination, no key, no options */

#define HASH_SIZE  16

/* Per-network-namespace GRE state, looked up via ipgre_net_id. */
static int ipgre_net_id __read_mostly;
struct ipgre_net {
	/* Four hash tables, indexed by how specific the tunnel's
	 * endpoints are (see the hash-table comment further down);
	 * each bucket is an RCU-protected singly linked tunnel list.
	 */
	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;	/* catch-all fallback device */
};
142
143 /* Tunnel hash table */
144
145 /*
146    4 hash tables:
147
148    3: (remote,local)
149    2: (remote,*)
150    1: (*,local)
151    0: (*,*)
152
153    We require exact key match i.e. if a key is present in packet
154    it will match only tunnel with the same key; if it is not present,
155    it will match only keyless tunnel.
156
157    All keyless packets, if not matching a configured keyless tunnel,
158    will match the fallback tunnel.
159  */
160
/* Bucket index for an IPv4 address or GRE key: fold the low byte's
 * two nibbles together and keep 4 bits (HASH_SIZE == 16 buckets).
 * The macro argument is fully parenthesized so that compound
 * expressions such as HASH(a ^ b) expand correctly.
 */
#define HASH(addr) ((((__force u32)(addr)) ^ (((__force u32)(addr)) >> 4)) & 0xF)

/* Aliases naming the four tables by what they match on. */
#define tunnels_r_l	tunnels[3]	/* (remote, local) */
#define tunnels_r	tunnels[2]	/* (remote, *)     */
#define tunnels_l	tunnels[1]	/* (*, local)      */
#define tunnels_wc	tunnels[0]	/* (*, *) wildcard */
167
168 static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
169                                                    struct rtnl_link_stats64 *tot)
170 {
171         int i;
172
173         for_each_possible_cpu(i) {
174                 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
175                 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
176                 unsigned int start;
177
178                 do {
179                         start = u64_stats_fetch_begin_bh(&tstats->syncp);
180                         rx_packets = tstats->rx_packets;
181                         tx_packets = tstats->tx_packets;
182                         rx_bytes = tstats->rx_bytes;
183                         tx_bytes = tstats->tx_bytes;
184                 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
185
186                 tot->rx_packets += rx_packets;
187                 tot->tx_packets += tx_packets;
188                 tot->rx_bytes   += rx_bytes;
189                 tot->tx_bytes   += tx_bytes;
190         }
191
192         tot->multicast = dev->stats.multicast;
193         tot->rx_crc_errors = dev->stats.rx_crc_errors;
194         tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
195         tot->rx_length_errors = dev->stats.rx_length_errors;
196         tot->rx_frame_errors = dev->stats.rx_frame_errors;
197         tot->rx_errors = dev->stats.rx_errors;
198
199         tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
200         tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
201         tot->tx_dropped = dev->stats.tx_dropped;
202         tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
203         tot->tx_errors = dev->stats.tx_errors;
204
205         return tot;
206 }
207
208 /* Does key in tunnel parameters match packet */
209 static bool ipgre_key_match(const struct ip_tunnel_parm *p,
210                             __be16 flags, __be32 key)
211 {
212         if (p->i_flags & GRE_KEY) {
213                 if (flags & GRE_KEY)
214                         return key == p->i_key;
215                 else
216                         return false;   /* key expected, none present */
217         } else
218                 return !(flags & GRE_KEY);
219 }
220
221 /* Given src, dst and key, find appropriate for input tunnel. */
222
223 static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
224                                              __be32 remote, __be32 local,
225                                              __be16 flags, __be32 key,
226                                              __be16 gre_proto)
227 {
228         struct net *net = dev_net(dev);
229         int link = dev->ifindex;
230         unsigned int h0 = HASH(remote);
231         unsigned int h1 = HASH(key);
232         struct ip_tunnel *t, *cand = NULL;
233         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
234         int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
235                        ARPHRD_ETHER : ARPHRD_IPGRE;
236         int score, cand_score = 4;
237
238         for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
239                 if (local != t->parms.iph.saddr ||
240                     remote != t->parms.iph.daddr ||
241                     !(t->dev->flags & IFF_UP))
242                         continue;
243
244                 if (!ipgre_key_match(&t->parms, flags, key))
245                         continue;
246
247                 if (t->dev->type != ARPHRD_IPGRE &&
248                     t->dev->type != dev_type)
249                         continue;
250
251                 score = 0;
252                 if (t->parms.link != link)
253                         score |= 1;
254                 if (t->dev->type != dev_type)
255                         score |= 2;
256                 if (score == 0)
257                         return t;
258
259                 if (score < cand_score) {
260                         cand = t;
261                         cand_score = score;
262                 }
263         }
264
265         for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
266                 if (remote != t->parms.iph.daddr ||
267                     !(t->dev->flags & IFF_UP))
268                         continue;
269
270                 if (!ipgre_key_match(&t->parms, flags, key))
271                         continue;
272
273                 if (t->dev->type != ARPHRD_IPGRE &&
274                     t->dev->type != dev_type)
275                         continue;
276
277                 score = 0;
278                 if (t->parms.link != link)
279                         score |= 1;
280                 if (t->dev->type != dev_type)
281                         score |= 2;
282                 if (score == 0)
283                         return t;
284
285                 if (score < cand_score) {
286                         cand = t;
287                         cand_score = score;
288                 }
289         }
290
291         for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
292                 if ((local != t->parms.iph.saddr &&
293                      (local != t->parms.iph.daddr ||
294                       !ipv4_is_multicast(local))) ||
295                     !(t->dev->flags & IFF_UP))
296                         continue;
297
298                 if (!ipgre_key_match(&t->parms, flags, key))
299                         continue;
300
301                 if (t->dev->type != ARPHRD_IPGRE &&
302                     t->dev->type != dev_type)
303                         continue;
304
305                 score = 0;
306                 if (t->parms.link != link)
307                         score |= 1;
308                 if (t->dev->type != dev_type)
309                         score |= 2;
310                 if (score == 0)
311                         return t;
312
313                 if (score < cand_score) {
314                         cand = t;
315                         cand_score = score;
316                 }
317         }
318
319         for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
320                 if (t->parms.i_key != key ||
321                     !(t->dev->flags & IFF_UP))
322                         continue;
323
324                 if (t->dev->type != ARPHRD_IPGRE &&
325                     t->dev->type != dev_type)
326                         continue;
327
328                 score = 0;
329                 if (t->parms.link != link)
330                         score |= 1;
331                 if (t->dev->type != dev_type)
332                         score |= 2;
333                 if (score == 0)
334                         return t;
335
336                 if (score < cand_score) {
337                         cand = t;
338                         cand_score = score;
339                 }
340         }
341
342         if (cand != NULL)
343                 return cand;
344
345         dev = ign->fb_tunnel_dev;
346         if (dev->flags & IFF_UP)
347                 return netdev_priv(dev);
348
349         return NULL;
350 }
351
352 static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
353                 struct ip_tunnel_parm *parms)
354 {
355         __be32 remote = parms->iph.daddr;
356         __be32 local = parms->iph.saddr;
357         __be32 key = parms->i_key;
358         unsigned int h = HASH(key);
359         int prio = 0;
360
361         if (local)
362                 prio |= 1;
363         if (remote && !ipv4_is_multicast(remote)) {
364                 prio |= 2;
365                 h ^= HASH(remote);
366         }
367
368         return &ign->tunnels[prio][h];
369 }
370
371 static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
372                 struct ip_tunnel *t)
373 {
374         return __ipgre_bucket(ign, &t->parms);
375 }
376
/* Insert @t at the head of its hash bucket.  The tunnel's next pointer
 * is set before the bucket head is updated, and both stores use
 * rcu_assign_pointer() so concurrent RCU readers (ipgre_tunnel_lookup)
 * always see a consistent list.  Callers hold RTNL (rtnl_dereference).
 */
static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
384
/* Remove @t from its hash bucket, if present.  The bucket list is
 * walked under RTNL (rtnl_dereference) and @t is spliced out with a
 * single rcu_assign_pointer() so concurrent RCU readers never observe
 * a broken list.
 */
static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = ipgre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
399
400 static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
401                                            struct ip_tunnel_parm *parms,
402                                            int type)
403 {
404         __be32 remote = parms->iph.daddr;
405         __be32 local = parms->iph.saddr;
406         __be32 key = parms->i_key;
407         int link = parms->link;
408         struct ip_tunnel *t;
409         struct ip_tunnel __rcu **tp;
410         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
411
412         for (tp = __ipgre_bucket(ign, parms);
413              (t = rtnl_dereference(*tp)) != NULL;
414              tp = &t->next)
415                 if (local == t->parms.iph.saddr &&
416                     remote == t->parms.iph.daddr &&
417                     key == t->parms.i_key &&
418                     link == t->parms.link &&
419                     type == t->dev->type)
420                         break;
421
422         return t;
423 }
424
/* Find a layer-3 GRE tunnel matching @parms exactly; when none exists
 * and @create is non-zero, allocate, register and link a new device.
 * Returns the existing or new tunnel, or NULL when @create was not set
 * or allocation/registration failed.
 */
static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
		struct ip_tunnel_parm *parms, int create)
{
	struct ip_tunnel *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
	if (t || !create)
		return t;

	/* Use the requested name, or the "gre%d" template so the core
	 * assigns the next free gre0/gre1/... name.
	 */
	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "gre%d");

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ipgre_link_ops;

	dev->mtu = ipgre_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	/* The hash table holds its own reference to the device. */
	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
469
/* ndo_uninit: unlink the tunnel from its per-namespace hash table and
 * drop the device reference taken when it was linked in (see
 * dev_hold() in ipgre_tunnel_locate()).
 */
static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}
478
479
/* Handle an ICMP error received in response to a GRE packet we sent:
 * parse the quoted outer IP + GRE header, find the originating tunnel,
 * feed PMTU/redirect information to the routing layer, and record the
 * error in the tunnel's err_count/err_time state.
 */
static void ipgre_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. It means, that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put GRE key to the third word
   in GRE header. It makes impossible maintaining even soft state for keyed
   GRE tunnels with enabled checksum. Tell them "thank you".

   Well, I wonder, rfc1812 was written by Cisco employee,
   what the hell these idiots break standards established
   by themselves???
 */

	const struct iphdr *iph = (const struct iphdr *)skb->data;
	__be16	     *p = (__be16 *)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;	/* outer IP hdr + GRE flags/proto */
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;
	__be32 key = 0;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		/* Advance past the optional checksum field so grehlen
		 * ends exactly after the key, when one is present.
		 */
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	if (flags & GRE_KEY)
		key = *(((__be32 *)p) + (grehlen / 4) - 1);

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;

	case ICMP_REDIRECT:
		break;
	}

	/* The quoted header is one we transmitted, so its daddr is the
	 * tunnel's remote and its saddr our local address.
	 */
	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
				flags, key, p[1]);

	if (t == NULL)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
			      IPPROTO_GRE, 0);
		return;
	}
	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	/* Coalesce bursts: bump the counter while errors arrive within
	 * IPTUNNEL_ERR_TIMEO of the previous one, otherwise restart at 1.
	 */
	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}
580
581 static inline u8
582 ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
583 {
584         u8 inner = 0;
585         if (skb->protocol == htons(ETH_P_IP))
586                 inner = old_iph->tos;
587         else if (skb->protocol == htons(ETH_P_IPV6))
588                 inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
589         return INET_ECN_encapsulate(tos, inner);
590 }
591
/* GRE receive handler, called for IPPROTO_GRE packets.  Parses and
 * validates the GRE header (optional checksum, key and sequence
 * number), looks up the matching tunnel, strips the encapsulation and
 * feeds the inner packet back into the stack through the tunnel
 * device.  Always returns 0; packets matching no tunnel are answered
 * with an ICMP port-unreachable.
 */
static int ipgre_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;	/* mandatory GRE header: flags + protocol */
	__be16 gre_proto;
	int    err;

	/* 16 bytes = the largest GRE header parsed below: flags/proto
	 * plus optional checksum, key and sequence fields (4 each).
	 */
	if (!pskb_may_pull(skb, 16))
		goto drop;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop;

		if (flags&GRE_CSUM) {
			/* Verify the checksum over the whole packet,
			 * reusing a CHECKSUM_COMPLETE value when one is
			 * already available; a non-zero csum marks
			 * failure and is acted upon after tunnel lookup.
			 */
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	tunnel = ipgre_tunnel_lookup(skb->dev,
				     iph->saddr, iph->daddr, flags, key,
				     gre_proto);
	if (tunnel) {
		struct pcpu_tstats *tstats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		/* Strip the GRE header and fix up the receive checksum. */
		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (rt_is_output_route(skb_rtable(skb)))
				goto drop;
			tunnel->dev->stats.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		/* A failed checksum — or a missing one on a tunnel that
		 * requires checksums — counts as a CRC error.
		 */
		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			/* On sequencing tunnels, drop packets with no
			 * sequence number or one older than the last seen.
			 */
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		/* Propagate the outer header's ECN marking into the inner
		 * packet; err > 1 means the packet must be dropped.
		 */
		err = IP_ECN_decapsulate(iph, skb);
		if (unlikely(err)) {
			if (log_ecn_error)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &iph->saddr, iph->tos);
			if (err > 1) {
				++tunnel->dev->stats.rx_frame_errors;
				++tunnel->dev->stats.rx_errors;
				goto drop;
			}
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		gro_cells_receive(&tunnel->gro_cells, skb);
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}
737
738 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
739 {
740         struct ip_tunnel *tunnel = netdev_priv(dev);
741         const struct iphdr  *old_iph;
742         const struct iphdr  *tiph;
743         struct flowi4 fl4;
744         u8     tos;
745         __be16 df;
746         struct rtable *rt;                      /* Route to the other host */
747         struct net_device *tdev;                /* Device to other host */
748         struct iphdr  *iph;                     /* Our new IP header */
749         unsigned int max_headroom;              /* The extra header space needed */
750         int    gre_hlen;
751         __be32 dst;
752         int    mtu;
753         u8     ttl;
754
755         if (skb->ip_summed == CHECKSUM_PARTIAL &&
756             skb_checksum_help(skb))
757                 goto tx_error;
758
759         old_iph = ip_hdr(skb);
760
761         if (dev->type == ARPHRD_ETHER)
762                 IPCB(skb)->flags = 0;
763
764         if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
765                 gre_hlen = 0;
766                 if (skb->protocol == htons(ETH_P_IP))
767                         tiph = (const struct iphdr *)skb->data;
768                 else
769                         tiph = &tunnel->parms.iph;
770         } else {
771                 gre_hlen = tunnel->hlen;
772                 tiph = &tunnel->parms.iph;
773         }
774
775         if ((dst = tiph->daddr) == 0) {
776                 /* NBMA tunnel */
777
778                 if (skb_dst(skb) == NULL) {
779                         dev->stats.tx_fifo_errors++;
780                         goto tx_error;
781                 }
782
783                 if (skb->protocol == htons(ETH_P_IP)) {
784                         rt = skb_rtable(skb);
785                         dst = rt_nexthop(rt, old_iph->daddr);
786                 }
787 #if IS_ENABLED(CONFIG_IPV6)
788                 else if (skb->protocol == htons(ETH_P_IPV6)) {
789                         const struct in6_addr *addr6;
790                         struct neighbour *neigh;
791                         bool do_tx_error_icmp;
792                         int addr_type;
793
794                         neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
795                         if (neigh == NULL)
796                                 goto tx_error;
797
798                         addr6 = (const struct in6_addr *)&neigh->primary_key;
799                         addr_type = ipv6_addr_type(addr6);
800
801                         if (addr_type == IPV6_ADDR_ANY) {
802                                 addr6 = &ipv6_hdr(skb)->daddr;
803                                 addr_type = ipv6_addr_type(addr6);
804                         }
805
806                         if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
807                                 do_tx_error_icmp = true;
808                         else {
809                                 do_tx_error_icmp = false;
810                                 dst = addr6->s6_addr32[3];
811                         }
812                         neigh_release(neigh);
813                         if (do_tx_error_icmp)
814                                 goto tx_error_icmp;
815                 }
816 #endif
817                 else
818                         goto tx_error;
819         }
820
821         ttl = tiph->ttl;
822         tos = tiph->tos;
823         if (tos & 0x1) {
824                 tos &= ~0x1;
825                 if (skb->protocol == htons(ETH_P_IP))
826                         tos = old_iph->tos;
827                 else if (skb->protocol == htons(ETH_P_IPV6))
828                         tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
829         }
830
831         rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
832                                  tunnel->parms.o_key, RT_TOS(tos),
833                                  tunnel->parms.link);
834         if (IS_ERR(rt)) {
835                 dev->stats.tx_carrier_errors++;
836                 goto tx_error;
837         }
838         tdev = rt->dst.dev;
839
840         if (tdev == dev) {
841                 ip_rt_put(rt);
842                 dev->stats.collisions++;
843                 goto tx_error;
844         }
845
846         df = tiph->frag_off;
847         if (df)
848                 mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
849         else
850                 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
851
852         if (skb_dst(skb))
853                 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
854
855         if (skb->protocol == htons(ETH_P_IP)) {
856                 df |= (old_iph->frag_off&htons(IP_DF));
857
858                 if ((old_iph->frag_off&htons(IP_DF)) &&
859                     mtu < ntohs(old_iph->tot_len)) {
860                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
861                         ip_rt_put(rt);
862                         goto tx_error;
863                 }
864         }
865 #if IS_ENABLED(CONFIG_IPV6)
866         else if (skb->protocol == htons(ETH_P_IPV6)) {
867                 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
868
869                 if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
870                         if ((tunnel->parms.iph.daddr &&
871                              !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
872                             rt6->rt6i_dst.plen == 128) {
873                                 rt6->rt6i_flags |= RTF_MODIFIED;
874                                 dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
875                         }
876                 }
877
878                 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
879                         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
880                         ip_rt_put(rt);
881                         goto tx_error;
882                 }
883         }
884 #endif
885
886         if (tunnel->err_count > 0) {
887                 if (time_before(jiffies,
888                                 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
889                         tunnel->err_count--;
890
891                         dst_link_failure(skb);
892                 } else
893                         tunnel->err_count = 0;
894         }
895
896         max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;
897
898         if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
899             (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
900                 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
901                 if (max_headroom > dev->needed_headroom)
902                         dev->needed_headroom = max_headroom;
903                 if (!new_skb) {
904                         ip_rt_put(rt);
905                         dev->stats.tx_dropped++;
906                         dev_kfree_skb(skb);
907                         return NETDEV_TX_OK;
908                 }
909                 if (skb->sk)
910                         skb_set_owner_w(new_skb, skb->sk);
911                 dev_kfree_skb(skb);
912                 skb = new_skb;
913                 old_iph = ip_hdr(skb);
914                 /* Warning : tiph value might point to freed memory */
915         }
916
917         skb_push(skb, gre_hlen);
918         skb_reset_network_header(skb);
919         skb_set_transport_header(skb, sizeof(*iph));
920         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
921         IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
922                               IPSKB_REROUTED);
923         skb_dst_drop(skb);
924         skb_dst_set(skb, &rt->dst);
925
926         /*
927          *      Push down and install the IPIP header.
928          */
929
930         iph                     =       ip_hdr(skb);
931         iph->version            =       4;
932         iph->ihl                =       sizeof(struct iphdr) >> 2;
933         iph->frag_off           =       df;
934         iph->protocol           =       IPPROTO_GRE;
935         iph->tos                =       ipgre_ecn_encapsulate(tos, old_iph, skb);
936         iph->daddr              =       fl4.daddr;
937         iph->saddr              =       fl4.saddr;
938         iph->ttl                =       ttl;
939
940         if (ttl == 0) {
941                 if (skb->protocol == htons(ETH_P_IP))
942                         iph->ttl = old_iph->ttl;
943 #if IS_ENABLED(CONFIG_IPV6)
944                 else if (skb->protocol == htons(ETH_P_IPV6))
945                         iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
946 #endif
947                 else
948                         iph->ttl = ip4_dst_hoplimit(&rt->dst);
949         }
950
951         ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
952         ((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
953                                    htons(ETH_P_TEB) : skb->protocol;
954
955         if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
956                 __be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
957
958                 if (tunnel->parms.o_flags&GRE_SEQ) {
959                         ++tunnel->o_seqno;
960                         *ptr = htonl(tunnel->o_seqno);
961                         ptr--;
962                 }
963                 if (tunnel->parms.o_flags&GRE_KEY) {
964                         *ptr = tunnel->parms.o_key;
965                         ptr--;
966                 }
967                 if (tunnel->parms.o_flags&GRE_CSUM) {
968                         int offset = skb_transport_offset(skb);
969
970                         *ptr = 0;
971                         *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
972                                                                  skb->len - offset,
973                                                                  0));
974                 }
975         }
976
977         iptunnel_xmit(skb, dev);
978         return NETDEV_TX_OK;
979
980 #if IS_ENABLED(CONFIG_IPV6)
981 tx_error_icmp:
982         dst_link_failure(skb);
983 #endif
984 tx_error:
985         dev->stats.tx_errors++;
986         dev_kfree_skb(skb);
987         return NETDEV_TX_OK;
988 }
989
/*
 * Bind the tunnel to an underlying output device and compute a usable MTU.
 *
 * If the tunnel has a fixed destination, routes it to guess the egress
 * device; otherwise falls back to the explicitly configured link.  From
 * that device it derives needed_headroom and an MTU, accounting for the
 * outer IPv4 header plus the GRE header and its optional checksum, key
 * and sequence words (4 bytes each).
 *
 * Returns the MTU the tunnel device should use, clamped to at least 68
 * (the minimum IPv4 MTU).  Also caches the encapsulation length in
 * tunnel->hlen.  Caller holds RTNL.
 */
static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;	/* outer IP + base GRE header */

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */

	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 iph->daddr, iph->saddr,
					 tunnel->parms.o_key,
					 RT_TOS(iph->tos),
					 tunnel->parms.link);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}

		/* a fixed remote makes non-Ethernet tunnels point-to-point */
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	/* no route found: fall back to the configured link device, if any */
	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->needed_headroom = addend + hlen;
	mtu -= dev->hard_header_len + addend;

	/* 68 is the minimum MTU IPv4 guarantees (RFC 791) */
	if (mtu < 68)
		mtu = 68;

	tunnel->hlen = addend;

	return mtu;
}
1050
/*
 * ioctl handler for the legacy "ip tunnel" interface.
 *
 * Supports SIOCGETTUNNEL (query parameters), SIOCADDTUNNEL /
 * SIOCCHGTUNNEL (create or reconfigure; requires CAP_NET_ADMIN) and
 * SIOCDELTUNNEL (destroy; requires CAP_NET_ADMIN).  Operations on the
 * per-namespace fallback device ("gre0") address a tunnel looked up by
 * the user-supplied parameters; operations on any other device address
 * that device itself.  Called under RTNL.
 *
 * Returns 0 on success or a negative errno.
 */
static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			/* fallback device: look up by user-supplied parms */
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		/*
		 * Sanity-check the outer header template: plain IPv4, GRE
		 * payload, no IP options, only DF allowed in frag_off, and
		 * no GRE version/routing bits (we implement neither).
		 */
		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		/* a fixed TTL implies path-MTU discovery (DF set) */
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		/* keys are only meaningful when the KEY flag is present */
		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				/* new parms already belong to another tunnel */
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				/* can't flip broadcast/p-t-p mode on a live device */
				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				/*
				 * Re-hash under new endpoints: unlink, wait for
				 * readers to drop the old position, update, relink.
				 */
				ipgre_tunnel_unlink(ign, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				/* rebind if the underlying link changed */
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			/* report the effective parameters back to the caller */
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			/* the fallback device itself may not be deleted */
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
1181
1182 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1183 {
1184         struct ip_tunnel *tunnel = netdev_priv(dev);
1185         if (new_mtu < 68 ||
1186             new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1187                 return -EINVAL;
1188         dev->mtu = new_mtu;
1189         return 0;
1190 }
1191
1192 /* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.
1195
1196
   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
1199    I have an impression, that Cisco could make something similar,
1200    but this feature is apparently missing in IOS<=11.2(8).
1201
1202    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1203    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1204
1205    ping -t 255 224.66.66.66
1206
1207    If nobody answers, mbone does not work.
1208
1209    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1210    ip addr add 10.66.66.<somewhat>/24 dev Universe
1211    ifconfig Universe up
1212    ifconfig Universe add fe80::<Your_real_addr>/10
1213    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1214    ftp 10.66.66.66
1215    ...
1216    ftp fec0:6666:6666::193.233.7.65
1217    ...
1218
1219  */
1220
/*
 * header_ops ->create: push a pre-built outer IPv4 + GRE header onto the
 * skb, using the tunnel's header template, then fill in the GRE flags,
 * protocol and (optionally) the caller-supplied outer source/destination
 * addresses (4-byte IPv4 addresses, matching dev->addr_len).
 *
 * Returns the header length when the destination is known (header is
 * complete), or the negated length when it still needs to be resolved
 * by the caller.
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(iph+1);	/* GRE flags/protocol follow the IP header */

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0]		= t->parms.o_flags;
	p[1]		= htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen;

	return -t->hlen;
}
1246
1247 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1248 {
1249         const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
1250         memcpy(haddr, &iph->saddr, 4);
1251         return 4;
1252 }
1253
/* Link-layer header operations for NBMA and broadcast-mode GRE tunnels. */
static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
1258
1259 #ifdef CONFIG_NET_IPGRE_BROADCAST
/*
 * ndo_open for broadcast-mode tunnels: if the tunnel destination is an
 * IPv4 multicast group, route it to find the real egress device and join
 * the group there, remembering the device index in t->mlink so
 * ipgre_close() can leave the group again.  Called under RTNL.
 */
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		/* NB: from here on, "dev" is the egress device, not the tunnel */
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}
1285
1286 static int ipgre_close(struct net_device *dev)
1287 {
1288         struct ip_tunnel *t = netdev_priv(dev);
1289
1290         if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1291                 struct in_device *in_dev;
1292                 in_dev = inetdev_by_index(dev_net(dev), t->mlink);
1293                 if (in_dev)
1294                         ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1295         }
1296         return 0;
1297 }
1298
1299 #endif
1300
/* Device operations for plain (layer-3) GRE tunnel devices. */
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};
1313
/*
 * Device destructor: release GRO cells and per-cpu stats before freeing
 * the netdev itself.  Installed as dev->destructor by the setup routines.
 */
static void ipgre_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(dev->tstats);
	free_netdev(dev);
}
1322
/* Offload features a GRE tunnel device can support on its own. */
#define GRE_FEATURES (NETIF_F_SG |		\
		      NETIF_F_FRAGLIST |	\
		      NETIF_F_HIGHDMA |		\
		      NETIF_F_HW_CSUM)
1327
/*
 * alloc_netdev() setup callback for layer-3 GRE tunnel devices: install
 * the ops, defaults (ARPHRD_IPGRE, headroom/MTU for a 4-byte GRE header,
 * no ARP, 4-byte IPv4 "hardware" addresses) and offload feature flags.
 */
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->type		= ARPHRD_IPGRE;
	dev->needed_headroom	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;	/* outer IPv4 address */
	dev->features		|= NETIF_F_NETNS_LOCAL;
	/* keep the dst cached on transmitted skbs */
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
}
1345
/*
 * ndo_init for a layer-3 GRE tunnel device.
 *
 * Copies the configured outer endpoints into the device's link-layer
 * addresses, installs header_ops where a header can be meaningfully
 * built (NBMA tunnels with no fixed destination, or multicast
 * "broadcast LAN" tunnels), and allocates per-cpu stats and GRO cells.
 *
 * Returns 0 on success or a negative errno.
 */
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int err;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	/* dev_addr/broadcast hold the outer IPv4 endpoints (addr_len == 4) */
	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* multicast mapping requires a fixed local address */
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		/* undo the stats allocation on failure */
		free_percpu(dev->tstats);
		return err;
	}

	return 0;
}
1385
/*
 * Initialize the per-namespace fallback device ("gre0"), which catches
 * GRE packets not matching any configured tunnel.  Sets up a minimal
 * outer IPv4 header template and holds a self-reference so the device
 * persists for the lifetime of the namespace.
 */
static void ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_GRE;
	iph->ihl		= 5;	/* no IP options */
	tunnel->hlen		= sizeof(struct iphdr) + 4;

	dev_hold(dev);
}
1401
1402
/* Hooks registered with the GRE demultiplexer for receive and ICMP errors. */
static const struct gre_protocol ipgre_protocol = {
	.handler     = ipgre_rcv,
	.err_handler = ipgre_err,
};
1407
1408 static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1409 {
1410         int prio;
1411
1412         for (prio = 0; prio < 4; prio++) {
1413                 int h;
1414                 for (h = 0; h < HASH_SIZE; h++) {
1415                         struct ip_tunnel *t;
1416
1417                         t = rtnl_dereference(ign->tunnels[prio][h]);
1418
1419                         while (t != NULL) {
1420                                 unregister_netdevice_queue(t->dev, head);
1421                                 t = rtnl_dereference(t->next);
1422                         }
1423                 }
1424         }
1425 }
1426
/*
 * Per-namespace init: create and register the fallback "gre0" device and
 * link it into the wildcard hash chain so unkeyed/unaddressed GRE traffic
 * reaches it.  Returns 0 or a negative errno (all allocations unwound).
 */
static int __net_init ipgre_init_net(struct net *net)
{
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					   ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	/* wildcard chain: matches GRE packets with no better tunnel */
	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	/* not yet registered: free directly via the destructor */
	ipgre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}
1455
1456 static void __net_exit ipgre_exit_net(struct net *net)
1457 {
1458         struct ipgre_net *ign;
1459         LIST_HEAD(list);
1460
1461         ign = net_generic(net, ipgre_net_id);
1462         rtnl_lock();
1463         ipgre_destroy_tunnels(ign, &list);
1464         unregister_netdevice_many(&list);
1465         rtnl_unlock();
1466 }
1467
/* Per-network-namespace lifecycle hooks and private state size. */
static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ipgre_net),
};
1474
1475 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
1476 {
1477         __be16 flags;
1478
1479         if (!data)
1480                 return 0;
1481
1482         flags = 0;
1483         if (data[IFLA_GRE_IFLAGS])
1484                 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1485         if (data[IFLA_GRE_OFLAGS])
1486                 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1487         if (flags & (GRE_VERSION|GRE_ROUTING))
1488                 return -EINVAL;
1489
1490         return 0;
1491 }
1492
1493 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
1494 {
1495         __be32 daddr;
1496
1497         if (tb[IFLA_ADDRESS]) {
1498                 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1499                         return -EINVAL;
1500                 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1501                         return -EADDRNOTAVAIL;
1502         }
1503
1504         if (!data)
1505                 goto out;
1506
1507         if (data[IFLA_GRE_REMOTE]) {
1508                 memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1509                 if (!daddr)
1510                         return -EINVAL;
1511         }
1512
1513 out:
1514         return ipgre_tunnel_validate(tb, data);
1515 }
1516
/*
 * Decode IFLA_GRE_* netlink attributes into a zeroed ip_tunnel_parm.
 * Missing attributes leave their fields at zero, except path-MTU
 * discovery (DF), which defaults to on unless explicitly disabled.
 */
static void ipgre_netlink_parms(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	/* PMTU discovery defaults to enabled */
	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}
1557
/*
 * ndo_init for gretap (Ethernet-over-GRE) devices: bind to the
 * underlying device and allocate per-cpu stats.  Unlike
 * ipgre_tunnel_init() it installs no header_ops and no GRO cells.
 * Returns 0 or -ENOMEM.
 */
static int ipgre_tap_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	ipgre_tunnel_bind_dev(dev);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
1575
/* Device operations for gretap (Ethernet-over-GRE) devices. */
static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};
1585
/*
 * alloc_netdev() setup callback for gretap devices: start from standard
 * Ethernet defaults, then install the gretap ops and destructor and pin
 * the device to its network namespace.
 */
static void ipgre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->netdev_ops		= &ipgre_tap_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->iflink		= 0;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
1597
/*
 * rtnl_link_ops ->newlink: create a new GRE/gretap device from netlink
 * attributes, bind it to its underlying device, register it and hash it
 * into the per-namespace tunnel table.  Called under RTNL.
 *
 * Returns 0 on success or a negative errno.
 */
static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	/* refuse a duplicate of an existing tunnel with the same parameters */
	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ipgre_tunnel_bind_dev(dev);
	/* a user-supplied MTU takes precedence over the computed one */
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	/* reference dropped when the tunnel is uninitialized */
	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}
1634
/*
 * rtnl_link_ops ->changelink: reconfigure an existing GRE/gretap device
 * from netlink attributes.  Changing the endpoints re-hashes the tunnel
 * in the per-namespace table; changing the underlying link rebinds the
 * device and recomputes its MTU.  The fallback device is immutable.
 * Called under RTNL.  Returns 0 or a negative errno.
 */
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, 0);

	if (t) {
		/* new parameters already belong to a different tunnel */
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			/* can't flip broadcast/p-t-p mode on a live device */
			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		/* endpoints changed: move the tunnel to its new hash chain */
		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = ipgre_tunnel_bind_dev(dev);
		/* a user-supplied MTU takes precedence over the computed one */
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}
1698
1699 static size_t ipgre_get_size(const struct net_device *dev)
1700 {
1701         return
1702                 /* IFLA_GRE_LINK */
1703                 nla_total_size(4) +
1704                 /* IFLA_GRE_IFLAGS */
1705                 nla_total_size(2) +
1706                 /* IFLA_GRE_OFLAGS */
1707                 nla_total_size(2) +
1708                 /* IFLA_GRE_IKEY */
1709                 nla_total_size(4) +
1710                 /* IFLA_GRE_OKEY */
1711                 nla_total_size(4) +
1712                 /* IFLA_GRE_LOCAL */
1713                 nla_total_size(4) +
1714                 /* IFLA_GRE_REMOTE */
1715                 nla_total_size(4) +
1716                 /* IFLA_GRE_TTL */
1717                 nla_total_size(1) +
1718                 /* IFLA_GRE_TOS */
1719                 nla_total_size(1) +
1720                 /* IFLA_GRE_PMTUDISC */
1721                 nla_total_size(1) +
1722                 0;
1723 }
1724
1725 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1726 {
1727         struct ip_tunnel *t = netdev_priv(dev);
1728         struct ip_tunnel_parm *p = &t->parms;
1729
1730         if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1731             nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
1732             nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
1733             nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1734             nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1735             nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1736             nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1737             nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1738             nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1739             nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1740                        !!(p->iph.frag_off & htons(IP_DF))))
1741                 goto nla_put_failure;
1742         return 0;
1743
1744 nla_put_failure:
1745         return -EMSGSIZE;
1746 }
1747
/*
 * Netlink validation policy for IFLA_GRE_* attributes.
 *
 * LOCAL/REMOTE carry raw network-order IPv4 addresses, so they are
 * validated by exact payload length rather than by a typed policy.
 */
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};
1760
/* rtnl_link ops for plain layer-3 "gre" tunnel devices. */
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
1773
/*
 * rtnl_link ops for "gretap" devices (Ethernet frames carried over GRE);
 * shares the netlink policy and dump helpers with plain "gre", differing
 * only in the device setup/validate callbacks.
 */
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
1786
1787 /*
1788  *      And now the modules code and kernel interface.
1789  */
1790
1791 static int __init ipgre_init(void)
1792 {
1793         int err;
1794
1795         pr_info("GRE over IPv4 tunneling driver\n");
1796
1797         err = register_pernet_device(&ipgre_net_ops);
1798         if (err < 0)
1799                 return err;
1800
1801         err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1802         if (err < 0) {
1803                 pr_info("%s: can't add protocol\n", __func__);
1804                 goto add_proto_failed;
1805         }
1806
1807         err = rtnl_link_register(&ipgre_link_ops);
1808         if (err < 0)
1809                 goto rtnl_link_failed;
1810
1811         err = rtnl_link_register(&ipgre_tap_ops);
1812         if (err < 0)
1813                 goto tap_ops_failed;
1814
1815 out:
1816         return err;
1817
1818 tap_ops_failed:
1819         rtnl_link_unregister(&ipgre_link_ops);
1820 rtnl_link_failed:
1821         gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1822 add_proto_failed:
1823         unregister_pernet_device(&ipgre_net_ops);
1824         goto out;
1825 }
1826
/*
 * Module exit: undo ipgre_init() strictly in reverse registration order,
 * so no new tunnels can be created while the protocol demux and pernet
 * state are being torn down.
 */
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	/* Removal can only fail if the handler was never ours; just log. */
	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	unregister_pernet_device(&ipgre_net_ops);
}
1835
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
/* Allow autoloading via "ip link add type gre|gretap" and by netdev name. */
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");