/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, which works because cpu migration is forbidden once we enter
   the first ndo_xmit(). We force an exit if this counter reaches
   RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the upper header. It is a very good solution, but it introduces
   two problems:

   - Routing protocols using packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea turned out to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all routers (at least, in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. To be short, ttl is
   not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the encapsulating packets have
   DF set. But it is not our problem! Nobody could accuse us; we made
   all that we could. Even if it is your gated who injected the fatal
   route into the network, even if it was you who configured the fatal
   static route: you are innocent. :-)
 */
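/* A minimal sketch (illustration only, compiled out) of the per-CPU
 * recursion guard described above.  The real counter, xmit_recursion,
 * lives in net/core/dev.c and is maintained by the core transmit path;
 * the names below are simplified stand-ins, not part of this file.
 */
#if 0
static DEFINE_PER_CPU(int, example_xmit_recursion);
#define EXAMPLE_RECURSION_LIMIT	10

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Bottom halves are disabled on the transmit path, so the CPU
	 * cannot change between the inc and the dec below.
	 */
	if (__this_cpu_read(example_xmit_recursion) > EXAMPLE_RECURSION_LIMIT) {
		kfree_skb(skb);		/* break the dead loop */
		return NET_XMIT_DROP;
	}
	__this_cpu_inc(example_xmit_recursion);
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	__this_cpu_dec(example_xmit_recursion);
	return NET_XMIT_SUCCESS;
}
#endif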
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);

static int ipgre_net_id __read_mostly;
static int gre_tap_net_id __read_mostly;
static int ip_gre_calc_hlen(__be16 o_flags)
{
	/* Base GRE header is 4 bytes; each optional field adds 4 more. */
	int addend = 4;

	if (o_flags & TUNNEL_CSUM)
		addend += 4;
	if (o_flags & TUNNEL_KEY)
		addend += 4;
	if (o_flags & TUNNEL_SEQ)
		addend += 4;
	return addend;
}
static __be16 gre_flags_to_tnl_flags(__be16 flags)
{
	__be16 tflags = 0;

	if (flags & GRE_CSUM)
		tflags |= TUNNEL_CSUM;
	if (flags & GRE_ROUTING)
		tflags |= TUNNEL_ROUTING;
	if (flags & GRE_KEY)
		tflags |= TUNNEL_KEY;
	if (flags & GRE_SEQ)
		tflags |= TUNNEL_SEQ;
	if (flags & GRE_STRICT)
		tflags |= TUNNEL_STRICT;
	if (flags & GRE_REC)
		tflags |= TUNNEL_REC;
	if (flags & GRE_VERSION)
		tflags |= TUNNEL_VERSION;

	return tflags;
}
static __be16 tnl_flags_to_gre_flags(__be16 tflags)
{
	__be16 flags = 0;

	if (tflags & TUNNEL_CSUM)
		flags |= GRE_CSUM;
	if (tflags & TUNNEL_ROUTING)
		flags |= GRE_ROUTING;
	if (tflags & TUNNEL_KEY)
		flags |= GRE_KEY;
	if (tflags & TUNNEL_SEQ)
		flags |= GRE_SEQ;
	if (tflags & TUNNEL_STRICT)
		flags |= GRE_STRICT;
	if (tflags & TUNNEL_REC)
		flags |= GRE_REC;
	if (tflags & TUNNEL_VERSION)
		flags |= GRE_VERSION;

	return flags;
}
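/* For reference, the wire format these conversions describe
 * (RFC 2784/2890): a 4-byte base header followed by the optional
 * fields in a fixed order when the corresponding flag bit is set.
 * The struct below is a hypothetical expanded view for illustration
 * only; the real code computes offsets with ip_gre_calc_hlen()
 * instead of using such a struct.
 */
#if 0
struct gre_full_hdr_example {
	__be16	flags;		/* C|K|S bits, Reserved0, Ver */
	__be16	protocol;	/* e.g. ETH_P_TEB for gretap */
	__sum16	csum;		/* present iff GRE_CSUM */
	__be16	reserved1;	/* present iff GRE_CSUM */
	__be32	key;		/* present iff GRE_KEY */
	__be32	seq;		/* present iff GRE_SEQ */
};
#endif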
static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
			    bool *csum_err)
{
	const struct gre_base_hdr *greh;
	__be32 *options;
	int hdr_len;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
		return -EINVAL;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);
	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
		return -EINVAL;

	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
	hdr_len = ip_gre_calc_hlen(tpi->flags);

	if (!pskb_may_pull(skb, hdr_len))
		return -EINVAL;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);
	tpi->proto = greh->protocol;

	options = (__be32 *)(greh + 1);
	if (greh->flags & GRE_CSUM) {
		if (skb_checksum_simple_validate(skb)) {
			*csum_err = true;
			return -EINVAL;
		}

		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
					 null_compute_pseudo);
		options++;
	}

	if (greh->flags & GRE_KEY) {
		tpi->key = *options;
		options++;
	} else {
		tpi->key = 0;
	}
	if (unlikely(greh->flags & GRE_SEQ)) {
		tpi->seq = *options;
		options++;
	} else {
		tpi->seq = 0;
	}
	/* WCCP version 1 and 2 protocol decoding.
	 * - Change protocol to IP
	 * - When dealing with WCCPv2, skip extra 4 bytes in GRE header
	 */
	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
		tpi->proto = htons(ETH_P_IP);
		if ((*(u8 *)options & 0xF0) != 0x40) {
			hdr_len += 4;
			if (!pskb_may_pull(skb, hdr_len))
				return -EINVAL;
		}
	}
	return iptunnel_pull_header(skb, hdr_len, tpi->proto);
}
static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. It makes it impossible to maintain even soft
	   state for keyed GRE tunnels with enabled checksum. Tell
	   them "thank you".

	   Well, I wonder: rfc1812 was written by a Cisco employee;
	   why the hell do these idiots break standards established
	   by themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;

	case ICMP_REDIRECT:
		break;
	}

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}
static void gre_err(struct sk_buff *skb, u32 info)
{
	/* See the comment at the top of ipgre_err() for why precise
	 * relaying of ICMP in the real Internet is infeasible.
	 */
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;

	if (parse_gre_header(skb, &tpi, &csum_err)) {
		if (!csum_err)		/* ignore csum errors. */
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}
static __be64 key_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)((__force u32)key);
#else
	return (__force __be64)((__force u64)key << 32);
#endif
}

/* Returns the least-significant 32 bits of a __be64. */
static __be32 tunnel_id_to_key(__be64 x)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)x;
#else
	return (__force __be32)((__force u64)x >> 32);
#endif
}
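/* Worked example (illustrative): a wire key of 1, i.e. bytes
 * 00 00 00 01, must map to tunnel id 1.  On big-endian hosts the
 * __be32 already reads as 1, so zero-extending to 64 bits suffices.
 * On little-endian hosts the __be32 holds 0x01000000 as a u32;
 * shifting it left by 32 places those bytes in the upper half of the
 * u64, which is exactly where the low-order half of a __be64 lives in
 * memory.  Either way the resulting __be64 reads back as 1.
 */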
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		skb_pop_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;
}
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
		goto drop;

	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
		return 0;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}
static __sum16 gre_checksum(struct sk_buff *skb)
{
	__wsum csum;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csum = lco_csum(skb);
	else
		csum = skb_checksum(skb, 0, skb->len, 0);
	return csum_fold(csum);
}
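/* Worked example (illustrative): csum_fold() collapses a 32-bit
 * one's-complement sum into the final 16-bit checksum by adding the
 * two halves with end-around carry and inverting.  For a running sum
 * of 0x12345678:
 *
 *	0x1234 + 0x5678 = 0x68ac	(no carry out)
 *	~0x68ac         = 0x9753	(value stored on the wire)
 */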
static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
			 __be16 proto, __be32 key, __be32 seq)
{
	struct gre_base_hdr *greh;

	skb_push(skb, hdr_len);

	skb_reset_transport_header(skb);
	greh = (struct gre_base_hdr *)skb->data;
	greh->flags = tnl_flags_to_gre_flags(flags);
	greh->protocol = proto;

	if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
		/* Fill the optional fields from the last one backwards,
		 * so each present field lands at its fixed wire offset.
		 */
		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

		if (flags & TUNNEL_SEQ) {
			*ptr = seq;
			ptr--;
		}
		if (flags & TUNNEL_KEY) {
			*ptr = key;
			ptr--;
		}
		if (flags & TUNNEL_CSUM &&
		    !(skb_shinfo(skb)->gso_type &
		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
			*ptr = 0;
			*(__sum16 *)ptr = gre_checksum(skb);
		}
	}
}
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
		     proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));

	skb_set_inner_protocol(skb, proto);
	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
					   bool csum)
{
	return iptunnel_handle_offloads(skb, csum,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt;
	struct flowi4 fl;
	int min_headroom;
	int tunnel_hlen;
	__be16 df, flags;
	int err;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	rt = gre_get_rt(skb, dev, &fl, key);
	if (IS_ERR(rt))
		goto err_free_skb;

	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb));

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* Push Tunnel header. */
	skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
	if (IS_ERR(skb)) {
		/* gre_handle_offloads() has already freed the skb. */
		skb = NULL;
		goto err_free_rt;
	}

	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
	build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
		     tunnel_id_to_key(tun_info->key.tun_id), 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
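/* Usage note (illustrative): the collect_md path above is what a
 * flow-based device uses, created from userspace with e.g.
 *
 *	ip link add gretap1 type gretap external
 *
 * Each skb then carries its own destination, key and flags in the
 * tunnel metadata dst instead of taking them from tunnel->parms.
 */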
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM));
	if (IS_ERR(skb))
		goto out;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
out:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM));
	if (IS_ERR(skb))
		goto out;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
out:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	int err;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;
	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	p.i_flags = tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;
	return 0;
}
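/* For illustration, the userspace side of the ioctl above, roughly as
 * iproute2's "ip tunnel add" performs it.  A minimal sketch, compiled
 * out of the kernel build; error handling omitted and the helper name
 * is hypothetical.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>

static int add_gre_tunnel(const char *name, __be32 local, __be32 remote)
{
	struct ip_tunnel_parm p;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&p, 0, sizeof(p));
	strncpy(p.name, name, IFNAMSIZ - 1);
	p.iph.version = 4;
	p.iph.ihl = 5;
	p.iph.protocol = IPPROTO_GRE;
	p.iph.frag_off = htons(IP_DF);	/* anything but DF is rejected */
	p.iph.saddr = local;		/* network byte order */
	p.iph.daddr = remote;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "gre0", IFNAMSIZ - 1);	/* base device */
	ifr.ifr_ifru.ifru_data = (void *)&p;
	return ioctl(fd, SIOCADDTUNNEL, &ifr);
}
#endif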
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so that I had to set ARPHRD_IPGRE to a random value.
   I have an impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph+1);
	greh->flags = tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};
#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->type		= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}
static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	int t_hlen;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->needed_headroom	= LL_MAX_HEADER + t_hlen + 4;
	dev->mtu		= ETH_DATA_LEN - t_hlen - 4;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported. */
		dev->features    |= NETIF_F_GSO_SOFTWARE;
		dev->hw_features |= NETIF_F_GSO_SOFTWARE;

		/* Can use a lockless transmit, unless we generate
		 * output sequences.
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
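/* Worked example: with TUNNEL_CSUM|TUNNEL_KEY|TUNNEL_SEQ and no
 * secondary encapsulation, tun_hlen = 4 + 4 + 4 + 4 = 16, so
 * t_hlen = 16 + 20 (IPv4 header) = 36 and the default mtu becomes
 * ETH_DATA_LEN - 36 - 4 = 1500 - 40 = 1460; the extra 4 bytes are
 * slack, mirroring the reserve added to needed_headroom above.
 */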
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}
static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};
static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);

	ip_tunnel_delete_net(itn, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	return 0;
}
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}
static void ipgre_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->collect_md = true;
	}
}
/* This function returns true when ENCAP attributes are present in the
 * netlink message.
 */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}
static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return ip_tunnel_init(dev);
}
static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops		= &gre_tap_netdev_ops;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipgre_netlink_parms(dev, data, tb, &p);
	return ip_tunnel_newlink(dev, tb, &p);
}
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipgre_netlink_parms(dev, data, tb, &p);
	return ip_tunnel_changelink(dev, tb, &p);
}
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		0;
}
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]			= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]		= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]		= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]			= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]			= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]		= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]			= { .type = NLA_U8 },
	[IFLA_GRE_TOS]			= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]		= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]		= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]		= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]		= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL);
	if (err < 0)
		goto out;
	return dev;
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);

	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit = ipgre_tap_exit_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	return 0;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
}
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
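/* Quick usage reference (illustrative, mirroring the aliases above;
 * the addresses are documentation addresses):
 *
 *	modprobe ip_gre
 *	ip link add gre1 type gre local 198.51.100.2 remote 203.0.113.1 ttl 64
 *	ip link add tap1 type gretap local 198.51.100.2 remote 203.0.113.1
 *
 * "gre" creates a layer-3 tunnel handled by ipgre_link_ops; "gretap"
 * creates an Ethernet-in-GRE device handled by ipgre_tap_ops.
 */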