/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>

#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1<<PORT_HASH_BITS)
#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header: 20 + 8 + 8 + 14 = 50 bytes */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */

/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};

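/*
 * Illustrative sketch only (not part of the original driver): how the
 * 24-bit VNI maps onto vx_vni.  The VNI occupies bits 8..31 in network
 * byte order and the low 8 bits are reserved, matching the encode
 * (htonl(vni << 8)) in the xmit path and the decode (ntohl(vx_vni) >> 8)
 * in the receive path below.  The helper names are made up here.
 */
static inline __be32 vxlan_vni_field(u32 vni)
{
	return htonl(vni << 8);		/* VNI in bits 8..31 */
}

static inline u32 vxlan_vni_from_field(__be32 vx_vni)
{
	return ntohl(vx_vni) >> 8;	/* drop the reserved low byte */
}
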
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");

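/*
 * Example (illustration only): switching the module to the IANA port --
 *	modprobe vxlan udp_port=4789
 * The 0444 permission makes the parameter read-only through sysfs, so it
 * must be given at load time (or, when built in, on the kernel command
 * line as vxlan.udp_port=4789).
 */
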
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static int vxlan_net_id;

static const u8 all_zeros_mac[ETH_ALEN];

/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

struct vxlan_rdst {
	__be32			 remote_ip;
	__be16			 remote_port;
	u32			 remote_vni;
	u32			 remote_ifindex;
	struct list_head	 list;
	struct rcu_head		 rcu;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
	u8		  eth_addr[ETH_ALEN];
};

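/*
 * Shape of the table (descriptive sketch, not from the original
 * comments): one vxlan_fdb exists per MAC address and hangs off
 * fdb_head[eth_hash(mac)] below; each entry carries a list of
 * vxlan_rdst remotes, so a multicast or all-zeros address can fan out
 * to several tunnel endpoints while a learned unicast address normally
 * keeps a single remote.
 */
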
/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;	/* vni hash table */
	struct list_head  next;		/* vxlan's per namespace list */
	struct vxlan_sock *vn_sock;	/* listening socket */
	struct net_device *dev;
	struct vxlan_rdst default_dst;	/* default destination */
	__be32		  saddr;	/* source address */
	__be16		  dst_port;
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* below */

	struct work_struct sock_work;
	struct work_struct igmp_join;
	struct work_struct igmp_leave;

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};

#define VXLAN_F_LEARN	0x01
#define VXLAN_F_PROXY	0x02
#define VXLAN_F_RSC	0x04
#define VXLAN_F_L2MISS	0x08
#define VXLAN_F_L3MISS	0x10

/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;

static void vxlan_sock_work(struct work_struct *work);

/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
{
	return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

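/*
 * Demux sketch (descriptive, not from the original comments): receive
 * lookup is two hash tables deep.  vs_head() maps the UDP destination
 * port to a vxlan_sock in the per-namespace sock_list[], then
 * vni_head() maps the 24-bit VNI to a vxlan_dev on that socket's
 * vni_list[].
 */
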
/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}

/* Find VXLAN socket based on network namespace and UDP port */
static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
{
	struct vxlan_sock *vs;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port)
			return vs;
	}
	return NULL;
}

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
{
	struct vxlan_dev *vxlan;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
		if (vxlan->default_dst.remote_vni == id)
			return vxlan;
	}
	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, port);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, id);
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = rdst->remote_ip != htonl(INADDR_ANY);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(__be32)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
			     first_remote_rtnl(fdb));
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = ipa, /* goes to NDA_DST */
		.remote_vni = VXLAN_N_VID,
	};

	INIT_LIST_HEAD(&f.remotes);
	list_add_rcu(&remote.list, &f.remotes);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};

	INIT_LIST_HEAD(&f.remotes);
	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
	value >>= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

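/*
 * Example (illustration): for MAC 00:11:22:33:44:55 the unaligned
 * 64-bit load also picks up the two bytes that follow the address in
 * memory; the endian-dependent shifts above discard exactly those two
 * bytes, so only the six address octets feed hash_64().
 */
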
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (compare_ether_addr(mac, f->eth_addr) == 0)
			return f;
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}

/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      __be32 ip, __be16 port,
					      __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (rd->remote_ip == ip &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}

/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;
	rd->remote_ip = ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}

/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;
	rd->remote_ip = ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	return 1;
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, __be32 ip,
			    __u16 state, __u16 flags,
			    __be16 port, __u32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				int rc = vxlan_fdb_replace(f, ip, port, vni,
							   ifindex);

				if (rc < 0)
					return rc;
				notify |= rc;
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		vxlan_fdb_append(f, ip, port, vni, ifindex);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}

static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list)
		kfree(rd);
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}

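/*
 * Note on the teardown ordering above (descriptive, not from the
 * original comments): hlist_del_rcu() only unlinks the entry while
 * hash_lock is held, so lockless readers already traversing the chain
 * may still see it; the entry and its remotes are actually freed in
 * vxlan_fdb_free() once call_rcu() reports a grace period.
 */
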
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   __be32 *ip, __be16 *port, u32 *vni, u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);

	if (tb[NDA_DST]) {
		if (nla_len(tb[NDA_DST]) != sizeof(__be32))
			return -EAFNOSUPPORT;

		*ip = nla_get_be32(tb[NDA_DST]);
	} else {
		*ip = htonl(INADDR_ANY);
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = nla_get_u32(tb[NDA_VNI]);
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
		dev_put(tdev);
	} else {
		*ifindex = 0;
	}

	return 0;
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	__be32 ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	__be32 ip;
	__be16 port;
	u32 vni, ifindex;
	int err = -ENOENT;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (ip != htonl(INADDR_ANY)) {
		rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		kfree_rcu(rd, rcu);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		int err;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			if (idx < cb->args[0])
				goto skip;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
			}
skip:
			++idx;
		}
	}
out:
	return idx;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(rdst->remote_ip == src_ip))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &rdst->remote_ip, &src_ip);

		rdst->remote_ip = src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}

/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
{
	struct vxlan_dev *vxlan;

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev))
			continue;

		if (vxlan->default_dst.remote_ip == remote_ip)
			return true;
	}

	return false;
}

static void vxlan_sock_hold(struct vxlan_sock *vs)
{
	atomic_inc(&vs->refcnt);
}

void vxlan_sock_release(struct vxlan_sock *vs)
{
	struct vxlan_net *vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);

	if (!atomic_dec_and_test(&vs->refcnt))
		return;

	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	spin_unlock(&vn->sock_lock);

	queue_work(vxlan_wq, &vs->del_work);
}
EXPORT_SYMBOL_GPL(vxlan_sock_release);

/* Callback to update multicast group membership when first VNI on
 * multicast address is brought up.
 * Done as workqueue because ip_mc_join_group acquires RTNL.
 */
static void vxlan_igmp_join(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr	= vxlan->default_dst.remote_ip,
		.imr_ifindex		= vxlan->default_dst.remote_ifindex,
	};

	lock_sock(sk);
	ip_mc_join_group(sk, &mreq);
	release_sock(sk);

	vxlan_sock_release(vs);
	dev_put(vxlan->dev);
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static void vxlan_igmp_leave(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr	= vxlan->default_dst.remote_ip,
		.imr_ifindex		= vxlan->default_dst.remote_ifindex,
	};

	lock_sock(sk);
	ip_mc_leave_group(sk, &mreq);
	release_sock(sk);

	vxlan_sock_release(vs);
	dev_put(vxlan->dev);
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct vxlan_sock *vs;
	struct vxlanhdr *vxh;
	__be16 port;

	/* Need VXLAN and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto error;

	/* Return packets with reserved bits set */
	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
		goto drop;

	port = inet_sk(sk)->inet_sport;

	vs = vxlan_find_sock(sock_net(sk), port);
	if (!vs)
		goto drop;

	vs->rcv(vs, skb, vxh->vx_vni);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Return non-VXLAN packet to the UDP stack */
	return 1;
}

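/*
 * On-the-wire example (illustration): a datagram carrying VNI 5000 must
 * arrive with vx_flags == htonl(0x08000000) and vx_vni == htonl(5000 << 8).
 * Any other flag bit, or a non-zero low byte in vx_vni, trips the
 * reserved-bits check above and the packet is handed back to the UDP
 * stack.
 */
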
static void vxlan_rcv(struct vxlan_sock *vs,
		      struct sk_buff *skb, __be32 vx_vni)
{
	struct iphdr *oip;
	struct vxlan_dev *vxlan;
	struct pcpu_tstats *stats;
	__u32 vni;
	int err;

	vni = ntohl(vx_vni) >> 8;
	/* Is this VNI defined? */
	vxlan = vxlan_vs_find_vni(vs, vni);
	if (!vxlan)
		goto drop;

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	/* Re-examine inner Ethernet packet */
	oip = ip_hdr(skb);
	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
		goto drop;

	skb_reset_network_header(skb);

	/* If the NIC driver gave us an encapsulated packet with
	 * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
	 * leave the CHECKSUM_UNNECESSARY, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
	    !(vxlan->dev->features & NETIF_F_RXCSUM))
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;

	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}

static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;

	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && first_remote_rcu(f)->remote_ip == htonl(INADDR_ANY)) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);
		neigh_release(n);

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, tip);
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}

static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;
	struct iphdr *pip;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		break;
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, pip->daddr);

	return false;
}

static void vxlan_sock_put(struct sk_buff *skb)
{
	sock_put(skb->sk);
}

/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
{
	skb_orphan(skb);
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = vxlan_sock_put;
}

/* Compute source port for outgoing packet.
 * First choice is the L4 flow hash, since it spreads better and may be
 * available from hardware; the fallback is a jhash over the Ethernet
 * header.
 */
__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
{
	unsigned int range = (port_max - port_min) + 1;
	u32 hash;

	hash = skb_get_rxhash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);

	return htons((((u64) hash * range) >> 32) + port_min);
}
EXPORT_SYMBOL_GPL(vxlan_src_port);

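/*
 * Worked example (illustration): with the common local port range
 * port_min = 32768 and port_max = 61000, range is 28233.  A flow hash
 * of 0x9e3779b9 yields ((u64)0x9e3779b9 * 28233) >> 32 = 17448, so the
 * source port becomes 32768 + 17448 = 50216.  The multiply-shift scales
 * the full 32-bit hash onto [port_min, port_max] without a division.
 */
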
static int handle_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;

		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return 0;
}

int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs,
		   struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port, __be32 vni)
{
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	int min_headroom;
	int err;

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	if (vlan_tx_tag_present(skb)) {
		if (WARN_ON(!__vlan_put_tag(skb,
					    skb->vlan_proto,
					    vlan_tx_tag_get(skb))))
			return -ENOMEM;

		skb->vlan_tci = 0;
	}

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = vni;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);
	uh->check = 0;

	vxlan_set_owner(vs->sock->sk, skb);

	err = handle_offloads(skb);
	if (err)
		return err;

	return iptunnel_xmit(net, rt, skb, src, dst,
			     IPPROTO_UDP, tos, ttl, df);
}
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);

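/*
 * Resulting frame layout (descriptive sketch of the push sequence
 * above, outermost header first):
 *
 *   outer Ethernet | outer IP | UDP | VXLAN (8 bytes) | inner Ethernet frame
 *
 * vxlan_xmit_skb() pushes the VXLAN and UDP headers; iptunnel_xmit()
 * then adds the outer IP header and hands the skb to the IP output path.
 */
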
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);

	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, htonl(INADDR_LOOPBACK),
			    eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += skb->len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		skb->dev->stats.rx_dropped++;
	}
}

static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   struct vxlan_rdst *rdst, bool did_rsc)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct iphdr *old_iph;
	struct flowi4 fl4;
	__be32 dst;
	__be16 src_port, dst_port;
	u32 vni;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;

	dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
	vni = rdst->remote_vni;
	dst = rdst->remote_ip;

	if (!dst) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return;
		}
		goto drop;
	}

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = rdst->remote_ifindex;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.daddr = dst;
	fl4.saddr = vxlan->saddr;

	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		dev->stats.collisions++;
		goto rt_tx_error;
	}

	/* Bypass encapsulation if the destination is local */
	if (rt->rt_flags & RTCF_LOCAL &&
	    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
		struct vxlan_dev *dst_vxlan;

		ip_rt_put(rt);
		dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
		if (!dst_vxlan)
			goto tx_error;
		vxlan_encap_bypass(skb, vxlan, dst_vxlan);
		return;
	}

	tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
	ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);

	err = vxlan_xmit_skb(dev_net(dev), vxlan->vn_sock, rt, skb,
			     fl4.saddr, dst, tos, ttl, df,
			     src_port, dst_port, htonl(vni << 8));
	if (err < 0)
		goto rt_tx_error;
	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

	return;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

rt_tx_error:
	ip_rt_put(rt);
tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
}

/* Transmit local packets over VXLAN
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 *	source port is based on hash of flow
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst;
	struct vxlan_fdb *f;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
		return arp_reduce(dev, skb);

	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    ntohs(eth->h_proto) == ETH_P_IP) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}

static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
	__u32 vni = vxlan->default_dst.remote_vni;

	vxlan->vn_sock = vs;
	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
}

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_sock *vs;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	spin_lock(&vn->sock_lock);
	vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
	if (vs) {
		/* If we have a socket with same port already, reuse it */
		atomic_inc(&vs->refcnt);
		vxlan_vs_add_dev(vs, vxlan);
	} else {
		/* otherwise make new socket outside of RTNL */
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->sock_work);
	}
	spin_unlock(&vn->sock_lock);

	return 0;
}

static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}

static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	vxlan_fdb_delete_default(vxlan);

	if (vs)
		vxlan_sock_release(vs);
	free_percpu(dev->tstats);
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	/* socket hasn't been created */
	if (!vs)
		return -ENOTCONN;

	if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
	    vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
		vxlan_sock_hold(vs);
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->igmp_join);
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
	    !vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
		vxlan_sock_hold(vs);
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->igmp_leave);
	}

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};

/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int low, high;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;
	dev->features	|= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->priv_flags	&= ~IFF_XMIT_DST_RELEASE;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);
	INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
	INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
	INIT_WORK(&vxlan->sock_work, vxlan_sock_work);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(&low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;
	vxlan->dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}

static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
};

static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static void vxlan_del_work(struct work_struct *work)
{
	struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);

	sk_release_kernel(vs->sock->sk);
	kfree_rcu(vs, rcu);
}

static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
					      vxlan_rcv_t *rcv, void *data)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct sock *sk;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = port,
	};
	int rc;
	unsigned int h;

	vs = kmalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs) {
		pr_debug("memory allocation failure\n");
		return ERR_PTR(-ENOMEM);
	}

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	INIT_WORK(&vs->del_work, vxlan_del_work);

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		kfree(vs);
		return ERR_PTR(rc);
	}

	/* Put in proper namespace */
	sk = vs->sock->sk;
	sk_change_net(sk, net);

	rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);
		kfree(vs);
		return ERR_PTR(rc);
	}
	atomic_set(&vs->refcnt, 1);
	vs->rcv = rcv;
	vs->data = data;

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;
	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
	udp_encap_enable();

	return vs;
}

struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
				  vxlan_rcv_t *rcv, void *data,
				  bool no_share)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;

	vs = vxlan_socket_create(net, port, rcv, data);
	if (!IS_ERR(vs))
		return vs;

	if (no_share)	/* Return error if sharing is not allowed. */
		return vs;

	spin_lock(&vn->sock_lock);
	vs = vxlan_find_sock(net, port);
	if (vs) {
		if (vs->rcv == rcv)
			atomic_inc(&vs->refcnt);
		else
			vs = ERR_PTR(-EBUSY);
	}
	spin_unlock(&vn->sock_lock);

	if (!vs)
		vs = ERR_PTR(-EINVAL);

	return vs;
}
EXPORT_SYMBOL_GPL(vxlan_sock_add);

/* Scheduled at device creation to bind to a socket */
static void vxlan_sock_work(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
	struct net *net = dev_net(vxlan->dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	__be16 port = vxlan->dst_port;
	struct vxlan_sock *nvs;

	nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false);
	spin_lock(&vn->sock_lock);
	if (!IS_ERR(nvs))
		vxlan_vs_add_dev(nvs, vxlan);
	spin_unlock(&vn->sock_lock);

	dev_put(vxlan->dev);
}

static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	__u32 vni;
	int err;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	dst->remote_vni = vni;

	if (data[IFLA_VXLAN_GROUP])
		dst->remote_ip = nla_get_be32(data[IFLA_VXLAN_GROUP]);

	if (data[IFLA_VXLAN_LOCAL])
		vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

	if (data[IFLA_VXLAN_LINK] &&
	    (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			= __dev_get_by_index(net, dst->remote_ifindex);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +
				       VXLAN_HEADROOM;
	}

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}

	SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);

	/* create an fdb entry for default destination */
	err = vxlan_fdb_create(vxlan, all_zeros_mac,
			       vxlan->default_dst.remote_ip,
			       NUD_REACHABLE|NUD_PERMANENT,
			       NLM_F_EXCL|NLM_F_CREATE,
			       vxlan->dst_port, vxlan->default_dst.remote_vni,
			       vxlan->default_dst.remote_ifindex, NTF_SELF);
	if (err)
		return err;

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		nla_total_size(sizeof(__be16)) +/* IFLA_VXLAN_PORT */
		0;
}

static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low  = htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
		goto nla_put_failure;

	if (dst->remote_ip && nla_put_be32(skb, IFLA_VXLAN_GROUP, dst->remote_ip))
		goto nla_put_failure;

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};

static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}

static __net_exit void vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(vxlan, &vn->vxlan_list, next)
		unregister_netdevice_queue(vxlan->dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

static int __init vxlan_init_module(void)
{
	int rc;

	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
	if (!vxlan_wq)
		return -ENOMEM;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_device(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out2;

	return 0;

out2:
	unregister_pernet_device(&vxlan_net_ops);
out1:
	destroy_workqueue(vxlan_wq);
	return rc;
}
late_initcall(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	destroy_workqueue(vxlan_wq);
	unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_ALIAS_RTNL_LINK("vxlan");
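
/*
 * Illustrative usage from userspace (iproute2; not part of this file,
 * and exact option names depend on the iproute2 version):
 *
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0 \
 *		dstport 4789
 *	ip link set vxlan0 up
 *	bridge fdb add 00:11:22:33:44:55 dev vxlan0 dst 192.0.2.1
 */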