/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>

/**
 * switchdev_trans_item_enqueue - Enqueue data item to transaction queue
 *
 * @trans: transaction
 * @data: pointer to data being queued
 * @destructor: data destructor
 * @tritem: transaction item being queued
 *
 * Enqueue data item to transaction queue. tritem is typically placed in
 * container pointed at by data pointer. Destructor is called on
 * transaction abort and after successful commit phase in case
 * the caller did not dequeue the item before.
 */
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
				  void *data, void (*destructor)(void const *),
				  struct switchdev_trans_item *tritem)
{
	tritem->data = data;
	tritem->destructor = destructor;
	list_add_tail(&tritem->list, &trans->item_list);
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);

static struct switchdev_trans_item *
__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	if (list_empty(&trans->item_list))
		return NULL;
	tritem = list_first_entry(&trans->item_list,
				  struct switchdev_trans_item, list);
	list_del(&tritem->list);
	return tritem;
}

/**
 * switchdev_trans_item_dequeue - Dequeue data item from transaction queue
 *
 * @trans: transaction
 */
void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	tritem = __switchdev_trans_item_dequeue(trans);
	BUG_ON(!tritem);
	return tritem->data;
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);
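
/* Example: a hypothetical driver (the names example_vlan_ctx and
 * example_port_vlan_add are illustrative only, not a real in-tree driver)
 * can reserve memory during the prepare phase and pick it up again in the
 * commit phase by queueing it on the transaction, with kfree as the
 * destructor used on transaction abort:
 *
 *	struct example_vlan_ctx {
 *		struct switchdev_trans_item tritem;
 *		u16 vid;
 *	};
 *
 *	static int example_port_vlan_add(struct net_device *dev, u16 vid,
 *					 struct switchdev_trans *trans)
 *	{
 *		struct example_vlan_ctx *ctx;
 *
 *		if (switchdev_trans_ph_prepare(trans)) {
 *			ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *			if (!ctx)
 *				return -ENOMEM;
 *			ctx->vid = vid;
 *			switchdev_trans_item_enqueue(trans, ctx, kfree,
 *						     &ctx->tritem);
 *			return 0;
 *		}
 *
 *		ctx = switchdev_trans_item_dequeue(trans);
 *		... program ctx->vid into the device ...
 *		kfree(ctx);
 *		return 0;
 *	}
 */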

static void switchdev_trans_init(struct switchdev_trans *trans)
{
	INIT_LIST_HEAD(&trans->item_list);
}

static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	while ((tritem = __switchdev_trans_item_dequeue(trans)))
		tritem->destructor(tritem->data);
}

static void switchdev_trans_items_warn_destroy(struct net_device *dev,
					       struct switchdev_trans *trans)
{
	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
	     dev->name);
	switchdev_trans_items_destroy(trans);
}

/**
 * switchdev_port_attr_get - Get port attribute
 *
 * @dev: port device
 * @attr: attribute to get
 */
int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	struct switchdev_attr first = {
		.id = SWITCHDEV_ATTR_ID_UNDEFINED
	};
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_get)
		return ops->switchdev_port_attr_get(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to get attr on
	 * each port.  Return -ENODATA if attr values don't
	 * compare across ports.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_attr_get(lower_dev, attr);
		if (err)
			break;
		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
			first = *attr;
		else if (memcmp(&first, attr, sizeof(*attr)))
			return -ENODATA;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
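
/* Example: a caller can query the physical switch ID of a port (the
 * attribute recursed to above) with something like the sketch below;
 * port_dev stands for whatever port or stacked device the caller holds:
 *
 *	struct switchdev_attr attr = {
 *		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 *	};
 *	int err;
 *
 *	err = switchdev_port_attr_get(port_dev, &attr);
 *	if (!err)
 *		... attr.u.ppid now holds the switch ID ...
 */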

static int __switchdev_port_attr_set(struct net_device *dev,
				     struct switchdev_attr *attr,
				     struct switchdev_trans *trans)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_set)
		return ops->switchdev_port_attr_set(dev, attr, trans);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to set attr on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_attr_set(lower_dev, attr, trans);
		if (err)
			break;
	}

	return err;
}

struct switchdev_attr_set_work {
	struct work_struct work;
	struct net_device *dev;
	struct switchdev_attr attr;
};

static void switchdev_port_attr_set_work(struct work_struct *work)
{
	struct switchdev_attr_set_work *asw =
		container_of(work, struct switchdev_attr_set_work, work);
	int err;

	rtnl_lock();
	err = switchdev_port_attr_set(asw->dev, &asw->attr);
	if (err && err != -EOPNOTSUPP)
		netdev_err(asw->dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, asw->attr.id);
	rtnl_unlock();

	dev_put(asw->dev);
	kfree(work);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 struct switchdev_attr *attr)
{
	struct switchdev_attr_set_work *asw;

	asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
	if (!asw)
		return -ENOMEM;

	INIT_WORK(&asw->work, switchdev_port_attr_set_work);

	dev_hold(dev);
	asw->dev = dev;
	memcpy(&asw->attr, attr, sizeof(asw->attr));

	schedule_work(&asw->work);

	return 0;
}

/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 */
int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	if (!rtnl_is_locked()) {
		/* Running prepare-commit transaction across stacked
		 * devices requires nothing moves, so if rtnl_lock is
		 * not held, schedule a worker thread to hold rtnl_lock
		 * while setting attr.
		 */

		return switchdev_port_attr_set_defer(dev, attr);
	}

	switchdev_trans_init(&trans);

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit attr set.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
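
/* Example: a caller (the bridge does something similar when a port changes
 * STP state) can push a port's forwarding state down to the switch driver
 * with the sketch below; port_dev is illustrative.  Without rtnl_lock held
 * the set is deferred to the worker above, with it held the prepare/commit
 * transaction runs synchronously:
 *
 *	struct switchdev_attr attr = {
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(port_dev, &attr);
 */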

static int __switchdev_port_obj_add(struct net_device *dev,
				    const struct switchdev_obj *obj,
				    struct switchdev_trans *trans)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_add)
		return ops->switchdev_port_obj_add(dev, obj, trans);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to add object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_obj_add(lower_dev, obj, trans);
		if (err)
			break;
	}

	return err;
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * rtnl_lock must be held.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	struct switchdev_trans trans;
	int err;

	ASSERT_RTNL();

	switchdev_trans_init(&trans);

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	trans.ph_prepare = true;
	err = __switchdev_port_obj_add(dev, obj, &trans);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit obj add.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = __switchdev_port_obj_add(dev, obj, &trans);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
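
/* Example: with rtnl_lock held, a caller can program VLAN 10 as the
 * untagged PVID on a port through the transactional object API; port_dev
 * is illustrative, and switchdev_port_obj_del() takes the same object to
 * undo the add:
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid_begin = 10,
 *		.vid_end = 10,
 *	};
 *
 *	err = switchdev_port_obj_add(port_dev, &vlan.obj);
 */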

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_del)
		return ops->switchdev_port_obj_del(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to delete object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_del(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 * switchdev_port_obj_dump - Dump port objects
 *
 * @dev: port device
 * @obj: object to dump
 * @cb: function to call with a filled object
 */
int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
			    switchdev_obj_dump_cb_t *cb)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_dump)
		return ops->switchdev_port_obj_dump(dev, obj, cb);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to dump objects on
	 * first port at bottom of stack.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_dump(lower_dev, obj, cb);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);

static DEFINE_MUTEX(switchdev_mutex);
static RAW_NOTIFIER_HEAD(switchdev_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier. This should be used by code
 * which needs to monitor events happening in particular device.
 * Return values are same as for atomic_notifier_chain_register().
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 * Return values are same as for atomic_notifier_chain_unregister().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 *
 * Call all switchdev notifier blocks. This should be called by driver
 * when it needs to propagate hardware event.
 * Return values are same as for atomic_notifier_call_chain().
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info)
{
	int err;

	info->dev = dev;
	mutex_lock(&switchdev_mutex);
	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
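
/* Example: a driver that learned a MAC address in hardware can propagate
 * the event to the bridge with an FDB notification, typically from process
 * context with rtnl_lock held since notifier callbacks may sleep; mac, vid
 * and port_dev below stand for values taken from the hardware learn event:
 *
 *	struct switchdev_notifier_fdb_info fdb_info = {
 *		.addr = mac,
 *		.vid = vid,
 *	};
 *
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD, port_dev, &fdb_info.info);
 */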

struct switchdev_vlan_dump {
	struct switchdev_obj_port_vlan vlan;
	struct sk_buff *skb;
	u32 filter_mask;
	u16 flags;
	u16 begin;
	u16 end;
};

static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
{
	struct bridge_vlan_info vinfo;

	vinfo.flags = dump->flags;

	if (dump->begin == 0 && dump->end == 0) {
		return 0;
	} else if (dump->begin == dump->end) {
		vinfo.vid = dump->begin;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
	} else {
		vinfo.vid = dump->begin;
		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
		vinfo.vid = dump->end;
		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
	}

	return 0;
}

static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct switchdev_vlan_dump *dump =
		container_of(vlan, struct switchdev_vlan_dump, vlan);
	int err = 0;

	if (vlan->vid_begin > vlan->vid_end)
		return -EINVAL;

	if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
		dump->flags = vlan->flags;
		for (dump->begin = dump->end = vlan->vid_begin;
		     dump->begin <= vlan->vid_end;
		     dump->begin++, dump->end++) {
			err = switchdev_port_vlan_dump_put(dump);
			if (err)
				return err;
		}
	} else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
		if (dump->begin > vlan->vid_begin &&
		    dump->begin >= vlan->vid_end) {
			if ((dump->begin - 1) == vlan->vid_end &&
			    dump->flags == vlan->flags) {
				/* prepend */
				dump->begin = vlan->vid_begin;
			} else {
				err = switchdev_port_vlan_dump_put(dump);
				dump->flags = vlan->flags;
				dump->begin = vlan->vid_begin;
				dump->end = vlan->vid_end;
			}
		} else if (dump->end <= vlan->vid_begin &&
			   dump->end < vlan->vid_end) {
			if ((dump->end + 1) == vlan->vid_begin &&
			    dump->flags == vlan->flags) {
				/* append */
				dump->end = vlan->vid_end;
			} else {
				err = switchdev_port_vlan_dump_put(dump);
				dump->flags = vlan->flags;
				dump->begin = vlan->vid_begin;
				dump->end = vlan->vid_end;
			}
		} else {
			err = -EINVAL;
		}
	}

	return err;
}

static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
				    u32 filter_mask)
{
	struct switchdev_vlan_dump dump = {
		.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.skb = skb,
		.filter_mask = filter_mask,
	};
	int err = 0;

	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
					      switchdev_port_vlan_dump_cb);
		if (err)
			goto err_out;
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			/* last one */
			err = switchdev_port_vlan_dump_put(&dump);
	}

err_out:
	return err == -EOPNOTSUPP ? 0 : err;
}

/**
 * switchdev_port_bridge_getlink - Get bridge port attributes
 *
 * @dev: port device
 *
 * Called for SELF on rtnl_bridge_getlink to get bridge port
 * attributes.
 */
int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
	};
	u16 mode = BRIDGE_MODE_UNDEF;
	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
				       attr.u.brport_flags, mask, nlflags,
				       filter_mask, switchdev_port_vlan_fill);
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);

static int switchdev_port_br_setflag(struct net_device *dev,
				     struct nlattr *nlattr,
				     unsigned long brport_flag)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
	};
	u8 flag = nla_get_u8(nlattr);
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	if (flag)
		attr.u.brport_flags |= brport_flag;
	else
		attr.u.brport_flags &= ~brport_flag;

	return switchdev_port_attr_set(dev, &attr);
}

static const struct nla_policy
switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
};

static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
					      struct nlattr *protinfo)
{
	struct nlattr *attr;
	int rem;
	int err;

	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
				  switchdev_port_bridge_policy);
	if (err)
		return err;

	nla_for_each_nested(attr, protinfo, rem) {
		switch (nla_type(attr)) {
		case IFLA_BRPORT_LEARNING:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING);
			break;
		case IFLA_BRPORT_LEARNING_SYNC:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING_SYNC);
			break;
		default:
			err = -EOPNOTSUPP;
			break;
		}
		if (err)
			return err;
	}

	return 0;
}

static int switchdev_port_br_afspec(struct net_device *dev,
				    struct nlattr *afspec,
				    int (*f)(struct net_device *dev,
					     const struct switchdev_obj *obj))
{
	struct nlattr *attr;
	struct bridge_vlan_info *vinfo;
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
	};
	int rem;
	int err;

	nla_for_each_nested(attr, afspec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;
		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
			return -EINVAL;
		vinfo = nla_data(attr);
		vlan.flags = vinfo->flags;
		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			if (vlan.vid_begin)
				return -EINVAL;
			vlan.vid_begin = vinfo->vid;
		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
			if (!vlan.vid_begin)
				return -EINVAL;
			vlan.vid_end = vinfo->vid;
			if (vlan.vid_end <= vlan.vid_begin)
				return -EINVAL;
			err = f(dev, &vlan.obj);
			if (err)
				return err;
			memset(&vlan, 0, sizeof(vlan));
		} else {
			if (vlan.vid_begin)
				return -EINVAL;
			vlan.vid_begin = vinfo->vid;
			vlan.vid_end = vinfo->vid;
			err = f(dev, &vlan.obj);
			if (err)
				return err;
			memset(&vlan, 0, sizeof(vlan));
		}
	}

	return 0;
}

/**
 * switchdev_port_bridge_setlink - Set bridge port attributes
 *
 * @dev: port device
 * @nlh: netlink header
 * @flags: netlink flags
 *
 * Called for SELF on rtnl_bridge_setlink to set bridge port
 * attributes.
 */
int switchdev_port_bridge_setlink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				   IFLA_PROTINFO);
	if (protinfo) {
		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
		if (err)
			return err;
	}

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		err = switchdev_port_br_afspec(dev, afspec,
					       switchdev_port_obj_add);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);

/**
 * switchdev_port_bridge_dellink - Delete bridge port attributes
 *
 * @dev: port device
 * @nlh: netlink header
 * @flags: netlink flags
 *
 * Called for SELF on rtnl_bridge_dellink to delete bridge port
 * attributes.
 */
int switchdev_port_bridge_dellink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		return switchdev_port_br_afspec(dev, afspec,
						switchdev_port_obj_del);

	return 0;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);

/**
 * switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
 *
 * @ndm: netlink hdr
 * @tb: netlink attributes
 * @dev: port device
 * @addr: MAC address to add
 * @vid: VLAN to add
 *
 * Add FDB entry to switch device.
 */
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid, u16 nlm_flags)
{
	struct switchdev_obj_port_fdb fdb = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.addr = addr,
		.vid = vid,
	};

	return switchdev_port_obj_add(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);

/**
 * switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
 *
 * @ndm: netlink hdr
 * @tb: netlink attributes
 * @dev: port device
 * @addr: MAC address to delete
 * @vid: VLAN to delete
 *
 * Delete FDB entry from switch device.
 */
int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid)
{
	struct switchdev_obj_port_fdb fdb = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.addr = addr,
		.vid = vid,
	};

	return switchdev_port_obj_del(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);

struct switchdev_fdb_dump {
	struct switchdev_obj_port_fdb fdb;
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
{
	struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
	struct switchdev_fdb_dump *dump =
		container_of(fdb, struct switchdev_fdb_dump, fdb);
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[0])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = fdb->ndm_state;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
		goto nla_put_failure;

	if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

/**
 * switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
 *
 * @skb: netlink skb
 * @cb: netlink callback
 * @dev: port device
 * @filter_dev: filter device
 * @idx: index to start dumping at
 *
 * Dump FDB (MAC/VLAN) entries of a port from switch device.
 */
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev,
			    struct net_device *filter_dev, int idx)
{
	struct switchdev_fdb_dump dump = {
		.fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = idx,
	};

	switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb);

	return dump.idx;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
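
/* Example: a switch port driver typically wires these helpers directly into
 * its netdev ops so that bridge and FDB netlink requests carrying the SELF
 * flag are handed to the switchdev API (rocker does this; the struct name
 * below is illustrative):
 *
 *	static const struct net_device_ops example_port_netdev_ops = {
 *		...
 *		.ndo_fdb_add		= switchdev_port_fdb_add,
 *		.ndo_fdb_del		= switchdev_port_fdb_del,
 *		.ndo_fdb_dump		= switchdev_port_fdb_dump,
 *		.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
 *		.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
 *		.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
 *	};
 */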

static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct net_device *port_dev;
	struct list_head *iter;

	/* Recursively search down until we find a sw port dev.
	 * (A sw port dev supports switchdev_port_attr_get).
	 */

	if (ops && ops->switchdev_port_attr_get)
		return dev;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		port_dev = switchdev_get_lowest_dev(lower_dev);
		if (port_dev)
			return port_dev;
	}

	return NULL;
}

static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};
	struct switchdev_attr prev_attr;
	struct net_device *dev = NULL;
	int nhsel;

	/* For this route, all nexthop devs must be on the same switch. */

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (!nh->nh_dev)
			return NULL;

		dev = switchdev_get_lowest_dev(nh->nh_dev);
		if (!dev)
			return NULL;

		if (switchdev_port_attr_get(dev, &attr))
			return NULL;

		if (nhsel > 0 &&
		    !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
			return NULL;

		prev_attr = attr;
	}

	return dev;
}

/**
 * switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @nlflags: netlink flags passed in (NLM_F_*)
 * @tb_id: route table ID
 *
 * Add/modify switch IPv4 route entry.
 */
int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
{
	struct switchdev_obj_ipv4_fib ipv4_fib = {
		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
		.dst = dst,
		.dst_len = dst_len,
		.fi = fi,
		.tos = tos,
		.type = type,
		.nlflags = nlflags,
		.tb_id = tb_id,
	};
	struct net_device *dev;
	int err = 0;

	/* Don't offload route if using custom ip rules or if
	 * IPv4 FIB offloading has been disabled completely.
	 */

#ifdef CONFIG_IP_MULTIPLE_TABLES
	if (fi->fib_net->ipv4.fib_has_custom_rules)
		return 0;
#endif

	if (fi->fib_net->ipv4.fib_offload_disabled)
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
	if (!err)
		fi->fib_flags |= RTNH_F_OFFLOAD;

	return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);

/**
 * switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @tb_id: route table ID
 *
 * Delete IPv4 route entry from switch device.
 */
int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 tb_id)
{
	struct switchdev_obj_ipv4_fib ipv4_fib = {
		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
		.dst = dst,
		.dst_len = dst_len,
		.fi = fi,
		.tos = tos,
		.type = type,
		.nlflags = 0,
		.tb_id = tb_id,
	};
	struct net_device *dev;
	int err = 0;

	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
	if (!err)
		fi->fib_flags &= ~RTNH_F_OFFLOAD;

	return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);

/**
 * switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 * @fi: route FIB info structure
 */
void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
	/* There was a problem installing this route to the offload
	 * device.  For now, until we come up with more refined
	 * policy handling, abruptly end IPv4 fib offloading for
	 * the entire net by flushing offload device(s) of all
	 * IPv4 routes, and mark IPv4 fib offloading broken from
	 * this point forward.
	 */

	fib_flush_external(fi->fib_net);
	fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);

static bool switchdev_port_same_parent_id(struct net_device *a,
					  struct net_device *b)
{
	struct switchdev_attr a_attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};
	struct switchdev_attr b_attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};

	if (switchdev_port_attr_get(a, &a_attr) ||
	    switchdev_port_attr_get(b, &b_attr))
		return false;

	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
}

static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
				       struct net_device *group_dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
		if (lower_dev == dev)
			continue;
		if (switchdev_port_same_parent_id(dev, lower_dev))
			return lower_dev->offload_fwd_mark;
		return switchdev_port_fwd_mark_get(dev, lower_dev);
	}

	return dev->ifindex;
}

static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
					  u32 old_mark, u32 *reset_mark)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
		if (lower_dev->offload_fwd_mark == old_mark) {
			if (!*reset_mark)
				*reset_mark = lower_dev->ifindex;
			lower_dev->offload_fwd_mark = *reset_mark;
		}
		switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
	}
}

/**
 * switchdev_port_fwd_mark_set - Set port offload forwarding mark
 *
 * @dev: port device
 * @group_dev: containing device
 * @joining: true if dev is joining group; false if leaving group
 *
 * An ungrouped port's offload mark is just its ifindex.  A grouped
 * port's (member of a bridge, for example) offload mark is the ifindex
 * of one of the ports in the group with the same parent (switch) ID.
 * Ports on the same device in the same group will have the same mark.
 *
 * Example:
 *
 *		br0		ifindex=9
 *		  sw1p1		ifindex=2	mark=2
 *		  sw1p2		ifindex=3	mark=2
 *		  sw2p1		ifindex=4	mark=5
 *		  sw2p2		ifindex=5	mark=5
 *
 * If sw2p2 leaves the bridge, we'll have:
 *
 *		br0		ifindex=9
 *		  sw1p1		ifindex=2	mark=2
 *		  sw1p2		ifindex=3	mark=2
 *		  sw2p1		ifindex=4	mark=4
 *		  sw2p2		ifindex=5	mark=5
 */
void switchdev_port_fwd_mark_set(struct net_device *dev,
				 struct net_device *group_dev,
				 bool joining)
{
	u32 mark = dev->ifindex;
	u32 reset_mark = 0;

	if (group_dev && joining) {
		mark = switchdev_port_fwd_mark_get(dev, group_dev);
	} else if (group_dev && !joining) {
		if (dev->offload_fwd_mark == mark)
			/* Ohoh, this port was the mark reference port,
			 * but it's leaving the group, so reset the
			 * mark for the remaining ports in the group.
			 */
			switchdev_port_fwd_mark_reset(group_dev, mark,
						      &reset_mark);
	}

	dev->offload_fwd_mark = mark;
}
EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
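
/* Example: a switch port driver calls this from its bridge join and leave
 * paths, roughly as sketched below (the function names are illustrative;
 * rocker follows this pattern):
 *
 *	static int example_port_bridge_join(struct net_device *dev,
 *					    struct net_device *bridge)
 *	{
 *		switchdev_port_fwd_mark_set(dev, bridge, true);
 *		...
 *	}
 *
 *	static int example_port_bridge_leave(struct net_device *dev,
 *					     struct net_device *bridge)
 *	{
 *		switchdev_port_fwd_mark_set(dev, bridge, false);
 *		...
 *	}
 */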