/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

/**
 *      switchdev_trans_item_enqueue - Enqueue data item to transaction queue
 *
 *      @trans: transaction
 *      @data: pointer to data being queued
 *      @destructor: data destructor
 *      @tritem: transaction item being queued
 *
 *      Enqueue a data item on the transaction queue. tritem is typically
 *      embedded in the container pointed to by the data pointer. The
 *      destructor is called on transaction abort, and after a successful
 *      commit phase if the caller did not dequeue the item first.
 */
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
                                  void *data, void (*destructor)(void const *),
                                  struct switchdev_trans_item *tritem)
{
        tritem->data = data;
        tritem->destructor = destructor;
        list_add_tail(&tritem->list, &trans->item_list);
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);

static struct switchdev_trans_item *
__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
        struct switchdev_trans_item *tritem;

        if (list_empty(&trans->item_list))
                return NULL;
        tritem = list_first_entry(&trans->item_list,
                                  struct switchdev_trans_item, list);
        list_del(&tritem->list);
        return tritem;
}

/**
 *      switchdev_trans_item_dequeue - Dequeue data item from transaction queue
 *
 *      @trans: transaction
 */
void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
        struct switchdev_trans_item *tritem;

        tritem = __switchdev_trans_item_dequeue(trans);
        BUG_ON(!tritem);
        return tritem->data;
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);
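
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * driver's prepare phase can queue memory it allocated for the commit
 * phase, with kfree() as the destructor so an aborted transaction cleans
 * up automatically. The foo_ctx type and its tritem member are
 * hypothetical.
 *
 *	// prepare phase: reserve, don't apply
 *	struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	switchdev_trans_item_enqueue(trans, ctx, kfree, &ctx->tritem);
 *
 *	// commit phase: take the reservation back off the queue
 *	ctx = switchdev_trans_item_dequeue(trans);
 */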

static void switchdev_trans_init(struct switchdev_trans *trans)
{
        INIT_LIST_HEAD(&trans->item_list);
}

static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
{
        struct switchdev_trans_item *tritem;

        while ((tritem = __switchdev_trans_item_dequeue(trans)))
                tritem->destructor(tritem->data);
}

static void switchdev_trans_items_warn_destroy(struct net_device *dev,
                                               struct switchdev_trans *trans)
{
        WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
             dev->name);
        switchdev_trans_items_destroy(trans);
}

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
                                       const void *data);

struct switchdev_deferred_item {
        struct list_head list;
        struct net_device *dev;
        switchdev_deferred_func_t *func;
        unsigned long data[0];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
        struct switchdev_deferred_item *dfitem;

        spin_lock_bh(&deferred_lock);
        if (list_empty(&deferred)) {
                dfitem = NULL;
                goto unlock;
        }
        dfitem = list_first_entry(&deferred,
                                  struct switchdev_deferred_item, list);
        list_del(&dfitem->list);
unlock:
        spin_unlock_bh(&deferred_lock);
        return dfitem;
}

/**
 *      switchdev_deferred_process - Process ops in deferred queue
 *
 *      Called to flush the ops currently queued in the deferred ops queue.
 *      rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
        struct switchdev_deferred_item *dfitem;

        ASSERT_RTNL();

        while ((dfitem = switchdev_deferred_dequeue())) {
                dfitem->func(dfitem->dev, dfitem->data);
                dev_put(dfitem->dev);
                kfree(dfitem);
        }
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
        rtnl_lock();
        switchdev_deferred_process();
        rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
                                      const void *data, size_t data_len,
                                      switchdev_deferred_func_t *func)
{
        struct switchdev_deferred_item *dfitem;

        dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
        if (!dfitem)
                return -ENOMEM;
        dfitem->dev = dev;
        dfitem->func = func;
        memcpy(dfitem->data, data, data_len);
        dev_hold(dev);
        spin_lock_bh(&deferred_lock);
        list_add_tail(&dfitem->list, &deferred);
        spin_unlock_bh(&deferred_lock);
        schedule_work(&deferred_process_work);
        return 0;
}
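
/*
 * Deferral flow (editor's summary of the code above): an op flagged with
 * SWITCHDEV_F_DEFER may be issued from atomic context; the op is copied
 * with GFP_ATOMIC, queued on the deferred list, and replayed from the
 * workqueue in process context under rtnl_lock:
 *
 *	switchdev_port_attr_set(dev, attr)	// SWITCHDEV_F_DEFER set
 *	  switchdev_deferred_enqueue(...)	// copy attr, hold dev
 *	  schedule_work(&deferred_process_work)
 *	...
 *	switchdev_deferred_process_work()	// process context
 *	  rtnl_lock()
 *	  switchdev_deferred_process()		// run func, dev_put, kfree
 *	  rtnl_unlock()
 */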

/**
 *      switchdev_port_attr_get - Get port attribute
 *
 *      @dev: port device
 *      @attr: attribute to get
 */
int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        struct switchdev_attr first = {
                .id = SWITCHDEV_ATTR_ID_UNDEFINED
        };
        int err = -EOPNOTSUPP;

        if (ops && ops->switchdev_port_attr_get)
                return ops->switchdev_port_attr_get(dev, attr);

        if (attr->flags & SWITCHDEV_F_NO_RECURSE)
                return err;

        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to get attr on
         * each port.  Return -ENODATA if attr values don't
         * compare across ports.
         */

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = switchdev_port_attr_get(lower_dev, attr);
                if (err)
                        break;
                if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
                        first = *attr;
                else if (memcmp(&first, attr, sizeof(*attr)))
                        return -ENODATA;
        }

        return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_get);

static int __switchdev_port_attr_set(struct net_device *dev,
                                     const struct switchdev_attr *attr,
                                     struct switchdev_trans *trans)
{
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (ops && ops->switchdev_port_attr_set) {
                err = ops->switchdev_port_attr_set(dev, attr, trans);
                goto done;
        }

        if (attr->flags & SWITCHDEV_F_NO_RECURSE)
                goto done;

        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to set attr on
         * each port.
         */

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = __switchdev_port_attr_set(lower_dev, attr, trans);
                if (err)
                        break;
        }

done:
        if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
                err = 0;

        return err;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
                                       const struct switchdev_attr *attr)
{
        struct switchdev_trans trans;
        int err;

        switchdev_trans_init(&trans);

        /* Phase I: prepare for attr set. Driver/device should fail
         * here if there are going to be issues in the commit phase,
         * such as lack of resources or support.  The driver/device
         * should reserve resources needed for the commit phase here,
         * but should not commit the attr.
         */

        trans.ph_prepare = true;
        err = __switchdev_port_attr_set(dev, attr, &trans);
        if (err) {
                /* Prepare phase failed: abort the transaction.  Any
                 * resources reserved in the prepare phase are
                 * released.
                 */

                if (err != -EOPNOTSUPP)
                        switchdev_trans_items_destroy(&trans);

                return err;
        }

        /* Phase II: commit attr set.  This cannot fail as a fault
         * of the driver/device.  If it does, it's a bug in the
         * driver/device, because the driver said everything was OK in
         * phase I.
         */

        trans.ph_prepare = false;
        err = __switchdev_port_attr_set(dev, attr, &trans);
        WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
             dev->name, attr->id);
        switchdev_trans_items_warn_destroy(dev, &trans);

        return err;
}
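
/*
 * Driver-side sketch (editor's illustration, not part of the original
 * file): an implementation of switchdev_port_attr_set in a driver's
 * switchdev_ops typically branches on the transaction phase using the
 * switchdev_trans_ph_prepare() helper from <net/switchdev.h>. The foo_*
 * functions are hypothetical.
 *
 *	static int foo_port_attr_set(struct net_device *dev,
 *				     const struct switchdev_attr *attr,
 *				     struct switchdev_trans *trans)
 *	{
 *		if (switchdev_trans_ph_prepare(trans))
 *			// validate and reserve, but don't touch hw
 *			return foo_attr_prepare(dev, attr, trans);
 *		// commit to hw; must not fail after a good prepare
 *		return foo_attr_commit(dev, attr, trans);
 *	}
 */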

static void switchdev_port_attr_set_deferred(struct net_device *dev,
                                             const void *data)
{
        const struct switchdev_attr *attr = data;
        int err;

        err = switchdev_port_attr_set_now(dev, attr);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
                           err, attr->id);
        if (attr->complete)
                attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
                                         const struct switchdev_attr *attr)
{
        return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
                                          switchdev_port_attr_set_deferred);
}

/**
 *      switchdev_port_attr_set - Set port attribute
 *
 *      @dev: port device
 *      @attr: attribute to set
 *
 *      Use a 2-phase prepare-commit transaction model to ensure that the
 *      system is not left in a partially updated state due to a failure in
 *      the driver/device.
 *
 *      If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *      and the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
                            const struct switchdev_attr *attr)
{
        if (attr->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_attr_set_defer(dev, attr);
        ASSERT_RTNL();
        return switchdev_port_attr_set_now(dev, attr);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
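
/*
 * Caller-side sketch (editor's illustration, not part of the original
 * file): in this era the bridge sets a port's STP state this way,
 * deferring the op so it can be issued from atomic context. The
 * attribute id and union member come from <net/switchdev.h>.
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(dev, &attr);
 */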

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                return sizeof(struct switchdev_obj_port_vlan);
        case SWITCHDEV_OBJ_ID_PORT_FDB:
                return sizeof(struct switchdev_obj_port_fdb);
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                return sizeof(struct switchdev_obj_port_mdb);
        default:
                BUG();
        }
        return 0;
}

static int __switchdev_port_obj_add(struct net_device *dev,
                                    const struct switchdev_obj *obj,
                                    struct switchdev_trans *trans)
{
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (ops && ops->switchdev_port_obj_add)
                return ops->switchdev_port_obj_add(dev, obj, trans);

        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to add object on
         * each port.
         */

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = __switchdev_port_obj_add(lower_dev, obj, trans);
                if (err)
                        break;
        }

        return err;
}

static int switchdev_port_obj_add_now(struct net_device *dev,
                                      const struct switchdev_obj *obj)
{
        struct switchdev_trans trans;
        int err;

        ASSERT_RTNL();

        switchdev_trans_init(&trans);

        /* Phase I: prepare for obj add. Driver/device should fail
         * here if there are going to be issues in the commit phase,
         * such as lack of resources or support.  The driver/device
         * should reserve resources needed for the commit phase here,
         * but should not commit the obj.
         */

        trans.ph_prepare = true;
        err = __switchdev_port_obj_add(dev, obj, &trans);
        if (err) {
                /* Prepare phase failed: abort the transaction.  Any
                 * resources reserved in the prepare phase are
                 * released.
                 */

                if (err != -EOPNOTSUPP)
                        switchdev_trans_items_destroy(&trans);

                return err;
        }

        /* Phase II: commit obj add.  This cannot fail as a fault
         * of the driver/device.  If it does, it's a bug in the
         * driver/device, because the driver said everything was OK in
         * phase I.
         */

        trans.ph_prepare = false;
        err = __switchdev_port_obj_add(dev, obj, &trans);
        WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
        switchdev_trans_items_warn_destroy(dev, &trans);

        return err;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
                                            const void *data)
{
        const struct switchdev_obj *obj = data;
        int err;

        err = switchdev_port_obj_add_now(dev, obj);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
                           err, obj->id);
        if (obj->complete)
                obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
                                        const struct switchdev_obj *obj)
{
        return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                                          switchdev_port_obj_add_deferred);
}

/**
 *      switchdev_port_obj_add - Add port object
 *
 *      @dev: port device
 *      @obj: object to add
 *
 *      Use a 2-phase prepare-commit transaction model to ensure that the
 *      system is not left in a partially updated state due to a failure in
 *      the driver/device.
 *
 *      If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *      and the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
                           const struct switchdev_obj *obj)
{
        if (obj->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_obj_add_defer(dev, obj);
        ASSERT_RTNL();
        return switchdev_port_obj_add_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
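
/*
 * Caller-side sketch (editor's illustration, not part of the original
 * file), mirroring what switchdev_port_br_afspec() below builds from
 * netlink: add VLAN 10 as PVID/untagged on a port.
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid_begin = 10,
 *		.vid_end = 10,
 *	};
 *
 *	err = switchdev_port_obj_add(dev, &vlan.obj);
 */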

static int switchdev_port_obj_del_now(struct net_device *dev,
                                      const struct switchdev_obj *obj)
{
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (ops && ops->switchdev_port_obj_del)
                return ops->switchdev_port_obj_del(dev, obj);

        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to delete object on
         * each port.
         */

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = switchdev_port_obj_del_now(lower_dev, obj);
                if (err)
                        break;
        }

        return err;
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
                                            const void *data)
{
        const struct switchdev_obj *obj = data;
        int err;

        err = switchdev_port_obj_del_now(dev, obj);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
                           err, obj->id);
        if (obj->complete)
                obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
                                        const struct switchdev_obj *obj)
{
        return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                                          switchdev_port_obj_del_deferred);
}

/**
 *      switchdev_port_obj_del - Delete port object
 *
 *      @dev: port device
 *      @obj: object to delete
 *
 *      If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *      and the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
                           const struct switchdev_obj *obj)
{
        if (obj->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_obj_del_defer(dev, obj);
        ASSERT_RTNL();
        return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 *      switchdev_port_obj_dump - Dump port objects
 *
 *      @dev: port device
 *      @obj: object to dump
 *      @cb: function to call with a filled object
 *
 *      rtnl_lock must be held.
 */
int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
                            switchdev_obj_dump_cb_t *cb)
{
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        ASSERT_RTNL();

        if (ops && ops->switchdev_port_obj_dump)
                return ops->switchdev_port_obj_dump(dev, obj, cb);

        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to dump objects on
         * first port at bottom of stack.
         */

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                err = switchdev_port_obj_dump(lower_dev, obj, cb);
                break;
        }

        return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);

static RAW_NOTIFIER_HEAD(switchdev_notif_chain);

/**
 *      register_switchdev_notifier - Register notifier
 *      @nb: notifier_block
 *
 *      Register switch device notifier. This should be used by code
 *      which needs to monitor events happening in a particular device.
 *      Return values are the same as for raw_notifier_chain_register().
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
        int err;

        rtnl_lock();
        err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *      unregister_switchdev_notifier - Unregister notifier
 *      @nb: notifier_block
 *
 *      Unregister switch device notifier.
 *      Return values are the same as for raw_notifier_chain_unregister().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
        int err;

        rtnl_lock();
        err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *      call_switchdev_notifiers - Call notifiers
 *      @val: value passed unmodified to notifier function
 *      @dev: port device
 *      @info: notifier information data
 *
 *      Call all switchdev notifier blocks. This should be called by a
 *      driver when it needs to propagate a hardware event.
 *      Return values are the same as for raw_notifier_call_chain().
 *      rtnl_lock must be held.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
                             struct switchdev_notifier_info *info)
{
        ASSERT_RTNL();

        info->dev = dev;
        return raw_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
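
/*
 * Driver-side sketch (editor's illustration, not part of the original
 * file): a driver that learned a MAC behind one of its ports would
 * notify listeners (e.g. the bridge) like this, under rtnl_lock. The
 * struct switchdev_notifier_fdb_info and SWITCHDEV_FDB_ADD names are
 * assumed to match this era's <net/switchdev.h>.
 *
 *	struct switchdev_notifier_fdb_info fdb_info = {
 *		.addr = addr,	// learned MAC address
 *		.vid = vid,	// VLAN it was learned on
 *	};
 *
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD, dev, &fdb_info.info);
 */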

struct switchdev_vlan_dump {
        struct switchdev_obj_port_vlan vlan;
        struct sk_buff *skb;
        u32 filter_mask;
        u16 flags;
        u16 begin;
        u16 end;
};

static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
{
        struct bridge_vlan_info vinfo;

        vinfo.flags = dump->flags;

        if (dump->begin == 0 && dump->end == 0) {
                return 0;
        } else if (dump->begin == dump->end) {
                vinfo.vid = dump->begin;
                if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
                            sizeof(vinfo), &vinfo))
                        return -EMSGSIZE;
        } else {
                vinfo.vid = dump->begin;
                vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
                if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
                            sizeof(vinfo), &vinfo))
                        return -EMSGSIZE;
                vinfo.vid = dump->end;
                vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
                vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
                if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
                            sizeof(vinfo), &vinfo))
                        return -EMSGSIZE;
        }

        return 0;
}

static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
{
        struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
        struct switchdev_vlan_dump *dump =
                container_of(vlan, struct switchdev_vlan_dump, vlan);
        int err = 0;

        if (vlan->vid_begin > vlan->vid_end)
                return -EINVAL;

        if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
                dump->flags = vlan->flags;
                for (dump->begin = dump->end = vlan->vid_begin;
                     dump->begin <= vlan->vid_end;
                     dump->begin++, dump->end++) {
                        err = switchdev_port_vlan_dump_put(dump);
                        if (err)
                                return err;
                }
        } else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
                if (dump->begin > vlan->vid_begin &&
                    dump->begin >= vlan->vid_end) {
                        if ((dump->begin - 1) == vlan->vid_end &&
                            dump->flags == vlan->flags) {
                                /* prepend */
                                dump->begin = vlan->vid_begin;
                        } else {
                                err = switchdev_port_vlan_dump_put(dump);
                                dump->flags = vlan->flags;
                                dump->begin = vlan->vid_begin;
                                dump->end = vlan->vid_end;
                        }
                } else if (dump->end <= vlan->vid_begin &&
                           dump->end < vlan->vid_end) {
                        if ((dump->end + 1) == vlan->vid_begin &&
                            dump->flags == vlan->flags) {
                                /* append */
                                dump->end = vlan->vid_end;
                        } else {
                                err = switchdev_port_vlan_dump_put(dump);
                                dump->flags = vlan->flags;
                                dump->begin = vlan->vid_begin;
                                dump->end = vlan->vid_end;
                        }
                } else {
                        err = -EINVAL;
                }
        }

        return err;
}

static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
                                    u32 filter_mask)
{
        struct switchdev_vlan_dump dump = {
                .vlan.obj.orig_dev = dev,
                .vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .skb = skb,
                .filter_mask = filter_mask,
        };
        int err = 0;

        if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
            (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
                err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
                                              switchdev_port_vlan_dump_cb);
                if (err)
                        goto err_out;
                if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
                        /* last one */
                        err = switchdev_port_vlan_dump_put(&dump);
        }

err_out:
        return err == -EOPNOTSUPP ? 0 : err;
}

/**
 *      switchdev_port_bridge_getlink - Get bridge port attributes
 *
 *      @dev: port device
 *
 *      Called for SELF on rtnl_bridge_getlink to get bridge port
 *      attributes.
 */
int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                  struct net_device *dev, u32 filter_mask,
                                  int nlflags)
{
        struct switchdev_attr attr = {
                .orig_dev = dev,
                .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
        };
        u16 mode = BRIDGE_MODE_UNDEF;
        u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
        int err;

        if (!netif_is_bridge_port(dev))
                return -EOPNOTSUPP;

        err = switchdev_port_attr_get(dev, &attr);
        if (err && err != -EOPNOTSUPP)
                return err;

        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
                                       attr.u.brport_flags, mask, nlflags,
                                       filter_mask, switchdev_port_vlan_fill);
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);

static int switchdev_port_br_setflag(struct net_device *dev,
                                     struct nlattr *nlattr,
                                     unsigned long brport_flag)
{
        struct switchdev_attr attr = {
                .orig_dev = dev,
                .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
        };
        u8 flag = nla_get_u8(nlattr);
        int err;

        err = switchdev_port_attr_get(dev, &attr);
        if (err)
                return err;

        if (flag)
                attr.u.brport_flags |= brport_flag;
        else
                attr.u.brport_flags &= ~brport_flag;

        return switchdev_port_attr_set(dev, &attr);
}

static const struct nla_policy
switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
        [IFLA_BRPORT_STATE]             = { .type = NLA_U8 },
        [IFLA_BRPORT_COST]              = { .type = NLA_U32 },
        [IFLA_BRPORT_PRIORITY]          = { .type = NLA_U16 },
        [IFLA_BRPORT_MODE]              = { .type = NLA_U8 },
        [IFLA_BRPORT_GUARD]             = { .type = NLA_U8 },
        [IFLA_BRPORT_PROTECT]           = { .type = NLA_U8 },
        [IFLA_BRPORT_FAST_LEAVE]        = { .type = NLA_U8 },
        [IFLA_BRPORT_LEARNING]          = { .type = NLA_U8 },
        [IFLA_BRPORT_LEARNING_SYNC]     = { .type = NLA_U8 },
        [IFLA_BRPORT_UNICAST_FLOOD]     = { .type = NLA_U8 },
};

static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
                                              struct nlattr *protinfo)
{
        struct nlattr *attr;
        int rem;
        int err;

        err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
                                  switchdev_port_bridge_policy, NULL);
        if (err)
                return err;

        nla_for_each_nested(attr, protinfo, rem) {
                switch (nla_type(attr)) {
                case IFLA_BRPORT_LEARNING:
                        err = switchdev_port_br_setflag(dev, attr,
                                                        BR_LEARNING);
                        break;
                case IFLA_BRPORT_LEARNING_SYNC:
                        err = switchdev_port_br_setflag(dev, attr,
                                                        BR_LEARNING_SYNC);
                        break;
                case IFLA_BRPORT_UNICAST_FLOOD:
                        err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
                        break;
                default:
                        err = -EOPNOTSUPP;
                        break;
                }
                if (err)
                        return err;
        }

        return 0;
}

static int switchdev_port_br_afspec(struct net_device *dev,
                                    struct nlattr *afspec,
                                    int (*f)(struct net_device *dev,
                                             const struct switchdev_obj *obj))
{
        struct nlattr *attr;
        struct bridge_vlan_info *vinfo;
        struct switchdev_obj_port_vlan vlan = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
        };
        int rem;
        int err;

        nla_for_each_nested(attr, afspec, rem) {
                if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
                        continue;
                if (nla_len(attr) != sizeof(struct bridge_vlan_info))
                        return -EINVAL;
                vinfo = nla_data(attr);
                if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
                        return -EINVAL;
                vlan.flags = vinfo->flags;
                if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
                        if (vlan.vid_begin)
                                return -EINVAL;
                        vlan.vid_begin = vinfo->vid;
                        /* don't allow range of pvids */
                        if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
                                return -EINVAL;
                } else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
                        if (!vlan.vid_begin)
                                return -EINVAL;
                        vlan.vid_end = vinfo->vid;
                        if (vlan.vid_end <= vlan.vid_begin)
                                return -EINVAL;
                        err = f(dev, &vlan.obj);
                        if (err)
                                return err;
                        vlan.vid_begin = 0;
                } else {
                        if (vlan.vid_begin)
                                return -EINVAL;
                        vlan.vid_begin = vinfo->vid;
                        vlan.vid_end = vinfo->vid;
                        err = f(dev, &vlan.obj);
                        if (err)
                                return err;
                        vlan.vid_begin = 0;
                }
        }

        return 0;
}

/**
 *      switchdev_port_bridge_setlink - Set bridge port attributes
 *
 *      @dev: port device
 *      @nlh: netlink header
 *      @flags: netlink flags
 *
 *      Called for SELF on rtnl_bridge_setlink to set bridge port
 *      attributes.
 */
int switchdev_port_bridge_setlink(struct net_device *dev,
                                  struct nlmsghdr *nlh, u16 flags)
{
        struct nlattr *protinfo;
        struct nlattr *afspec;
        int err = 0;

        if (!netif_is_bridge_port(dev))
                return -EOPNOTSUPP;

        protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
                                   IFLA_PROTINFO);
        if (protinfo) {
                err = switchdev_port_br_setlink_protinfo(dev, protinfo);
                if (err)
                        return err;
        }

        afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
                                 IFLA_AF_SPEC);
        if (afspec)
                err = switchdev_port_br_afspec(dev, afspec,
                                               switchdev_port_obj_add);

        return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);

/**
 *      switchdev_port_bridge_dellink - Delete bridge port attributes
 *
 *      @dev: port device
 *      @nlh: netlink header
 *      @flags: netlink flags
 *
 *      Called for SELF on rtnl_bridge_dellink to delete bridge port
 *      attributes.
 */
int switchdev_port_bridge_dellink(struct net_device *dev,
                                  struct nlmsghdr *nlh, u16 flags)
{
        struct nlattr *afspec;

        if (!netif_is_bridge_port(dev))
                return -EOPNOTSUPP;

        afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
                                 IFLA_AF_SPEC);
        if (afspec)
                return switchdev_port_br_afspec(dev, afspec,
                                                switchdev_port_obj_del);

        return 0;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);

/**
 *      switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
 *
 *      @ndm: netlink header
 *      @tb: netlink attributes
 *      @dev: port device
 *      @addr: MAC address to add
 *      @vid: VLAN to add
 *      @nlm_flags: netlink flags
 *
 *      Add FDB entry to switch device.
 */
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev, const unsigned char *addr,
                           u16 vid, u16 nlm_flags)
{
        struct switchdev_obj_port_fdb fdb = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
                .vid = vid,
        };

        ether_addr_copy(fdb.addr, addr);
        return switchdev_port_obj_add(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);

/**
 *      switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
 *
 *      @ndm: netlink header
 *      @tb: netlink attributes
 *      @dev: port device
 *      @addr: MAC address to delete
 *      @vid: VLAN to delete
 *
 *      Delete FDB entry from switch device.
 */
int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev, const unsigned char *addr,
                           u16 vid)
{
        struct switchdev_obj_port_fdb fdb = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
                .vid = vid,
        };

        ether_addr_copy(fdb.addr, addr);
        return switchdev_port_obj_del(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);

struct switchdev_fdb_dump {
        struct switchdev_obj_port_fdb fdb;
        struct net_device *dev;
        struct sk_buff *skb;
        struct netlink_callback *cb;
        int idx;
};

static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
{
        struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
        struct switchdev_fdb_dump *dump =
                container_of(fdb, struct switchdev_fdb_dump, fdb);
        u32 portid = NETLINK_CB(dump->cb->skb).portid;
        u32 seq = dump->cb->nlh->nlmsg_seq;
        struct nlmsghdr *nlh;
        struct ndmsg *ndm;

        if (dump->idx < dump->cb->args[2])
                goto skip;

        nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
                        sizeof(*ndm), NLM_F_MULTI);
        if (!nlh)
                return -EMSGSIZE;

        ndm = nlmsg_data(nlh);
        ndm->ndm_family  = AF_BRIDGE;
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = NTF_SELF;
        ndm->ndm_type    = 0;
        ndm->ndm_ifindex = dump->dev->ifindex;
        ndm->ndm_state   = fdb->ndm_state;

        if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
                goto nla_put_failure;

        if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
                goto nla_put_failure;

        nlmsg_end(dump->skb, nlh);

skip:
        dump->idx++;
        return 0;

nla_put_failure:
        nlmsg_cancel(dump->skb, nlh);
        return -EMSGSIZE;
}

/**
 *      switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
 *
 *      @skb: netlink skb
 *      @cb: netlink callback
 *      @dev: port device
 *      @filter_dev: filter device
 *      @idx: index to start dumping from; updated to the new index on return
 *
 *      Dump FDB entries from switch device.
 */
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                            struct net_device *dev,
                            struct net_device *filter_dev, int *idx)
{
        struct switchdev_fdb_dump dump = {
                .fdb.obj.orig_dev = dev,
                .fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
                .dev = dev,
                .skb = skb,
                .cb = cb,
                .idx = *idx,
        };
        int err;

        err = switchdev_port_obj_dump(dev, &dump.fdb.obj,
                                      switchdev_port_fdb_dump_cb);
        *idx = dump.idx;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
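
/*
 * Wiring sketch (editor's illustration, not part of the original file):
 * a switchdev driver can plug this helper straight into its netdev ops
 * so "bridge fdb show" reaches the device; rocker, for example, used
 * this pattern. The foo_* name is hypothetical.
 *
 *	static const struct net_device_ops foo_port_netdev_ops = {
 *		...
 *		.ndo_fdb_dump		= switchdev_port_fdb_dump,
 *	};
 */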

bool switchdev_port_same_parent_id(struct net_device *a,
                                   struct net_device *b)
{
        struct switchdev_attr a_attr = {
                .orig_dev = a,
                .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
        };
        struct switchdev_attr b_attr = {
                .orig_dev = b,
                .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
        };

        if (switchdev_port_attr_get(a, &a_attr) ||
            switchdev_port_attr_get(b, &b_attr))
                return false;

        return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
}
EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);
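
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * callers use this to test whether two netdevs are ports of the same
 * switch ASIC (same physical item id) before relying on hardware
 * forwarding between them.
 *
 *	if (switchdev_port_same_parent_id(dev_a, dev_b))
 *		// both ports share a parent switch; offload is possible
 */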