#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"

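/* VLAN entries (per bridge and per port) live in an rhashtable keyed by the
 * 16-bit VID, plus a VID-ordered vlan_list for ordered traversal.  The
 * compare callback below makes lookups match on the VID alone.
 */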
static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

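/* Update the group's pvid.  The smp_wmb() is intended to pair with the read
 * side (see br_get_pvid()) so that a new pvid is only observed after the
 * corresponding VLAN entry has been fully set up.
 */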
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}

static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}

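/* Apply the user-visible flags to a VLAN entry: make it the pvid of its
 * group (bridge or port) when BRIDGE_VLAN_INFO_PVID is set, and track the
 * untagged egress flag on the entry itself.
 */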
static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;

	if (br_vlan_is_master(v))
		vg = v->br->vlgrp;
	else
		vg = v->port->vlgrp;

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v->vid);
	else
		__vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  u16 vid, u16 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	/* If driver uses VLAN ndo ops, use 8021q to install vid
	 * on device, otherwise try switchdev ops to install vid.
	 */

	if (ops->ndo_vlan_rx_add_vid) {
		err = vlan_vid_add(dev, br->vlan_proto, vid);
	} else {
		struct switchdev_obj_port_vlan v = {
			.flags = flags,
			.vid_begin = vid,
			.vid_end = vid,
		};

		err = switchdev_port_obj_add(dev, SWITCHDEV_OBJ_ID_PORT_VLAN,
					     &v.obj);
		if (err == -EOPNOTSUPP)
			err = 0;
	}

	return err;
}

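/* Insert the entry into its group's vlan_list, keeping the list sorted by
 * ascending VID: walk backwards until an entry with a smaller (or equal)
 * VID is found and link in after it.
 */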
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	headp = br_vlan_is_master(v) ? &v->br->vlgrp->vlan_list :
				       &v->port->vlgrp->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	list_add(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  u16 vid)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err = 0;

	/* If driver uses VLAN ndo ops, use 8021q to delete vid
	 * on device, otherwise try switchdev ops to delete vid.
	 */

	if (ops->ndo_vlan_rx_kill_vid) {
		vlan_vid_del(dev, br->vlan_proto, vid);
	} else {
		struct switchdev_obj_port_vlan v = {
			.vid_begin = vid,
			.vid_end = vid,
		};

		err = switchdev_port_obj_del(dev, SWITCHDEV_OBJ_ID_PORT_VLAN,
					     &v.obj);
		if (err == -EOPNOTSUPP)
			err = 0;
	}

	return err;
}

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brvlan flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brvlan flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brvlan flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct rhashtable *tbl;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		tbl = &br->vlgrp->vlan_hash;
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		tbl = &p->vlgrp->vlan_hash;
	}

	if (p) {
		u16 master_flags = flags;

		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v->vid, flags);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			master_flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, v->vid, master_flags);
			if (err)
				goto out_filt;
		}

		masterv = br_vlan_find(br->vlgrp, v->vid);
		if (!masterv) {
			/* missing global ctx, create it now */
			err = br_vlan_add(br, v->vid, 0);
			if (err)
				goto out_filt;
			masterv = br_vlan_find(br->vlgrp, v->vid);
			WARN_ON(!masterv);
		}
		atomic_inc(&masterv->refcnt);
		v->brvlan = masterv;
	}

	/* Add the dev mac only if it's a usable vlan */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto out_filt;
		}
	}

	err = rhashtable_lookup_insert_fast(tbl, &v->vnode, br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
	if (br_vlan_is_master(v)) {
		if (br_vlan_is_brentry(v))
			br->vlgrp->num_vlans++;
	} else {
		p->vlgrp->num_vlans++;
	}
out:
	return err;

out_fdb_insert:
	br_fdb_find_delete_local(br, p, br->dev->dev_addr, v->vid);

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v->vid);
		if (masterv) {
			atomic_dec(&masterv->refcnt);
			v->brvlan = NULL;
		}
	}

	goto out;
}

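/* Shared VLAN delete path for port and bridge entries.  For a port entry the
 * VID is also removed from the underlying device filter, and the reference
 * on the bridge (master) entry taken in __vlan_add() is dropped; the master
 * entry itself is only freed once its refcount reaches zero.
 */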
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int err = 0;

	if (br_vlan_is_master(v)) {
		br = v->br;
		vg = v->br->vlgrp;
	} else {
		p = v->port;
		br = p->br;
		vg = v->port->vlgrp;
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	if (br_vlan_is_master(v)) {
		if (br_vlan_is_brentry(v)) {
			v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
			br->vlgrp->num_vlans--;
		}
	} else {
		p->vlgrp->num_vlans--;
	}

	if (masterv != v) {
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&masterv->br->vlgrp->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		kfree_rcu(masterv, rcu);
	}
out:
	return err;
}

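/* Remove every VLAN in the group and release the group itself: each entry is
 * deleted via __vlan_del(), then the rhashtable and the group structure are
 * freed.
 */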
static void __vlan_flush(struct net_bridge_vlan_group *vlgrp)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vlgrp, vlgrp->pvid);
	list_for_each_entry_safe(vlan, tmp, &vlgrp->vlan_list, vlist)
		__vlan_del(vlan);
	rhashtable_destroy(&vlgrp->vlan_hash);
	kfree(vlgrp);
}

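/* Prepare a forwarded skb for transmission according to the egress VLAN
 * configuration: strip the tag for VLANs marked untagged, leave it in place
 * otherwise, and drop frames whose VLAN is not configured on the egress
 * side (with an exception for the promiscuous bridge device itself).
 */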
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has the untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* The vlan entry must be configured at this point.  The only
	 * exception is when the bridge is in promiscuous mode and the
	 * packet is destined for the bridge device; in that case, pass
	 * the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;

out:
	return skb;
}

/* Called under RCU */
static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
			      struct sk_buff *skb, u16 *vid)
{
	const struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on the bridge device and the frame
	 * was sent from a vlan device on the bridge device, it does not have
	 * a HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci had the
			 * VLAN_TAG_PRESENT bit set and its VID field was 0x000.
			 * Update only the VID field and preserve the PCP field.
			 */
			skb->vlan_tci |= pvid;

		return true;
	}

	/* Frame had a valid vlan tag.  See if the vlan is allowed */
	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_should_use(v))
		return true;
drop:
	kfree_skb(skb);
	return false;
}

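/* Ingress policy entry point: when VLAN filtering is enabled, classify the
 * frame into a VLAN (tag or pvid) and accept it only if that VLAN is
 * configured on the ingress side; *vid is filled in for the caller.
 */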
bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br->vlan_enabled) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(vg, br->vlan_proto, skb, vid);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;

	/* If filtering was disabled at input, let it pass. */
	if (!br->vlan_enabled)
		return true;

	vg = p->vlgrp;
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid)
			return false;

		return true;
	}

	if (br_vlan_find(vg, *vid))
		return true;

	return false;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(br->vlgrp, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed to insert local address into bridge forwarding table\n");
				return ret;
			}
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			br->vlgrp->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(br->vlgrp, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	ASSERT_RTNL();

	__vlan_flush(br_vlan_group(br));
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

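/* Select the MAC address used for link-local bridge group traffic: the
 * standard Bridge Group Address (01-80-C2-00-00-00) for 802.1Q, or the
 * Provider Bridge Group Address (01-80-C2-00-00-08) when the bridge runs
 * in 802.1ad mode with VLAN filtering enabled.  A user-set address is
 * left untouched.
 */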
/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br->group_addr_set)
		return;

	spin_lock_bh(&br->lock);
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

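/* Toggle VLAN filtering on the bridge.  Changing it affects port
 * promiscuity, the bridge group address and the group forwarding mask,
 * so those are all recomputed here.
 */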
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	if (br->vlan_enabled == val)
		return 0;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	if (!rtnl_trylock())
		return restart_syscall();

	__br_vlan_filter_toggle(br, val);
	rtnl_unlock();

	return 0;
}

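/* Switch the bridge VLAN protocol (802.1Q <-> 802.1ad).  All port VLANs are
 * first added to the device filters under the new protocol; only if that
 * succeeds for every port is the protocol switched and the old entries
 * removed, otherwise everything added so far is rolled back.
 */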
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list)
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);

	return 0;

err_filt:
	list_for_each_entry_continue_reverse(vlan, &p->vlgrp->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list)
		list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
	int err;

	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
		return -EPROTONOSUPPORT;

	if (!rtnl_trylock())
		return restart_syscall();

	err = __br_vlan_set_proto(br, htons(val));
	rtnl_unlock();

	return err;
}

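/* Return true if @vid is the group's pvid and is still in its default
 * (untagged, usable) configuration, i.e. it has not been reconfigured by
 * the user and can safely be replaced when default_pvid changes.
 */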
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br->vlgrp, pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(p->vlgrp, pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}

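/* Move default_pvid from the old value to @pvid on the bridge and on every
 * port that still uses the default configuration.  The "changed" bitmap
 * records which ports (bit 0 = the bridge itself) were updated so they can
 * be rolled back if adding the new pvid fails part way through.
 */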
static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	pvent = br_vlan_find(br->vlgrp, pvid);
	if ((!old_pvid || vlan_default_pvid(br->vlgrp, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		if ((old_pvid &&
		     !vlan_default_pvid(p->vlgrp, old_pvid)) ||
		    br_vlan_find(p->vlgrp, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (pvid == br->default_pvid)
		goto unlock;

	/* Only allow default pvid change when filtering is disabled */
	if (br->vlan_enabled) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto unlock;
	}

	if (!pvid)
		br_vlan_disable_default_pvid(br);
	else
		err = __br_vlan_set_default_pvid(br, pvid);

unlock:
	rtnl_unlock();
	return err;
}

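/* Set up the bridge's VLAN group at creation time: allocate the group,
 * initialize the VID hash table and list, default to 802.1Q with
 * default_pvid 1, and add VLAN 1 as the initial untagged pvid entry.
 */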
int br_vlan_init(struct net_bridge *br)
{
	int ret = -ENOMEM;

	br->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!br->vlgrp)
		goto out;
	ret = rhashtable_init(&br->vlgrp->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&br->vlgrp->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&br->vlgrp->vlan_hash);
err_rhtbl:
	kfree(br->vlgrp);

	goto out;
}

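/* Per-port counterpart of br_vlan_init(): allocate and publish the port's
 * VLAN group and, if the bridge has a default_pvid, add it to the port as
 * an untagged pvid entry.
 */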
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	/* Make sure everything's committed before publishing vg */
	smp_wmb();
	p->vlgrp = vg;
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(port->vlgrp, vid);
	if (vlan) {
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(port->vlgrp, vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

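/* Remove all VLANs from a port: drop the VIDs from the underlying device's
 * filter, then flush and free the port's VLAN group.
 */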
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan *vlan;

	ASSERT_RTNL();

	list_for_each_entry(vlan, &port->vlgrp->vlan_list, vlist)
		vlan_vid_del(port->dev, port->br->vlan_proto, vlan->vid);

	__vlan_flush(nbp_vlan_group(port));
}