/* Source: karo-tx-linux.git,
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Commit: "mlxsw: spectrum_switchdev: Remove redundant check"
 * (gitweb page-header artifact replaced with this comment)
 */
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/slab.h>
42 #include <linux/device.h>
43 #include <linux/skbuff.h>
44 #include <linux/if_vlan.h>
45 #include <linux/if_bridge.h>
46 #include <linux/workqueue.h>
47 #include <linux/jiffies.h>
48 #include <linux/rtnetlink.h>
49 #include <net/switchdev.h>
50
51 #include "spectrum.h"
52 #include "core.h"
53 #include "reg.h"
54
/* Per-ASIC bridge state shared by all bridged ports. */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;	/* back-pointer to owning device */
	struct {
		/* Delayed work that polls the device for FDB
		 * notifications.
		 */
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
/* FDB ageing-time bounds and default, in seconds — see
 * mlxsw_sp_port_attr_br_ageing_set(), which converts jiffies to seconds
 * before range-checking against these.
 */
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	struct mlxsw_sp_upper master_bridge;	/* the VLAN-aware bridge */
	struct list_head mids_list;		/* multicast groups */
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);	/* MID allocator */
};
70
71 struct mlxsw_sp_upper *mlxsw_sp_master_bridge(const struct mlxsw_sp *mlxsw_sp)
72 {
73         return &mlxsw_sp->bridge->master_bridge;
74 }
75
76 static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
77                                         u16 vid)
78 {
79         struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
80         u16 fid = vid;
81
82         fid = f ? f->fid : fid;
83
84         if (!fid)
85                 fid = mlxsw_sp_port->pvid;
86
87         return fid;
88 }
89
90 static struct mlxsw_sp_port *
91 mlxsw_sp_port_orig_get(struct net_device *dev,
92                        struct mlxsw_sp_port *mlxsw_sp_port)
93 {
94         struct mlxsw_sp_port *mlxsw_sp_vport;
95         struct mlxsw_sp_fid *fid;
96         u16 vid;
97
98         if (netif_is_bridge_master(dev)) {
99                 fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp,
100                                          dev);
101                 if (fid) {
102                         mlxsw_sp_vport =
103                                 mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
104                                                                 fid->fid);
105                         WARN_ON(!mlxsw_sp_vport);
106                         return mlxsw_sp_vport;
107                 }
108         }
109
110         if (!is_vlan_dev(dev))
111                 return mlxsw_sp_port;
112
113         vid = vlan_dev_vlan_id(dev);
114         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
115         WARN_ON(!mlxsw_sp_vport);
116
117         return mlxsw_sp_vport;
118 }
119
120 static int mlxsw_sp_port_attr_get(struct net_device *dev,
121                                   struct switchdev_attr *attr)
122 {
123         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
124         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
125
126         mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
127         if (!mlxsw_sp_port)
128                 return -EINVAL;
129
130         switch (attr->id) {
131         case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
132                 attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
133                 memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
134                        attr->u.ppid.id_len);
135                 break;
136         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
137                 attr->u.brport_flags =
138                         (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
139                         (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
140                         (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
141                 break;
142         default:
143                 return -EOPNOTSUPP;
144         }
145
146         return 0;
147 }
148
/* Program the per-VLAN STP state of a port into the device via the SPMS
 * register. For a vPort only its own VLAN is updated; for a regular port
 * every active VLAN is set to the same state.
 */
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                       u8 state)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        enum mlxsw_reg_spms_state spms_state;
        char *spms_pl;
        u16 vid;
        int err;

        /* Map the bridge STP state onto the device encoding; listening,
         * disabled and blocking all collapse to discarding.
         */
        switch (state) {
        case BR_STATE_FORWARDING:
                spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
                break;
        case BR_STATE_LEARNING:
                spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
                break;
        case BR_STATE_LISTENING: /* fall-through */
        case BR_STATE_DISABLED: /* fall-through */
        case BR_STATE_BLOCKING:
                spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
                break;
        default:
                BUG();  /* callers only pass bridge STP states */
        }

        /* NOTE(review): payload is heap-allocated — presumably
         * MLXSW_REG_SPMS_LEN is too large for the stack; confirm.
         */
        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
        if (!spms_pl)
                return -ENOMEM;
        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                /* vPort: single VLAN record. */
                vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
                mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
        } else {
                /* Regular port: one record per active VLAN. */
                for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
                        mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
        }

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
        kfree(spms_pl);
        return err;
}
191
/* switchdev STP-state attribute handler. Caches the state on the port
 * (so later VLAN additions can re-apply it, see __mlxsw_sp_port_vlans_add)
 * and pushes it to the device in the commit phase.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                            struct switchdev_trans *trans,
                                            u8 state)
{
        /* Nothing to validate or preallocate during prepare. */
        if (switchdev_trans_ph_prepare(trans))
                return 0;

        mlxsw_sp_port->stp_state = state;
        return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}
202
203 static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
204                                            u16 idx_begin, u16 idx_end,
205                                            enum mlxsw_sp_flood_table table,
206                                            bool set)
207 {
208         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
209         u16 local_port = mlxsw_sp_port->local_port;
210         enum mlxsw_flood_table_type table_type;
211         u16 range = idx_end - idx_begin + 1;
212         char *sftr_pl;
213         int err;
214
215         if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
216                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
217         else
218                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
219
220         sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
221         if (!sftr_pl)
222                 return -ENOMEM;
223
224         mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin,
225                             table_type, range, local_port, set);
226         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
227
228         kfree(sftr_pl);
229         return err;
230 }
231
/* Set unicast, broadcast and multicast flooding for an index range in one
 * shot. On failure, tables already configured are rolled back to the
 * opposite (!set) state.
 */
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 idx_begin, u16 idx_end, bool uc_set,
                                     bool bc_set, bool mc_set)
{
        int err;

        err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
                                              MLXSW_SP_FLOOD_TABLE_UC, uc_set);
        if (err)
                return err;

        err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
                                              MLXSW_SP_FLOOD_TABLE_BC, bc_set);
        if (err)
                goto err_flood_bm_set;

        err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
                                              MLXSW_SP_FLOOD_TABLE_MC, mc_set);
        if (err)
                goto err_flood_mc_set;
        return 0;

        /* Unwind in reverse order; each rollback restores the inverse of
         * the value just applied.
         */
err_flood_mc_set:
        __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
                                        MLXSW_SP_FLOOD_TABLE_BC, !bc_set);
err_flood_bm_set:
        __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
                                        MLXSW_SP_FLOOD_TABLE_UC, !uc_set);
        return err;
}
262
263 static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
264                                          enum mlxsw_sp_flood_table table,
265                                          bool set)
266 {
267         struct net_device *dev = mlxsw_sp_port->dev;
268         u16 vid, last_visited_vid;
269         int err;
270
271         if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
272                 u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
273                 u16 vfid = mlxsw_sp_fid_to_vfid(fid);
274
275                 return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid,
276                                                        vfid, table, set);
277         }
278
279         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
280                 err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid,
281                                                       table, set);
282                 if (err) {
283                         last_visited_vid = vid;
284                         goto err_port_flood_set;
285                 }
286         }
287
288         return 0;
289
290 err_port_flood_set:
291         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
292                 __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table,
293                                                 !set);
294         netdev_err(dev, "Failed to configure unicast flooding\n");
295         return err;
296 }
297
298 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
299                                          struct switchdev_trans *trans,
300                                          bool mc_disabled)
301 {
302         int set;
303         int err = 0;
304
305         if (switchdev_trans_ph_prepare(trans))
306                 return 0;
307
308         if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) {
309                 set = mc_disabled ?
310                         mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
311                 err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
312                                                     MLXSW_SP_FLOOD_TABLE_MC,
313                                                     set);
314         }
315
316         if (!err)
317                 mlxsw_sp_port->mc_disabled = mc_disabled;
318
319         return err;
320 }
321
322 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
323                              bool set)
324 {
325         bool mc_set = set;
326         u16 vfid;
327
328         /* In case of vFIDs, index into the flooding table is relative to
329          * the start of the vFIDs range.
330          */
331         vfid = mlxsw_sp_fid_to_vfid(fid);
332
333         if (set)
334                 mc_set = mlxsw_sp_vport->mc_disabled ?
335                          mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router;
336
337         return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set,
338                                          mc_set);
339 }
340
/* Toggle hardware learning for the port. A vPort has exactly one VLAN;
 * a regular port applies the setting to every active VLAN with rollback
 * on failure.
 */
static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      bool set)
{
        u16 vid;
        int err;

        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

                return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
                                                        set);
        }

        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
                err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
                                                       set);
                if (err)
                        goto err_port_vid_learning_set;
        }

        return 0;

err_port_vid_learning_set:
        /* Restore !set on every active VLAN. VLANs past the failing one
         * were never changed, so re-writing !set is a no-op for them.
         */
        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
                __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
        return err;
}
368
/* switchdev bridge-flags handler for BR_FLOOD, BR_LEARNING and
 * BR_LEARNING_SYNC. Only flags whose value changed are pushed to the
 * device; learning_sync is merely cached (no device call is made for it
 * here). On partial failure the flood change is rolled back.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                           struct switchdev_trans *trans,
                                           unsigned long brport_flags)
{
        /* Current settings expressed as bridge flag bits, so that XOR
         * against the requested flags detects actual changes.
         */
        unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
        unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
        int err;

        if (switchdev_trans_ph_prepare(trans))
                return 0;

        if ((uc_flood ^ brport_flags) & BR_FLOOD) {
                err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
                                                    MLXSW_SP_FLOOD_TABLE_UC,
                                                    !mlxsw_sp_port->uc_flood);
                if (err)
                        return err;
        }

        if ((learning ^ brport_flags) & BR_LEARNING) {
                err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
                                                 !mlxsw_sp_port->learning);
                if (err)
                        goto err_port_learning_set;
        }

        /* Commit the new flags only after all device operations succeed. */
        mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
        mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
        mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

        return 0;

err_port_learning_set:
        /* Undo the flood change if one was made above. */
        if ((uc_flood ^ brport_flags) & BR_FLOOD)
                mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
                                              MLXSW_SP_FLOOD_TABLE_UC,
                                              mlxsw_sp_port->uc_flood);
        return err;
}
408
409 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
410 {
411         char sfdat_pl[MLXSW_REG_SFDAT_LEN];
412         int err;
413
414         mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
415         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
416         if (err)
417                 return err;
418         mlxsw_sp->bridge->ageing_time = ageing_time;
419         return 0;
420 }
421
422 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
423                                             struct switchdev_trans *trans,
424                                             unsigned long ageing_clock_t)
425 {
426         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
427         unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
428         u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
429
430         if (switchdev_trans_ph_prepare(trans)) {
431                 if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
432                     ageing_time > MLXSW_SP_MAX_AGEING_TIME)
433                         return -ERANGE;
434                 else
435                         return 0;
436         }
437
438         return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
439 }
440
441 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
442                                           struct switchdev_trans *trans,
443                                           struct net_device *orig_dev,
444                                           bool vlan_enabled)
445 {
446         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
447
448         /* SWITCHDEV_TRANS_PREPARE phase */
449         if ((!vlan_enabled) &&
450             (mlxsw_sp->bridge->master_bridge.dev == orig_dev)) {
451                 netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
452                 return -EINVAL;
453         }
454
455         return 0;
456 }
457
458 static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
459                                             struct switchdev_trans *trans,
460                                             bool is_port_mc_router)
461 {
462         if (switchdev_trans_ph_prepare(trans))
463                 return 0;
464
465         mlxsw_sp_port->mc_router = is_port_mc_router;
466         if (!mlxsw_sp_port->mc_disabled)
467                 return mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
468                                                      MLXSW_SP_FLOOD_TABLE_MC,
469                                                      is_port_mc_router);
470
471         return 0;
472 }
473
474 static int mlxsw_sp_port_attr_set(struct net_device *dev,
475                                   const struct switchdev_attr *attr,
476                                   struct switchdev_trans *trans)
477 {
478         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
479         int err = 0;
480
481         mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
482         if (!mlxsw_sp_port)
483                 return -EINVAL;
484
485         switch (attr->id) {
486         case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
487                 err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
488                                                        attr->u.stp_state);
489                 break;
490         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
491                 err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
492                                                       attr->u.brport_flags);
493                 break;
494         case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
495                 err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
496                                                        attr->u.ageing_time);
497                 break;
498         case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
499                 err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
500                                                      attr->orig_dev,
501                                                      attr->u.vlan_filtering);
502                 break;
503         case SWITCHDEV_ATTR_ID_PORT_MROUTER:
504                 err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
505                                                        attr->u.mrouter);
506                 break;
507         case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
508                 err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
509                                                     attr->u.mc_disabled);
510                 break;
511         default:
512                 err = -EOPNOTSUPP;
513                 break;
514         }
515
516         return err;
517 }
518
519 static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
520 {
521         char sfmr_pl[MLXSW_REG_SFMR_LEN];
522
523         mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
524         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
525 }
526
527 static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
528 {
529         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
530         char svfa_pl[MLXSW_REG_SVFA_LEN];
531
532         mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
533         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
534 }
535
536 static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
537 {
538         struct mlxsw_sp_fid *f;
539
540         f = kzalloc(sizeof(*f), GFP_KERNEL);
541         if (!f)
542                 return NULL;
543
544         f->fid = fid;
545
546         return f;
547 }
548
/* Create a FID in the device, install its global VID-to-FID mapping and
 * track it on the device's FID list. Returns the tracking structure or
 * ERR_PTR() on failure, with all device state unwound.
 */
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
        struct mlxsw_sp_fid *f;
        int err;

        err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
        if (err)
                return ERR_PTR(err);

        /* Although all the ports member in the FID might be using a
         * {Port, VID} to FID mapping, we create a global VID-to-FID
         * mapping. This allows a port to transition to VLAN mode,
         * knowing the global mapping exists.
         */
        err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
        if (err)
                goto err_fid_map;

        f = mlxsw_sp_fid_alloc(fid);
        if (!f) {
                err = -ENOMEM;
                goto err_allocate_fid;
        }

        list_add(&f->list, &mlxsw_sp->fids);

        return f;

        /* Unwind in reverse order of the operations above. */
err_allocate_fid:
        mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
        mlxsw_sp_fid_op(mlxsw_sp, fid, false);
        return ERR_PTR(err);
}
583
/* Tear down a FID: unlink it, destroy its bridge RIF (if any), free the
 * tracking structure and finally remove the mapping and the FID from the
 * device.
 */
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
{
        u16 fid = f->fid;       /* cache: f is freed before the device ops */

        list_del(&f->list);

        if (f->rif)
                mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);

        kfree(f);

        mlxsw_sp_fid_map(mlxsw_sp, fid, false);

        mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}
599
600 static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
601                                     u16 fid)
602 {
603         struct mlxsw_sp_fid *f;
604
605         if (test_bit(fid, mlxsw_sp_port->active_vlans))
606                 return 0;
607
608         f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
609         if (!f) {
610                 f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
611                 if (IS_ERR(f))
612                         return PTR_ERR(f);
613         }
614
615         f->ref_count++;
616
617         netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);
618
619         return 0;
620 }
621
622 static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
623                                       u16 fid)
624 {
625         struct mlxsw_sp_fid *f;
626
627         f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
628         if (WARN_ON(!f))
629                 return;
630
631         netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);
632
633         mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);
634
635         if (--f->ref_count == 0)
636                 mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
637 }
638
639 static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
640                                  bool valid)
641 {
642         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
643
644         /* If port doesn't have vPorts, then it can use the global
645          * VID-to-FID mapping.
646          */
647         if (list_empty(&mlxsw_sp_port->vports_list))
648                 return 0;
649
650         return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
651 }
652
653 static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
654                                   u16 fid_begin, u16 fid_end)
655 {
656         bool mc_flood;
657         int fid, err;
658
659         for (fid = fid_begin; fid <= fid_end; fid++) {
660                 err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
661                 if (err)
662                         goto err_port_fid_join;
663         }
664
665         mc_flood = mlxsw_sp_port->mc_disabled ?
666                         mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
667
668         err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
669                                         mlxsw_sp_port->uc_flood, true,
670                                         mc_flood);
671         if (err)
672                 goto err_port_flood_set;
673
674         for (fid = fid_begin; fid <= fid_end; fid++) {
675                 err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
676                 if (err)
677                         goto err_port_fid_map;
678         }
679
680         return 0;
681
682 err_port_fid_map:
683         for (fid--; fid >= fid_begin; fid--)
684                 mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
685         __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
686                                   false, false);
687 err_port_flood_set:
688         fid = fid_end;
689 err_port_fid_join:
690         for (fid--; fid >= fid_begin; fid--)
691                 __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
692         return err;
693 }
694
/* Leave a FID range — mirror order of mlxsw_sp_port_fid_join(): drop the
 * per-port mappings, disable flooding, then release the FID references.
 */
static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                    u16 fid_begin, u16 fid_end)
{
        int fid;

        for (fid = fid_begin; fid <= fid_end; fid++)
                mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

        __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
                                  false, false);

        for (fid = fid_begin; fid <= fid_end; fid++)
                __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}
709
710 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
711                                     u16 vid)
712 {
713         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
714         char spvid_pl[MLXSW_REG_SPVID_LEN];
715
716         mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
717         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
718 }
719
720 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
721                                             bool allow)
722 {
723         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
724         char spaft_pl[MLXSW_REG_SPAFT_LEN];
725
726         mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
727         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
728 }
729
/* Set (or, with vid == 0, clear) the port's PVID. Clearing disallows
 * untagged traffic instead of programming a PVID; setting programs the
 * PVID and re-allows untagged traffic if it was disallowed. The cached
 * pvid is updated only after the device accepts the change.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
        struct net_device *dev = mlxsw_sp_port->dev;
        int err;

        if (!vid) {
                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
                if (err) {
                        netdev_err(dev, "Failed to disallow untagged traffic\n");
                        return err;
                }
        } else {
                err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
                if (err) {
                        netdev_err(dev, "Failed to set PVID\n");
                        return err;
                }

                /* Only allow if not already allowed. */
                if (!mlxsw_sp_port->pvid) {
                        err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
                                                               true);
                        if (err) {
                                netdev_err(dev, "Failed to allow untagged traffic\n");
                                goto err_port_allow_untagged_set;
                        }
                }
        }

        mlxsw_sp_port->pvid = vid;
        return 0;

err_port_allow_untagged_set:
        /* Restore the previous PVID in the device. */
        __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
        return err;
}
766
767 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
768                                           u16 vid_begin, u16 vid_end,
769                                           bool learn_enable)
770 {
771         u16 vid, vid_e;
772         int err;
773
774         for (vid = vid_begin; vid <= vid_end;
775              vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
776                 vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
777                             vid_end);
778
779                 err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
780                                                        vid_e, learn_enable);
781                 if (err)
782                         return err;
783         }
784
785         return 0;
786 }
787
/* Add VIDs [vid_begin, vid_end] to the port, optionally egress-untagged
 * and optionally making vid_begin the new PVID.  On failure, the steps
 * already applied are rolled back in reverse order via the error labels.
 */
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 vid_begin, u16 vid_end,
                                     bool flag_untagged, bool flag_pvid)
{
        struct net_device *dev = mlxsw_sp_port->dev;
        u16 vid, old_pvid;
        int err;

        /* Join FIDs before enabling the VLANs themselves. */
        err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
        if (err) {
                netdev_err(dev, "Failed to join FIDs\n");
                return err;
        }

        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
                                     true, flag_untagged);
        if (err) {
                netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
                           vid_end);
                goto err_port_vlans_set;
        }

        old_pvid = mlxsw_sp_port->pvid;
        if (flag_pvid && old_pvid != vid_begin) {
                err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
                if (err) {
                        netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
                        goto err_port_pvid_set;
                }
        } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
                /* The current PVID falls inside a range that is no longer
                 * flagged as PVID, so clear it.  NOTE(review): PVID 0 is
                 * assumed to mean "no PVID" — confirm against
                 * mlxsw_sp_port_pvid_set().
                 */
                err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
                if (err) {
                        netdev_err(dev, "Unable to del PVID\n");
                        goto err_port_pvid_set;
                }
        }

        err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
                                             mlxsw_sp_port->learning);
        if (err) {
                netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
                           vid_begin, vid_end);
                goto err_port_vid_learning_set;
        }

        /* Changing activity bits only if HW operation succeeded */
        for (vid = vid_begin; vid <= vid_end; vid++) {
                set_bit(vid, mlxsw_sp_port->active_vlans);
                if (flag_untagged)
                        set_bit(vid, mlxsw_sp_port->untagged_vlans);
                else
                        clear_bit(vid, mlxsw_sp_port->untagged_vlans);
        }

        /* STP state change must be done after we set active VLANs */
        err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
                                          mlxsw_sp_port->stp_state);
        if (err) {
                netdev_err(dev, "Failed to set STP state\n");
                goto err_port_stp_state_set;
        }

        return 0;

err_port_stp_state_set:
        for (vid = vid_begin; vid <= vid_end; vid++)
                clear_bit(vid, mlxsw_sp_port->active_vlans);
        mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
                                       false);
err_port_vid_learning_set:
        /* Restore the old PVID only if one of the branches above changed it. */
        if (old_pvid != mlxsw_sp_port->pvid)
                mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
                               false, false);
err_port_vlans_set:
        mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
        return err;
}
867
868 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
869                                    const struct switchdev_obj_port_vlan *vlan,
870                                    struct switchdev_trans *trans)
871 {
872         bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
873         bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
874
875         if (switchdev_trans_ph_prepare(trans))
876                 return 0;
877
878         return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
879                                          vlan->vid_begin, vlan->vid_end,
880                                          flag_untagged, flag_pvid);
881 }
882
883 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
884 {
885         return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
886                          MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
887 }
888
889 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
890 {
891         return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
892                         MLXSW_REG_SFD_OP_WRITE_REMOVE;
893 }
894
895 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
896                                      const char *mac, u16 fid, bool adding,
897                                      enum mlxsw_reg_sfd_rec_action action,
898                                      bool dynamic)
899 {
900         char *sfd_pl;
901         int err;
902
903         sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
904         if (!sfd_pl)
905                 return -ENOMEM;
906
907         mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
908         mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
909                               mac, fid, action, local_port);
910         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
911         kfree(sfd_pl);
912
913         return err;
914 }
915
/* Add/remove a unicast FDB record for a local port with the default
 * (NOP) record action.
 */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                   const char *mac, u16 fid, bool adding,
                                   bool dynamic)
{
        return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
                                         MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}
923
/* Add/remove a static FDB record whose action forwards matching traffic
 * to the IP router.  Non-static: called from outside this file.
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
                        bool adding)
{
        return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
                                         MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
                                         false);
}
931
932 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
933                                        const char *mac, u16 fid, u16 lag_vid,
934                                        bool adding, bool dynamic)
935 {
936         char *sfd_pl;
937         int err;
938
939         sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
940         if (!sfd_pl)
941                 return -ENOMEM;
942
943         mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
944         mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
945                                   mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
946                                   lag_vid, lag_id);
947         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
948         kfree(sfd_pl);
949
950         return err;
951 }
952
953 static int
954 mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
955                              const struct switchdev_obj_port_fdb *fdb,
956                              struct switchdev_trans *trans)
957 {
958         u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
959         u16 lag_vid = 0;
960
961         if (switchdev_trans_ph_prepare(trans))
962                 return 0;
963
964         if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
965                 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
966         }
967
968         if (!mlxsw_sp_port->lagged)
969                 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
970                                                mlxsw_sp_port->local_port,
971                                                fdb->addr, fid, true, false);
972         else
973                 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
974                                                    mlxsw_sp_port->lag_id,
975                                                    fdb->addr, fid, lag_vid,
976                                                    true, false);
977 }
978
979 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
980                                 u16 fid, u16 mid, bool adding)
981 {
982         char *sfd_pl;
983         int err;
984
985         sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
986         if (!sfd_pl)
987                 return -ENOMEM;
988
989         mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
990         mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
991                               MLXSW_REG_SFD_REC_ACTION_NOP, mid);
992         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
993         kfree(sfd_pl);
994         return err;
995 }
996
997 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
998                                   bool add, bool clear_all_ports)
999 {
1000         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1001         char *smid_pl;
1002         int err, i;
1003
1004         smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1005         if (!smid_pl)
1006                 return -ENOMEM;
1007
1008         mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
1009         if (clear_all_ports) {
1010                 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1011                         if (mlxsw_sp->ports[i])
1012                                 mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
1013         }
1014         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1015         kfree(smid_pl);
1016         return err;
1017 }
1018
1019 static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
1020                                               const unsigned char *addr,
1021                                               u16 fid)
1022 {
1023         struct mlxsw_sp_mid *mid;
1024
1025         list_for_each_entry(mid, &mlxsw_sp->bridge->mids_list, list) {
1026                 if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1027                         return mid;
1028         }
1029         return NULL;
1030 }
1031
/* Allocate a new MC group for @addr/@fid: reserve a free MID index from
 * the bridge bitmap and add the entry to the MID list with ref_count 0
 * (the caller takes the first reference).  Returns NULL when the MID
 * table is exhausted or on allocation failure.
 */
static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
                                                const unsigned char *addr,
                                                u16 fid)
{
        struct mlxsw_sp_mid *mid;
        u16 mid_idx;

        /* find_first_zero_bit() returns the bitmap size when every bit is
         * set, so MLXSW_SP_MID_MAX means no free index.
         */
        mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
                                      MLXSW_SP_MID_MAX);
        if (mid_idx == MLXSW_SP_MID_MAX)
                return NULL;

        mid = kzalloc(sizeof(*mid), GFP_KERNEL);
        if (!mid)
                return NULL;

        /* Reserve the index only after the allocation can no longer fail. */
        set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
        ether_addr_copy(mid->addr, addr);
        mid->fid = fid;
        mid->mid = mid_idx;
        mid->ref_count = 0;
        list_add_tail(&mid->list, &mlxsw_sp->bridge->mids_list);

        return mid;
}
1057
1058 static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
1059                                  struct mlxsw_sp_mid *mid)
1060 {
1061         if (--mid->ref_count == 0) {
1062                 list_del(&mid->list);
1063                 clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1064                 kfree(mid);
1065                 return 1;
1066         }
1067         return 0;
1068 }
1069
/* Commit-phase handler adding the port to the MC group for @mdb->addr in
 * the FID mapped from @mdb->vid.  The group (MID) is created on first
 * use; the SFD multicast record is only written when this port takes the
 * group's first reference.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
                                 const struct switchdev_obj_port_mdb *mdb,
                                 struct switchdev_trans *trans)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct net_device *dev = mlxsw_sp_port->dev;
        struct mlxsw_sp_mid *mid;
        u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
        int err = 0;

        if (switchdev_trans_ph_prepare(trans))
                return 0;

        mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
        if (!mid) {
                mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
                if (!mid) {
                        netdev_err(dev, "Unable to allocate MC group\n");
                        return -ENOMEM;
                }
        }
        mid->ref_count++;

        /* On the first reference, clear_all_ports is passed so the SMID
         * write also covers every other port — presumably to reset stale
         * membership; confirm against mlxsw_sp_port_smid_set().
         */
        err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
                                     mid->ref_count == 1);
        if (err) {
                netdev_err(dev, "Unable to set SMID\n");
                goto err_out;
        }

        if (mid->ref_count == 1) {
                err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
                                           true);
                if (err) {
                        netdev_err(dev, "Unable to set MC SFD\n");
                        goto err_out;
                }
        }

        return 0;

err_out:
        /* Drop the reference taken above; frees the MID if it was new. */
        __mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
        return err;
}
1115
1116 static int mlxsw_sp_port_obj_add(struct net_device *dev,
1117                                  const struct switchdev_obj *obj,
1118                                  struct switchdev_trans *trans)
1119 {
1120         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1121         int err = 0;
1122
1123         mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
1124         if (!mlxsw_sp_port)
1125                 return -EINVAL;
1126
1127         switch (obj->id) {
1128         case SWITCHDEV_OBJ_ID_PORT_VLAN:
1129                 if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
1130                         return 0;
1131
1132                 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
1133                                               SWITCHDEV_OBJ_PORT_VLAN(obj),
1134                                               trans);
1135                 break;
1136         case SWITCHDEV_OBJ_ID_PORT_FDB:
1137                 err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
1138                                                    SWITCHDEV_OBJ_PORT_FDB(obj),
1139                                                    trans);
1140                 break;
1141         case SWITCHDEV_OBJ_ID_PORT_MDB:
1142                 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1143                                             SWITCHDEV_OBJ_PORT_MDB(obj),
1144                                             trans);
1145                 break;
1146         default:
1147                 err = -EOPNOTSUPP;
1148                 break;
1149         }
1150
1151         return err;
1152 }
1153
/* Remove VIDs [vid_begin, vid_end] from the port: disable learning,
 * clear the PVID if it falls inside the range, remove the VLANs and
 * leave the associated FIDs.  Teardown step errors are ignored; the
 * function always returns 0.
 */
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 vid_begin, u16 vid_end)
{
        u16 vid, pvid;

        mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
                                       false);

        pvid = mlxsw_sp_port->pvid;
        if (pvid >= vid_begin && pvid <= vid_end)
                mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);

        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
                               false, false);

        mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

        /* Changing activity bits only if HW operation succeeded.
         * NOTE(review): the HW calls above are not actually checked here,
         * so the bits are cleared unconditionally — original comment kept
         * for context.
         */
        for (vid = vid_begin; vid <= vid_end; vid++)
                clear_bit(vid, mlxsw_sp_port->active_vlans);

        return 0;
}
1177
/* Switchdev wrapper: delete the VLAN range described by @vlan. */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
                                   const struct switchdev_obj_port_vlan *vlan)
{
        return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
                                         vlan->vid_end);
}
1184
/* Delete every VLAN currently marked active on the port, one VID at a
 * time.  Non-static: called from outside this file.
 */
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
        u16 vid;

        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
                __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}
1192
1193 static int
1194 mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
1195                              const struct switchdev_obj_port_fdb *fdb)
1196 {
1197         u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
1198         u16 lag_vid = 0;
1199
1200         if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
1201                 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1202         }
1203
1204         if (!mlxsw_sp_port->lagged)
1205                 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
1206                                                mlxsw_sp_port->local_port,
1207                                                fdb->addr, fid,
1208                                                false, false);
1209         else
1210                 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
1211                                                    mlxsw_sp_port->lag_id,
1212                                                    fdb->addr, fid, lag_vid,
1213                                                    false, false);
1214 }
1215
/* Remove the port from the MC group for @mdb->addr/FID.  An SMID update
 * failure is logged but does not stop the teardown; when the last
 * reference is dropped, the multicast SFD record is removed as well.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
                                 const struct switchdev_obj_port_mdb *mdb)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct net_device *dev = mlxsw_sp_port->dev;
        struct mlxsw_sp_mid *mid;
        u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
        u16 mid_idx;
        int err = 0;

        mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
        if (!mid) {
                netdev_err(dev, "Unable to remove port from MC DB\n");
                return -EINVAL;
        }

        err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
        if (err)
                netdev_err(dev, "Unable to remove port from SMID\n");

        /* Save the index first: __mlxsw_sp_mc_dec_ref() frees @mid when
         * it drops the last reference.
         */
        mid_idx = mid->mid;
        if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
                err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
                                           false);
                if (err)
                        netdev_err(dev, "Unable to remove MC SFD\n");
        }

        return err;
}
1246
1247 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1248                                  const struct switchdev_obj *obj)
1249 {
1250         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1251         int err = 0;
1252
1253         mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
1254         if (!mlxsw_sp_port)
1255                 return -EINVAL;
1256
1257         switch (obj->id) {
1258         case SWITCHDEV_OBJ_ID_PORT_VLAN:
1259                 if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
1260                         return 0;
1261
1262                 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1263                                               SWITCHDEV_OBJ_PORT_VLAN(obj));
1264                 break;
1265         case SWITCHDEV_OBJ_ID_PORT_FDB:
1266                 err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
1267                                                    SWITCHDEV_OBJ_PORT_FDB(obj));
1268                 break;
1269         case SWITCHDEV_OBJ_ID_PORT_MDB:
1270                 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1271                                             SWITCHDEV_OBJ_PORT_MDB(obj));
1272                 break;
1273         default:
1274                 err = -EOPNOTSUPP;
1275                 break;
1276         }
1277
1278         return err;
1279 }
1280
1281 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1282                                                    u16 lag_id)
1283 {
1284         struct mlxsw_sp_port *mlxsw_sp_port;
1285         u64 max_lag_members;
1286         int i;
1287
1288         max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1289                                              MAX_LAG_MEMBERS);
1290         for (i = 0; i < max_lag_members; i++) {
1291                 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1292                 if (mlxsw_sp_port)
1293                         return mlxsw_sp_port;
1294         }
1295         return NULL;
1296 }
1297
/* Dump the HW FDB and report, via @cb, the unicast entries that belong
 * to @mlxsw_sp_port (or to its LAG).  The firmware dump session must be
 * driven to completion even after a callback error, so the loop keeps
 * querying and only skips record processing once an error is stored.
 */
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct switchdev_obj_port_fdb *fdb,
                                  switchdev_obj_dump_cb_t *cb,
                                  struct net_device *orig_dev)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port *tmp;
        struct mlxsw_sp_fid *f;
        u16 vport_fid;          /* vPort's FID, or 0 when none is bound */
        char *sfd_pl;
        char mac[ETH_ALEN];
        u16 fid;
        u8 local_port;
        u16 lag_id;
        u8 num_rec;
        int stored_err = 0;     /* first callback error, returned at the end */
        int i;
        int err;

        sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
        if (!sfd_pl)
                return -ENOMEM;

        f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
        vport_fid = f ? f->fid : 0;

        mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
        do {
                mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
                err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
                if (err)
                        goto out;

                num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

                /* Even in case of error, we have to run the dump to the end
                 * so the session in firmware is finished.
                 */
                if (stored_err)
                        continue;

                for (i = 0; i < num_rec; i++) {
                        switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
                        case MLXSW_REG_SFD_REC_TYPE_UNICAST:
                                mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
                                                        &local_port);
                                if (local_port == mlxsw_sp_port->local_port) {
                                        /* Map the FID back to a VID: 0 for
                                         * the vPort's own FID, the FID value
                                         * for regular (non-vFID) entries;
                                         * skip everything else.
                                         */
                                        if (vport_fid && vport_fid == fid)
                                                fdb->vid = 0;
                                        else if (!vport_fid &&
                                                 !mlxsw_sp_fid_is_vfid(fid))
                                                fdb->vid = fid;
                                        else
                                                continue;
                                        ether_addr_copy(fdb->addr, mac);
                                        fdb->ndm_state = NUD_REACHABLE;
                                        err = cb(&fdb->obj);
                                        if (err)
                                                stored_err = err;
                                }
                                break;
                        case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
                                mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
                                                            mac, &fid, &lag_id);
                                tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
                                if (tmp && tmp->local_port ==
                                    mlxsw_sp_port->local_port) {
                                        /* LAG records can only point to LAG
                                         * devices or VLAN devices on top.
                                         */
                                        if (!netif_is_lag_master(orig_dev) &&
                                            !is_vlan_dev(orig_dev))
                                                continue;
                                        if (vport_fid && vport_fid == fid)
                                                fdb->vid = 0;
                                        else if (!vport_fid &&
                                                 !mlxsw_sp_fid_is_vfid(fid))
                                                fdb->vid = fid;
                                        else
                                                continue;
                                        ether_addr_copy(fdb->addr, mac);
                                        fdb->ndm_state = NUD_REACHABLE;
                                        err = cb(&fdb->obj);
                                        if (err)
                                                stored_err = err;
                                }
                                break;
                        }
                }
        } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
        kfree(sfd_pl);
        return stored_err ? stored_err : err;
}
1393
1394 static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
1395                                    struct switchdev_obj_port_vlan *vlan,
1396                                    switchdev_obj_dump_cb_t *cb)
1397 {
1398         u16 vid;
1399         int err = 0;
1400
1401         if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
1402                 vlan->flags = 0;
1403                 vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1404                 vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1405                 return cb(&vlan->obj);
1406         }
1407
1408         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
1409                 vlan->flags = 0;
1410                 if (vid == mlxsw_sp_port->pvid)
1411                         vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1412                 if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
1413                         vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1414                 vlan->vid_begin = vid;
1415                 vlan->vid_end = vid;
1416                 err = cb(&vlan->obj);
1417                 if (err)
1418                         break;
1419         }
1420         return err;
1421 }
1422
1423 static int mlxsw_sp_port_obj_dump(struct net_device *dev,
1424                                   struct switchdev_obj *obj,
1425                                   switchdev_obj_dump_cb_t *cb)
1426 {
1427         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1428         int err = 0;
1429
1430         mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
1431         if (!mlxsw_sp_port)
1432                 return -EINVAL;
1433
1434         switch (obj->id) {
1435         case SWITCHDEV_OBJ_ID_PORT_VLAN:
1436                 err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
1437                                               SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
1438                 break;
1439         case SWITCHDEV_OBJ_ID_PORT_FDB:
1440                 err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
1441                                              SWITCHDEV_OBJ_PORT_FDB(obj), cb,
1442                                              obj->orig_dev);
1443                 break;
1444         default:
1445                 err = -EOPNOTSUPP;
1446                 break;
1447         }
1448
1449         return err;
1450 }
1451
/* switchdev callbacks registered on Spectrum port netdevs. */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
        .switchdev_port_attr_get        = mlxsw_sp_port_attr_get,
        .switchdev_port_attr_set        = mlxsw_sp_port_attr_set,
        .switchdev_port_obj_add         = mlxsw_sp_port_obj_add,
        .switchdev_port_obj_del         = mlxsw_sp_port_obj_del,
        .switchdev_port_obj_dump        = mlxsw_sp_port_obj_dump,
};
1459
1460 static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
1461                                         char *mac, u16 vid,
1462                                         struct net_device *dev)
1463 {
1464         struct switchdev_notifier_fdb_info info;
1465         unsigned long notifier_type;
1466
1467         if (learning_sync) {
1468                 info.addr = mac;
1469                 info.vid = vid;
1470                 notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
1471                 call_switchdev_notifiers(notifier_type, dev, &info.info);
1472         }
1473 }
1474
/* Handle a learned/aged-out MAC notification for a physical port:
 * resolve the port (or the vPort owning a vFID), re-program the FDB
 * entry in HW with a dynamic policy, and notify the bridge when learning
 * sync is enabled.  Unresolvable entries are removed from HW silently.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
                                            char *sfn_pl, int rec_index,
                                            bool adding)
{
        struct mlxsw_sp_port *mlxsw_sp_port;
        char mac[ETH_ALEN];
        u8 local_port;
        u16 vid, fid;
        bool do_notification = true;
        int err;

        mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
                dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
                goto just_remove;
        }

        if (mlxsw_sp_fid_is_vfid(fid)) {
                struct mlxsw_sp_port *mlxsw_sp_vport;

                mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
                                                                 fid);
                if (!mlxsw_sp_vport) {
                        netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
                        goto just_remove;
                }
                vid = 0;
                /* Override the physical port with the vPort. */
                mlxsw_sp_port = mlxsw_sp_vport;
        } else {
                vid = fid;
        }

do_fdb_op:
        /* Last argument requests the dynamic record policy. */
        err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
                                      adding, true);
        if (err) {
                /* NOTE(review): on the just_remove path taken for an
                 * unknown local port, mlxsw_sp_port is NULL here — this
                 * netdev_err would dereference it; confirm reachability.
                 */
                if (net_ratelimit())
                        netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
                return;
        }

        if (!do_notification)
                return;
        mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
                                    adding, mac, vid, mlxsw_sp_port->dev);
        return;

just_remove:
        /* Entry cannot be mapped to a port: remove it from HW without
         * notifying the bridge.
         */
        adding = false;
        do_notification = false;
        goto do_fdb_op;
}
1529
/* LAG counterpart of mlxsw_sp_fdb_notify_mac_process(): resolve a LAG
 * representative port (or the vPort owning a vFID), re-program the LAG
 * FDB entry in HW with a dynamic policy, and notify the bridge when
 * learning sync is enabled.  Unresolvable entries are removed silently.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
                                                char *sfn_pl, int rec_index,
                                                bool adding)
{
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct net_device *dev;
        char mac[ETH_ALEN];
        u16 lag_vid = 0;
        u16 lag_id;
        u16 vid, fid;
        bool do_notification = true;
        int err;

        mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
        mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
        if (!mlxsw_sp_port) {
                dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
                goto just_remove;
        }

        if (mlxsw_sp_fid_is_vfid(fid)) {
                struct mlxsw_sp_port *mlxsw_sp_vport;

                mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
                                                                 fid);
                if (!mlxsw_sp_vport) {
                        netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
                        goto just_remove;
                }

                /* The vPort's VID is written back into the LAG record. */
                lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
                dev = mlxsw_sp_vport->dev;
                vid = 0;
                /* Override the physical port with the vPort. */
                mlxsw_sp_port = mlxsw_sp_vport;
        } else {
                dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
                vid = fid;
        }

do_fdb_op:
        /* Last argument requests the dynamic record policy. */
        err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
                                          adding, true);
        if (err) {
                /* NOTE(review): on the just_remove path taken when no LAG
                 * representor exists, mlxsw_sp_port is NULL here — this
                 * netdev_err would dereference it; confirm reachability.
                 */
                if (net_ratelimit())
                        netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
                return;
        }

        if (!do_notification)
                return;
        mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
                                    vid, dev);
        return;

just_remove:
        /* Entry cannot be mapped to a port: remove it from HW without
         * notifying the bridge.
         */
        adding = false;
        do_notification = false;
        goto do_fdb_op;
}
1590
1591 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
1592                                             char *sfn_pl, int rec_index)
1593 {
1594         switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
1595         case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
1596                 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
1597                                                 rec_index, true);
1598                 break;
1599         case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
1600                 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
1601                                                 rec_index, false);
1602                 break;
1603         case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
1604                 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
1605                                                     rec_index, true);
1606                 break;
1607         case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
1608                 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
1609                                                     rec_index, false);
1610                 break;
1611         }
1612 }
1613
1614 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
1615 {
1616         struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
1617
1618         mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
1619                                msecs_to_jiffies(bridge->fdb_notify.interval));
1620 }
1621
1622 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
1623 {
1624         struct mlxsw_sp_bridge *bridge;
1625         struct mlxsw_sp *mlxsw_sp;
1626         char *sfn_pl;
1627         u8 num_rec;
1628         int i;
1629         int err;
1630
1631         sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
1632         if (!sfn_pl)
1633                 return;
1634
1635         bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
1636         mlxsw_sp = bridge->mlxsw_sp;
1637
1638         rtnl_lock();
1639         mlxsw_reg_sfn_pack(sfn_pl);
1640         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
1641         if (err) {
1642                 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
1643                 goto out;
1644         }
1645         num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
1646         for (i = 0; i < num_rec; i++)
1647                 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
1648
1649 out:
1650         rtnl_unlock();
1651         kfree(sfn_pl);
1652         mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
1653 }
1654
1655 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
1656 {
1657         struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
1658         int err;
1659
1660         err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
1661         if (err) {
1662                 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
1663                 return err;
1664         }
1665         INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
1666         bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
1667         mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
1668         return 0;
1669 }
1670
1671 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
1672 {
1673         cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
1674 }
1675
1676 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
1677 {
1678         struct mlxsw_sp_bridge *bridge;
1679
1680         bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
1681         if (!bridge)
1682                 return -ENOMEM;
1683         mlxsw_sp->bridge = bridge;
1684         bridge->mlxsw_sp = mlxsw_sp;
1685
1686         INIT_LIST_HEAD(&mlxsw_sp->bridge->mids_list);
1687
1688         return mlxsw_sp_fdb_init(mlxsw_sp);
1689 }
1690
1691 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
1692 {
1693         mlxsw_sp_fdb_fini(mlxsw_sp);
1694         WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
1695         kfree(mlxsw_sp->bridge);
1696 }
1697
1698 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
1699 {
1700         mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
1701 }
1702
/* Counterpart of mlxsw_sp_port_switchdev_init(). Intentionally empty:
 * there is currently no per-port switchdev state to release.
 */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}