X-Git-Url: https://git.kernelconcepts.de/?a=blobdiff_plain;f=drivers%2Fnet%2Fethernet%2Fmellanox%2Fmlxsw%2Fspectrum_switchdev.c;h=b17b224f2b1cbb019870f320242a02e4f5e9700c;hb=c57529e1d5d8;hp=ea0f4a5787c38e5639b2fcd362051ecda8a1d906;hpb=c6cd850d6568872f66fc4a4582f34f9b910066ca;p=karo-tx-linux.git diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index ea0f4a5787c3..b17b224f2b1c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -52,6 +52,8 @@ #include "core.h" #include "reg.h" +struct mlxsw_sp_bridge_ops; + struct mlxsw_sp_bridge { struct mlxsw_sp *mlxsw_sp; struct { @@ -63,58 +65,376 @@ struct mlxsw_sp_bridge { #define MLXSW_SP_MAX_AGEING_TIME 1000000 #define MLXSW_SP_DEFAULT_AGEING_TIME 300 u32 ageing_time; - struct mlxsw_sp_upper master_bridge; + bool vlan_enabled_exists; + struct list_head bridges_list; struct list_head mids_list; DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX); + const struct mlxsw_sp_bridge_ops *bridge_8021q_ops; + const struct mlxsw_sp_bridge_ops *bridge_8021d_ops; +}; + +struct mlxsw_sp_bridge_device { + struct net_device *dev; + struct list_head list; + struct list_head ports_list; + u8 vlan_enabled:1, + multicast_enabled:1; + const struct mlxsw_sp_bridge_ops *ops; +}; + +struct mlxsw_sp_bridge_port { + struct net_device *dev; + struct mlxsw_sp_bridge_device *bridge_device; + struct list_head list; + struct list_head vlans_list; + unsigned int ref_count; + u8 stp_state; + unsigned long flags; + bool mrouter; + bool lagged; + union { + u16 lag_id; + u16 system_port; + }; +}; + +struct mlxsw_sp_bridge_vlan { + struct list_head list; + struct list_head port_vlan_list; + u16 vid; + u8 egress_untagged:1, + pvid:1; }; -struct mlxsw_sp_upper *mlxsw_sp_master_bridge(const struct mlxsw_sp *mlxsw_sp) +struct mlxsw_sp_bridge_ops { + int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port); + void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port); +}; + +static int +mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_port *bridge_port, + u16 fid_index); + +static struct mlxsw_sp_bridge_device * +mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge, + const struct net_device *br_dev) { - return &mlxsw_sp->bridge->master_bridge; + struct mlxsw_sp_bridge_device *bridge_device; + + list_for_each_entry(bridge_device, &bridge->bridges_list, list) + if (bridge_device->dev == br_dev) + return bridge_device; + + return NULL; } -static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port, - u16 vid) +static struct mlxsw_sp_bridge_device * +mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, + struct net_device *br_dev) { - struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port); - u16 fid = vid; + struct device *dev = bridge->mlxsw_sp->bus_info->dev; + struct mlxsw_sp_bridge_device *bridge_device; + bool vlan_enabled = br_vlan_enabled(br_dev); - fid = f ? 
f->fid : fid; + if (vlan_enabled && bridge->vlan_enabled_exists) { + dev_err(dev, "Only one VLAN-aware bridge is supported\n"); + return ERR_PTR(-EINVAL); + } - if (!fid) - fid = mlxsw_sp_port->pvid; + bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL); + if (!bridge_device) + return ERR_PTR(-ENOMEM); + + bridge_device->dev = br_dev; + bridge_device->vlan_enabled = vlan_enabled; + bridge_device->multicast_enabled = br_multicast_enabled(br_dev); + INIT_LIST_HEAD(&bridge_device->ports_list); + if (vlan_enabled) { + bridge->vlan_enabled_exists = true; + bridge_device->ops = bridge->bridge_8021q_ops; + } else { + bridge_device->ops = bridge->bridge_8021d_ops; + } + list_add(&bridge_device->list, &bridge->bridges_list); - return fid; + return bridge_device; } -static struct mlxsw_sp_port * -mlxsw_sp_port_orig_get(struct net_device *dev, - struct mlxsw_sp_port *mlxsw_sp_port) +static void +mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, + struct mlxsw_sp_bridge_device *bridge_device) { - struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_fid *fid; - u16 vid; + list_del(&bridge_device->list); + if (bridge_device->vlan_enabled) + bridge->vlan_enabled_exists = false; + WARN_ON(!list_empty(&bridge_device->ports_list)); + kfree(bridge_device); +} - if (netif_is_bridge_master(dev)) { - fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp, - dev); - if (fid) { - mlxsw_sp_vport = - mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, - fid->fid); - WARN_ON(!mlxsw_sp_vport); - return mlxsw_sp_vport; - } +static struct mlxsw_sp_bridge_device * +mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge, + struct net_device *br_dev) +{ + struct mlxsw_sp_bridge_device *bridge_device; + + bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev); + if (bridge_device) + return bridge_device; + + return mlxsw_sp_bridge_device_create(bridge, br_dev); +} + +static void +mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge, + struct mlxsw_sp_bridge_device *bridge_device) +{ + if (list_empty(&bridge_device->ports_list)) + mlxsw_sp_bridge_device_destroy(bridge, bridge_device); +} + +static struct mlxsw_sp_bridge_port * +__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *brport_dev) +{ + struct mlxsw_sp_bridge_port *bridge_port; + + list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { + if (bridge_port->dev == brport_dev) + return bridge_port; + } + + return NULL; +} + +static struct mlxsw_sp_bridge_port * +mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge, + struct net_device *brport_dev) +{ + struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev); + struct mlxsw_sp_bridge_device *bridge_device; + + if (!br_dev) + return NULL; + + bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev); + if (!bridge_device) + return NULL; + + return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev); +} + +static struct mlxsw_sp_bridge_port * +mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, + struct net_device *brport_dev) +{ + struct mlxsw_sp_bridge_port *bridge_port; + struct mlxsw_sp_port *mlxsw_sp_port; + + bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL); + if (!bridge_port) + return NULL; + + mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev); + bridge_port->lagged = mlxsw_sp_port->lagged; + if (bridge_port->lagged) + bridge_port->lag_id = mlxsw_sp_port->lag_id; + else + bridge_port->system_port = mlxsw_sp_port->local_port; + bridge_port->dev = brport_dev; + 
bridge_port->bridge_device = bridge_device; + bridge_port->stp_state = BR_STATE_DISABLED; + bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC; + INIT_LIST_HEAD(&bridge_port->vlans_list); + list_add(&bridge_port->list, &bridge_device->ports_list); + bridge_port->ref_count = 1; + + return bridge_port; +} + +static void +mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port) +{ + list_del(&bridge_port->list); + WARN_ON(!list_empty(&bridge_port->vlans_list)); + kfree(bridge_port); +} + +static bool +mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port * + bridge_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev); + + /* In case ports were pulled from out of a bridged LAG, then + * it's possible the reference count isn't zero, yet the bridge + * port should be destroyed, as it's no longer an upper of ours. + */ + if (!mlxsw_sp && list_empty(&bridge_port->vlans_list)) + return true; + else if (bridge_port->ref_count == 0) + return true; + else + return false; +} + +static struct mlxsw_sp_bridge_port * +mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge, + struct net_device *brport_dev) +{ + struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev); + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_bridge_port *bridge_port; + int err; + + bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev); + if (bridge_port) { + bridge_port->ref_count++; + return bridge_port; + } + + bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev); + if (IS_ERR(bridge_device)) + return ERR_CAST(bridge_device); + + bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev); + if (!bridge_port) { + err = -ENOMEM; + goto err_bridge_port_create; + } + + return bridge_port; + +err_bridge_port_create: + mlxsw_sp_bridge_device_put(bridge, bridge_device); + return ERR_PTR(err); +} + +static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge, + struct mlxsw_sp_bridge_port *bridge_port) +{ + struct mlxsw_sp_bridge_device *bridge_device; + + bridge_port->ref_count--; + if (!mlxsw_sp_bridge_port_should_destroy(bridge_port)) + return; + bridge_device = bridge_port->bridge_device; + mlxsw_sp_bridge_port_destroy(bridge_port); + mlxsw_sp_bridge_device_put(bridge, bridge_device); +} + +static struct mlxsw_sp_port_vlan * +mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port, + const struct mlxsw_sp_bridge_device * + bridge_device, + u16 vid) +{ + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + + list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list, + list) { + if (!mlxsw_sp_port_vlan->bridge_port) + continue; + if (mlxsw_sp_port_vlan->bridge_port->bridge_device != + bridge_device) + continue; + if (bridge_device->vlan_enabled && + mlxsw_sp_port_vlan->vid != vid) + continue; + return mlxsw_sp_port_vlan; } - if (!is_vlan_dev(dev)) - return mlxsw_sp_port; + return NULL; +} + +static struct mlxsw_sp_port_vlan* +mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid_index) +{ + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + + list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list, + list) { + struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; + + if (fid && fid->fid == fid_index) + return mlxsw_sp_port_vlan; + } + + return NULL; +} + +static struct mlxsw_sp_bridge_vlan * +mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port, + u16 vid) +{ + struct mlxsw_sp_bridge_vlan *bridge_vlan; + + list_for_each_entry(bridge_vlan, 
&bridge_port->vlans_list, list) { + if (bridge_vlan->vid == vid) + return bridge_vlan; + } + + return NULL; +} + +static struct mlxsw_sp_bridge_vlan * +mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid) +{ + struct mlxsw_sp_bridge_vlan *bridge_vlan; + + bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL); + if (!bridge_vlan) + return NULL; + + INIT_LIST_HEAD(&bridge_vlan->port_vlan_list); + bridge_vlan->vid = vid; + list_add(&bridge_vlan->list, &bridge_port->vlans_list); - vid = vlan_dev_vlan_id(dev); - mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - WARN_ON(!mlxsw_sp_vport); + return bridge_vlan; +} + +static void +mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan) +{ + list_del(&bridge_vlan->list); + WARN_ON(!list_empty(&bridge_vlan->port_vlan_list)); + kfree(bridge_vlan); +} + +static struct mlxsw_sp_bridge_vlan * +mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid) +{ + struct mlxsw_sp_bridge_vlan *bridge_vlan; + + bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid); + if (bridge_vlan) + return bridge_vlan; - return mlxsw_sp_vport; + return mlxsw_sp_bridge_vlan_create(bridge_port, vid); +} + +static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan) +{ + if (list_empty(&bridge_vlan->port_vlan_list)) + mlxsw_sp_bridge_vlan_destroy(bridge_vlan); +} + +static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge, + struct net_device *dev, + unsigned long *brport_flags) +{ + struct mlxsw_sp_bridge_port *bridge_port; + + bridge_port = mlxsw_sp_bridge_port_find(bridge, dev); + if (WARN_ON(!bridge_port)) + return; + + memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags)); } static int mlxsw_sp_port_attr_get(struct net_device *dev, @@ -123,10 +443,6 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port); - if (!mlxsw_sp_port) - return -EINVAL; - switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac); @@ -134,10 +450,8 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev, attr->u.ppid.id_len); break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - attr->u.brport_flags = - (mlxsw_sp_port->learning ? BR_LEARNING : 0) | - (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) | - (mlxsw_sp_port->uc_flood ? 
BR_FLOOD : 0); + mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev, + &attr->u.brport_flags); break; default: return -EOPNOTSUPP; @@ -146,237 +460,213 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev, return 0; } +static int +mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_vlan *bridge_vlan, + u8 state) +{ + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + + list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list, + bridge_vlan_node) { + if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port) + continue; + return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, + bridge_vlan->vid, state); + } + + return 0; +} + static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, + struct net_device *orig_dev, u8 state) { - u16 vid; + struct mlxsw_sp_bridge_port *bridge_port; + struct mlxsw_sp_bridge_vlan *bridge_vlan; int err; if (switchdev_trans_ph_prepare(trans)) return 0; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); - err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, state); - if (err) - return err; - mlxsw_sp_port->stp_state = state; + /* It's possible we failed to enslave the port, yet this + * operation is executed due to it being deferred. + */ + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, + orig_dev); + if (!bridge_port) return 0; - } - for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { - err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, state); + list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) { + err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, + bridge_vlan, state); if (err) - return err; + goto err_port_bridge_vlan_stp_set; } - mlxsw_sp_port->stp_state = state; + + bridge_port->stp_state = state; return 0; + +err_port_bridge_vlan_stp_set: + list_for_each_entry_continue_reverse(bridge_vlan, + &bridge_port->vlans_list, list) + mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan, + bridge_port->stp_state); + return err; } -static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 idx_begin, u16 idx_end, - enum mlxsw_sp_flood_table table, - bool set) +static int mlxsw_sp_port_fid_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_fid *fid, + enum mlxsw_sp_flood_table table, + bool member) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u16 local_port = mlxsw_sp_port->local_port; enum mlxsw_flood_table_type table_type; - u16 range = idx_end - idx_begin + 1; + u16 flood_index = fid->fid; char *sftr_pl; int err; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) + table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; + if (mlxsw_sp_fid_is_vfid(fid->fid)) { table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; - else - table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; + flood_index = mlxsw_sp_fid_to_vfid(fid->fid); + } sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); if (!sftr_pl) return -ENOMEM; - mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin, - table_type, range, local_port, set); + mlxsw_reg_sftr_pack(sftr_pl, table, flood_index, table_type, 1, + local_port, member); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); kfree(sftr_pl); return err; } -static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 idx_begin, u16 idx_end, bool uc_set, - bool bc_set, bool mc_set) +static int +mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct 
mlxsw_sp_bridge_vlan *bridge_vlan, + enum mlxsw_sp_flood_table table, + bool member) { - int err; - - err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, - MLXSW_SP_FLOOD_TABLE_UC, uc_set); - if (err) - return err; + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, - MLXSW_SP_FLOOD_TABLE_BC, bc_set); - if (err) - goto err_flood_bm_set; + list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list, + bridge_vlan_node) { + if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port) + continue; + return mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, + mlxsw_sp_port_vlan->fid, + table, member); + } - err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, - MLXSW_SP_FLOOD_TABLE_MC, mc_set); - if (err) - goto err_flood_mc_set; return 0; - -err_flood_mc_set: - __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, - MLXSW_SP_FLOOD_TABLE_BC, !bc_set); -err_flood_bm_set: - __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end, - MLXSW_SP_FLOOD_TABLE_UC, !uc_set); - return err; } -static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port, - enum mlxsw_sp_flood_table table, - bool set) +static int +mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port, + enum mlxsw_sp_flood_table table, + bool member) { - struct net_device *dev = mlxsw_sp_port->dev; - u16 vid, last_visited_vid; + struct mlxsw_sp_bridge_vlan *bridge_vlan; int err; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid; - u16 vfid = mlxsw_sp_fid_to_vfid(fid); - - return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid, - vfid, table, set); - } - - for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { - err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, - table, set); - if (err) { - last_visited_vid = vid; - goto err_port_flood_set; - } + list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) { + err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, + bridge_vlan, table, + member); + if (err) + goto err_port_bridge_vlan_flood_set; } return 0; -err_port_flood_set: - for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid) - __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table, - !set); - netdev_err(dev, "Failed to configure unicast flooding\n"); +err_port_bridge_vlan_flood_set: + list_for_each_entry_continue_reverse(bridge_vlan, + &bridge_port->vlans_list, list) + mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan, + table, !member); return err; } -static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct switchdev_trans *trans, - bool mc_disabled) +static int +mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_vlan *bridge_vlan, + bool set) { - int set; - int err = 0; - - if (switchdev_trans_ph_prepare(trans)) - return 0; + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + u16 vid = bridge_vlan->vid; - if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) { - set = mc_disabled ? 
- mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router; - err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port, - MLXSW_SP_FLOOD_TABLE_MC, - set); + list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list, + bridge_vlan_node) { + if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port) + continue; + return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set); } - if (!err) - mlxsw_sp_port->mc_disabled = mc_disabled; - - return err; -} - -int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, - bool set) -{ - bool mc_set = set; - u16 vfid; - - /* In case of vFIDs, index into the flooding table is relative to - * the start of the vFIDs range. - */ - vfid = mlxsw_sp_fid_to_vfid(fid); - - if (set) - mc_set = mlxsw_sp_vport->mc_disabled ? - mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router; - - return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set, - mc_set); + return 0; } -static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, - bool set) +static int +mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port, + bool set) { - u16 vid; + struct mlxsw_sp_bridge_vlan *bridge_vlan; int err; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); - - return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set); - } - - for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set); + list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) { + err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port, + bridge_vlan, set); if (err) - goto err_port_vid_learning_set; + goto err_port_bridge_vlan_learning_set; } return 0; -err_port_vid_learning_set: - for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) - mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, !set); +err_port_bridge_vlan_learning_set: + list_for_each_entry_continue_reverse(bridge_vlan, + &bridge_port->vlans_list, list) + mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port, + bridge_vlan, !set); return err; } static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, + struct net_device *orig_dev, unsigned long brport_flags) { - unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0; - unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0; + struct mlxsw_sp_bridge_port *bridge_port; int err; if (switchdev_trans_ph_prepare(trans)) return 0; - if ((uc_flood ^ brport_flags) & BR_FLOOD) { - err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port, - MLXSW_SP_FLOOD_TABLE_UC, - !mlxsw_sp_port->uc_flood); - if (err) - return err; - } + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, + orig_dev); + if (WARN_ON(!bridge_port)) + return -EINVAL; - if ((learning ^ brport_flags) & BR_LEARNING) { - err = mlxsw_sp_port_learning_set(mlxsw_sp_port, - !mlxsw_sp_port->learning); - if (err) - goto err_port_learning_set; - } + err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, + MLXSW_SP_FLOOD_TABLE_UC, + brport_flags & BR_FLOOD); + if (err) + return err; - mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0; - mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0; - mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 
1 : 0; + err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port, + brport_flags & BR_LEARNING); + if (err) + return err; - return 0; + memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags)); -err_port_learning_set: - if ((uc_flood ^ brport_flags) & BR_FLOOD) - mlxsw_sp_port_flood_table_set(mlxsw_sp_port, - MLXSW_SP_FLOOD_TABLE_UC, - mlxsw_sp_port->uc_flood); - return err; + return 0; } static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time) @@ -417,29 +707,77 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, bool vlan_enabled) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_bridge_device *bridge_device; - /* SWITCHDEV_TRANS_PREPARE phase */ - if ((!vlan_enabled) && - (mlxsw_sp->bridge->master_bridge.dev == orig_dev)) { - netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n"); + if (!switchdev_trans_ph_prepare(trans)) + return 0; + + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev); + if (WARN_ON(!bridge_device)) return -EINVAL; - } - return 0; + if (bridge_device->vlan_enabled == vlan_enabled) + return 0; + + netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n"); + return -EINVAL; } static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, + struct net_device *orig_dev, bool is_port_mc_router) { + struct mlxsw_sp_bridge_port *bridge_port; + if (switchdev_trans_ph_prepare(trans)) return 0; - mlxsw_sp_port->mc_router = is_port_mc_router; - if (!mlxsw_sp_port->mc_disabled) - return mlxsw_sp_port_flood_table_set(mlxsw_sp_port, - MLXSW_SP_FLOOD_TABLE_MC, - is_port_mc_router); + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, + orig_dev); + if (WARN_ON(!bridge_port)) + return -EINVAL; + + if (!bridge_port->bridge_device->multicast_enabled) + return 0; + + return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, + MLXSW_SP_FLOOD_TABLE_MC, + is_port_mc_router); +} + +static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + struct net_device *orig_dev, + bool mc_disabled) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_bridge_port *bridge_port; + int err; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + /* It's possible we failed to enslave the port, yet this + * operation is executed due to it being deferred. + */ + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev); + if (!bridge_device) + return 0; + + list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { + enum mlxsw_sp_flood_table table = MLXSW_SP_FLOOD_TABLE_MC; + bool member = mc_disabled ? 
true : bridge_port->mrouter; + + err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, + bridge_port, table, + member); + if (err) + return err; + } + + bridge_device->multicast_enabled = !mc_disabled; return 0; } @@ -449,19 +787,17 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, struct switchdev_trans *trans) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - int err = 0; - - mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port); - if (!mlxsw_sp_port) - return -EINVAL; + int err; switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans, + attr->orig_dev, attr->u.stp_state); break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans, + attr->orig_dev, attr->u.brport_flags); break; case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: @@ -475,10 +811,12 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, break; case SWITCHDEV_ATTR_ID_PORT_MROUTER: err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans, + attr->orig_dev, attr->u.mrouter); break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans, + attr->orig_dev, attr->u.mc_disabled); break; default: @@ -489,178 +827,337 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, return err; } -static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create) +static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid) { + return valid ? MLXSW_REG_SFMR_OP_CREATE_FID : + MLXSW_REG_SFMR_OP_DESTROY_FID; +} + +int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index, bool valid) +{ + u16 fid_offset = fid_index < MLXSW_SP_VFID_BASE ? fid_index : 0; char sfmr_pl[MLXSW_REG_SFMR_LEN]; - mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid); + mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid_index, + fid_offset); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); } -static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid) +static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index, + bool valid) { enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID; char svfa_pl[MLXSW_REG_SVFA_LEN]; - mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid); + mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid_index, fid_index); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl); } -static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid) +struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, + u16 fid_index) { - struct mlxsw_sp_fid *f; + struct mlxsw_sp_fid *fid; + int err; - f = kzalloc(sizeof(*f), GFP_KERNEL); - if (!f) - return NULL; + err = mlxsw_sp_fid_op(mlxsw_sp, fid_index, true); + if (err) + return ERR_PTR(err); - f->fid = fid; + err = mlxsw_sp_fid_map(mlxsw_sp, fid_index, true); + if (err) + goto err_fid_map; + + fid = kzalloc(sizeof(*fid), GFP_KERNEL); + if (!fid) { + err = -ENOMEM; + goto err_allocate_fid; + } - return f; + fid->fid = fid_index; + fid->ref_count = 1; + list_add(&fid->list, &mlxsw_sp->fids); + + return fid; + +err_allocate_fid: + mlxsw_sp_fid_map(mlxsw_sp, fid_index, false); +err_fid_map: + mlxsw_sp_fid_op(mlxsw_sp, fid_index, false); + return ERR_PTR(err); +} + +static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid) +{ + u16 fid_index = fid->fid; + + list_del(&fid->list); + if (fid->rif) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, fid->rif); + kfree(fid); + mlxsw_sp_fid_map(mlxsw_sp, fid_index, false); + 
mlxsw_sp_fid_op(mlxsw_sp, fid_index, false); } -struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) +static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev) { - struct mlxsw_sp_fid *f; + u16 vfid_index, fid_index; + struct mlxsw_sp_fid *fid; int err; - err = mlxsw_sp_fid_op(mlxsw_sp, fid, true); - if (err) - return ERR_PTR(err); + vfid_index = find_first_zero_bit(mlxsw_sp->vfids.mapped, + MLXSW_SP_VFID_MAX); + if (vfid_index == MLXSW_SP_VFID_MAX) + return ERR_PTR(-ENOBUFS); - /* Although all the ports member in the FID might be using a - * {Port, VID} to FID mapping, we create a global VID-to-FID - * mapping. This allows a port to transition to VLAN mode, - * knowing the global mapping exists. - */ - err = mlxsw_sp_fid_map(mlxsw_sp, fid, true); + fid_index = mlxsw_sp_vfid_to_fid(vfid_index); + err = mlxsw_sp_fid_op(mlxsw_sp, fid_index, true); if (err) - goto err_fid_map; + return ERR_PTR(err); - f = mlxsw_sp_fid_alloc(fid); - if (!f) { + fid = kzalloc(sizeof(*fid), GFP_KERNEL); + if (!fid) { err = -ENOMEM; goto err_allocate_fid; } - list_add(&f->list, &mlxsw_sp->fids); + fid->fid = fid_index; + fid->ref_count = 1; + fid->dev = dev; + list_add(&fid->list, &mlxsw_sp->vfids.list); + __set_bit(vfid_index, mlxsw_sp->vfids.mapped); + + return fid; + +err_allocate_fid: + mlxsw_sp_fid_op(mlxsw_sp, fid_index, false); + return ERR_PTR(err); +} + +static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid) +{ + u16 vfid_index = mlxsw_sp_fid_to_vfid(fid->fid); + u16 fid_index = fid->fid; + + __clear_bit(vfid_index, mlxsw_sp->vfids.mapped); + list_del(&fid->list); + if (fid->rif) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, fid->rif); + kfree(fid); + mlxsw_sp_fid_op(mlxsw_sp, fid_index, false); +} + +static struct mlxsw_sp_fid *__mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp, + u16 fid_index) +{ + struct mlxsw_sp_fid *fid; + + fid = mlxsw_sp_fid_find(mlxsw_sp, fid_index); + if (fid) { + fid->ref_count++; + return fid; + } + + return mlxsw_sp_fid_create(mlxsw_sp, fid_index); +} + +static struct mlxsw_sp_fid *mlxsw_sp_vfid_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev) +{ + struct mlxsw_sp_fid *fid; - return f; + fid = mlxsw_sp_vfid_find(mlxsw_sp, dev); + if (fid) { + fid->ref_count++; + return fid; + } -err_allocate_fid: - mlxsw_sp_fid_map(mlxsw_sp, fid, false); -err_fid_map: - mlxsw_sp_fid_op(mlxsw_sp, fid, false); - return ERR_PTR(err); + return mlxsw_sp_vfid_create(mlxsw_sp, dev); } -void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f) +static struct mlxsw_sp_fid * +mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp, u16 vid, + struct mlxsw_sp_bridge_device *bridge_device) { - u16 fid = f->fid; + if (bridge_device->vlan_enabled) + return __mlxsw_sp_fid_get(mlxsw_sp, vid); + else + return mlxsw_sp_vfid_get(mlxsw_sp, bridge_device->dev); +} - list_del(&f->list); +static void __mlxsw_sp_fid_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid) +{ + if (--fid->ref_count == 0) + mlxsw_sp_fid_destroy(mlxsw_sp, fid); +} - if (f->rif) - mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif); +static void mlxsw_sp_vfid_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid) +{ + if (--fid->ref_count == 0) + mlxsw_sp_vfid_destroy(mlxsw_sp, fid); +} - kfree(f); +static void mlxsw_sp_fid_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *fid) +{ + if (!mlxsw_sp_fid_is_vfid(fid->fid)) + __mlxsw_sp_fid_put(mlxsw_sp, fid); + else + mlxsw_sp_vfid_put(mlxsw_sp, fid); +} - mlxsw_sp_fid_map(mlxsw_sp, 
fid, false); +static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port) +{ + const struct mlxsw_sp_bridge_device *bridge_device; - mlxsw_sp_fid_op(mlxsw_sp, fid, false); + bridge_device = bridge_port->bridge_device; + return !bridge_device->multicast_enabled ? true : bridge_port->mrouter; } -static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, - u16 fid) +static int __mlxsw_sp_port_vid_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid, u16 fid_index) { - struct mlxsw_sp_fid *f; + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + int err; - if (test_bit(fid, mlxsw_sp_port->active_vlans)) - return 0; + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid_index, + vid); + if (err) + return err; - f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid); - if (!f) { - f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid); - if (IS_ERR(f)) - return PTR_ERR(f); + if (mlxsw_sp_port->nr_port_vid_map++ == 0) { + err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); + if (err) + goto err_port_vp_mode_trans; } - f->ref_count++; + return 0; + +err_port_vp_mode_trans: + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid_index, vid); + return err; +} + +static int __mlxsw_sp_port_vid_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid, u16 fid_index) +{ + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + + if (mlxsw_sp_port->nr_port_vid_map == 1) + mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); + mlxsw_sp_port->nr_port_vid_map--; - netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid); + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid_index, vid); return 0; } -static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port, - u16 fid) +static int mlxsw_sp_port_vid_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid, u16 fid_index) { - struct mlxsw_sp_fid *f; - - f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid); - if (WARN_ON(!f)) - return; + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; - netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid); + if (mlxsw_sp_fid_is_vfid(fid_index)) + return __mlxsw_sp_port_vid_fid_map(mlxsw_sp_port, vid, + fid_index); - mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid); + if (mlxsw_sp_port->nr_port_vid_map == 0) + return 0; - if (--f->ref_count == 0) - mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f); + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid_index, + fid_index); } -static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid, - bool valid) +static int mlxsw_sp_port_vid_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid, u16 fid_index) { enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; - /* If port doesn't have vPorts, then it can use the global - * VID-to-FID mapping. 
- */ - if (list_empty(&mlxsw_sp_port->vports_list)) + if (mlxsw_sp_fid_is_vfid(fid_index)) + return __mlxsw_sp_port_vid_fid_unmap(mlxsw_sp_port, vid, + fid_index); + + if (mlxsw_sp_port->nr_port_vid_map == 0) return 0; - return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid); + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid_index, + fid_index); } -static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) +static int +mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, + struct mlxsw_sp_bridge_port *bridge_port) { - bool mc_flood; + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u16 vid = mlxsw_sp_port_vlan->vid; + struct mlxsw_sp_fid *fid; int err; - err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid); + fid = mlxsw_sp_fid_get(mlxsw_sp, vid, bridge_port->bridge_device); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + err = mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, + MLXSW_SP_FLOOD_TABLE_UC, + bridge_port->flags & BR_FLOOD); if (err) - return err; + goto err_port_fid_uc_flood_set; - mc_flood = mlxsw_sp_port->mc_disabled ? - mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router; + err = mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, + MLXSW_SP_FLOOD_TABLE_MC, + mlxsw_sp_mc_flood(bridge_port)); + if (err) + goto err_port_fid_mc_flood_set; - err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid, fid, - mlxsw_sp_port->uc_flood, true, - mc_flood); + err = mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, + MLXSW_SP_FLOOD_TABLE_BC, true); if (err) - goto err_port_flood_set; + goto err_port_fid_bc_flood_set; - err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true); + err = mlxsw_sp_port_vid_fid_map(mlxsw_sp_port, vid, fid->fid); if (err) - goto err_port_fid_map; + goto err_port_vid_fid_map; + + mlxsw_sp_port_vlan->fid = fid; return 0; -err_port_fid_map: - __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid, fid, false, false, false); -err_port_flood_set: - __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid); +err_port_vid_fid_map: + mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_BC, + false); +err_port_fid_bc_flood_set: + mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_MC, + false); +err_port_fid_mc_flood_set: + mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_UC, + false); +err_port_fid_uc_flood_set: + mlxsw_sp_fid_put(mlxsw_sp, fid); return err; } -static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port, - u16 fid) +static void +mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) { - mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false); - __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid, fid, false, - false, false); - __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid); + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; + u16 vid = mlxsw_sp_port_vlan->vid; + + mlxsw_sp_port_vlan->fid = NULL; + mlxsw_sp_port_vid_fid_unmap(mlxsw_sp_port, vid, fid->fid); + mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_BC, + false); + mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_MC, + false); + mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_UC, + false); + mlxsw_sp_fid_put(mlxsw_sp, fid); } static u16 @@ -675,52 +1172,124 @@ mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port 
*mlxsw_sp_port, return mlxsw_sp_port->pvid; } -static int mlxsw_sp_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, - bool is_untagged, bool is_pvid) +static int +mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, + struct mlxsw_sp_bridge_port *bridge_port) { - u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); - u16 old_pvid = mlxsw_sp_port->pvid; + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; + struct mlxsw_sp_bridge_vlan *bridge_vlan; + u16 vid = mlxsw_sp_port_vlan->vid; int err; - err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid); - if (err) - return err; - - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, - is_untagged); - if (err) - goto err_port_vlan_set; + /* No need to continue if only VLAN flags were changed */ + if (mlxsw_sp_port_vlan->bridge_port) + return 0; - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); + err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port); if (err) - goto err_port_pvid_set; + return err; err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, - mlxsw_sp_port->learning); + bridge_port->flags & BR_LEARNING); if (err) goto err_port_vid_learning_set; err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, - mlxsw_sp_port->stp_state); + bridge_port->stp_state); if (err) goto err_port_vid_stp_set; - if (is_untagged) - __set_bit(vid, mlxsw_sp_port->untagged_vlans); - else - __clear_bit(vid, mlxsw_sp_port->untagged_vlans); - __set_bit(vid, mlxsw_sp_port->active_vlans); + bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid); + if (!bridge_vlan) { + err = -ENOMEM; + goto err_bridge_vlan_get; + } + + list_add(&mlxsw_sp_port_vlan->bridge_vlan_node, + &bridge_vlan->port_vlan_list); + + mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge, + bridge_port->dev); + mlxsw_sp_port_vlan->bridge_port = bridge_port; return 0; +err_bridge_vlan_get: + mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED); err_port_vid_stp_set: mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); err_port_vid_learning_set: + mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan); + return err; +} + +void +mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) +{ + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; + struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; + struct mlxsw_sp_bridge_vlan *bridge_vlan; + struct mlxsw_sp_bridge_port *bridge_port; + u16 vid = mlxsw_sp_port_vlan->vid; + bool last; + + bridge_port = mlxsw_sp_port_vlan->bridge_port; + bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid); + last = list_is_singular(&bridge_vlan->port_vlan_list); + + list_del(&mlxsw_sp_port_vlan->bridge_vlan_node); + mlxsw_sp_bridge_vlan_put(bridge_vlan); + mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED); + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); + if (last) + mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp, + bridge_port, fid->fid); + mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan); + + mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port); + mlxsw_sp_port_vlan->bridge_port = NULL; +} + +static int +mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port, + u16 vid, bool is_untagged, bool is_pvid) +{ + u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + struct mlxsw_sp_bridge_vlan *bridge_vlan; + u16 old_pvid = mlxsw_sp_port->pvid; + int 
err; + + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid); + if (IS_ERR(mlxsw_sp_port_vlan)) + return PTR_ERR(mlxsw_sp_port_vlan); + + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, + is_untagged); + if (err) + goto err_port_vlan_set; + + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); + if (err) + goto err_port_pvid_set; + + err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port); + if (err) + goto err_port_vlan_bridge_join; + + bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid); + bridge_vlan->egress_untagged = is_untagged; + bridge_vlan->pvid = is_pvid; + + return 0; + +err_port_vlan_bridge_join: mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid); err_port_pvid_set: mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); err_port_vlan_set: - mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid); + mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); return err; } @@ -730,16 +1299,27 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, { bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *orig_dev = vlan->obj.orig_dev; + struct mlxsw_sp_bridge_port *bridge_port; u16 vid; if (switchdev_trans_ph_prepare(trans)) return 0; + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); + if (WARN_ON(!bridge_port)) + return -EINVAL; + + if (!bridge_port->bridge_device->vlan_enabled) + return 0; + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { int err; - err = mlxsw_sp_port_vlan_add(mlxsw_sp_port, vid, flag_untagged, - flag_pvid); + err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, + vid, flag_untagged, + flag_pvid); if (err) return err; } @@ -747,6 +1327,29 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } +static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged) +{ + return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID : + MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID; +} + +static int +mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_port *bridge_port, + u16 fid_index) +{ + bool lagged = bridge_port->lagged; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + u16 system_port; + + system_port = lagged ? bridge_port->lag_id : bridge_port->system_port; + mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged)); + mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index); + mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) { return dynamic ? 
MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : @@ -822,24 +1425,39 @@ mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans) { - u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid); - u16 lag_vid = 0; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *orig_dev = fdb->obj.orig_dev; + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_bridge_port *bridge_port; + u16 fid_index, vid; if (switchdev_trans_ph_prepare(trans)) return 0; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); - } + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); + if (WARN_ON(!bridge_port)) + return -EINVAL; + + bridge_device = bridge_port->bridge_device; + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, + bridge_device, + fdb->vid); + if (!mlxsw_sp_port_vlan) + return 0; + + fid_index = mlxsw_sp_port_vlan->fid->fid; + vid = mlxsw_sp_port_vlan->vid; if (!mlxsw_sp_port->lagged) - return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp, + return mlxsw_sp_port_fdb_uc_op(mlxsw_sp, mlxsw_sp_port->local_port, - fdb->addr, fid, true, false); + fdb->addr, fid_index, true, + false); else - return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp, + return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, mlxsw_sp_port->lag_id, - fdb->addr, fid, lag_vid, + fdb->addr, fid_index, vid, true, false); } @@ -939,17 +1557,34 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *orig_dev = mdb->obj.orig_dev; + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_mid *mid; - u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid); + u16 fid_index; int err = 0; if (switchdev_trans_ph_prepare(trans)) return 0; - mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); + if (WARN_ON(!bridge_port)) + return -EINVAL; + + bridge_device = bridge_port->bridge_device; + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, + bridge_device, + mdb->vid); + if (WARN_ON(!mlxsw_sp_port_vlan)) + return -EINVAL; + + fid_index = mlxsw_sp_port_vlan->fid->fid; + + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index); if (!mid) { - mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid); + mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid_index); if (!mid) { netdev_err(dev, "Unable to allocate MC group\n"); return -ENOMEM; @@ -965,8 +1600,8 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, } if (mid->ref_count == 1) { - err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid, - true); + err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index, + mid->mid, true); if (err) { netdev_err(dev, "Unable to set MC SFD\n"); goto err_out; @@ -987,15 +1622,8 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; - mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); - if (!mlxsw_sp_port) - return -EINVAL; - switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) - return 0; - err = 
mlxsw_sp_port_vlans_add(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_VLAN(obj), trans); @@ -1018,57 +1646,78 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, return err; } -static void mlxsw_sp_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) +static void +mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port, u16 vid) { u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid; + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - __clear_bit(vid, mlxsw_sp_port->active_vlans); - mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED); - mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); + if (WARN_ON(!mlxsw_sp_port_vlan)) + return; + + mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); - mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid); + mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); } static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *orig_dev = vlan->obj.orig_dev; + struct mlxsw_sp_bridge_port *bridge_port; u16 vid; - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) - mlxsw_sp_port_vlan_del(mlxsw_sp_port, vid); + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); + if (WARN_ON(!bridge_port)) + return -EINVAL; - return 0; -} + if (!bridge_port->bridge_device->vlan_enabled) + return 0; -void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) -{ - u16 vid; + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) + mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid); - for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) - mlxsw_sp_port_vlan_del(mlxsw_sp_port, vid); + return 0; } static int mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_fdb *fdb) { - u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid); - u16 lag_vid = 0; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *orig_dev = fdb->obj.orig_dev; + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_bridge_port *bridge_port; + u16 fid_index, vid; + + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); + if (WARN_ON(!bridge_port)) + return -EINVAL; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); - } + bridge_device = bridge_port->bridge_device; + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, + bridge_device, + fdb->vid); + if (!mlxsw_sp_port_vlan) + return 0; + + fid_index = mlxsw_sp_port_vlan->fid->fid; + vid = mlxsw_sp_port_vlan->vid; if (!mlxsw_sp_port->lagged) - return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp, + return mlxsw_sp_port_fdb_uc_op(mlxsw_sp, mlxsw_sp_port->local_port, - fdb->addr, fid, - false, false); + fdb->addr, fid_index, false, + false); else - return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp, + return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, mlxsw_sp_port->lag_id, - fdb->addr, fid, lag_vid, + fdb->addr, fid_index, vid, false, false); } @@ -1076,13 +1725,30 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_mdb *mdb) { struct mlxsw_sp *mlxsw_sp = 
mlxsw_sp_port->mlxsw_sp; + struct net_device *orig_dev = mdb->obj.orig_dev; + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + struct mlxsw_sp_bridge_device *bridge_device; struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_mid *mid; - u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid); + u16 fid_index; u16 mid_idx; int err = 0; - mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); + if (WARN_ON(!bridge_port)) + return -EINVAL; + + bridge_device = bridge_port->bridge_device; + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, + bridge_device, + mdb->vid); + if (WARN_ON(!mlxsw_sp_port_vlan)) + return -EINVAL; + + fid_index = mlxsw_sp_port_vlan->fid->fid; + + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index); if (!mid) { netdev_err(dev, "Unable to remove port from MC DB\n"); return -EINVAL; @@ -1094,8 +1760,8 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, mid_idx = mid->mid; if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) { - err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx, - false); + err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index, + mid_idx, false); if (err) netdev_err(dev, "Unable to remove MC SFD\n"); } @@ -1109,15 +1775,8 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; - mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); - if (!mlxsw_sp_port) - return -EINVAL; - switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) - return 0; - err = mlxsw_sp_port_vlans_del(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_VLAN(obj)); break; @@ -1156,32 +1815,32 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb, - struct net_device *orig_dev) + switchdev_obj_dump_cb_t *cb) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_port *tmp; - struct mlxsw_sp_fid *f; - u16 vport_fid; - char *sfd_pl; + struct net_device *orig_dev = fdb->obj.orig_dev; + struct mlxsw_sp_bridge_port *bridge_port; + u16 lag_id, fid_index; char mac[ETH_ALEN]; - u16 fid; - u8 local_port; - u16 lag_id; - u8 num_rec; int stored_err = 0; - int i; + char *sfd_pl; + u8 num_rec; int err; + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); + if (!bridge_port) + return 0; + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); if (!sfd_pl) return -ENOMEM; - f = mlxsw_sp_vport_fid_get(mlxsw_sp_port); - vport_fid = f ? 
f->fid : 0; - mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); do { + struct mlxsw_sp_port *tmp; + u8 local_port; + int i; + mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); if (err) @@ -1198,48 +1857,44 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, for (i = 0; i < num_rec; i++) { switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) { case MLXSW_REG_SFD_REC_TYPE_UNICAST: - mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid, + mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, + &fid_index, &local_port); - if (local_port == mlxsw_sp_port->local_port) { - if (vport_fid && vport_fid == fid) - fdb->vid = 0; - else if (!vport_fid && - !mlxsw_sp_fid_is_vfid(fid)) - fdb->vid = fid; - else - continue; - ether_addr_copy(fdb->addr, mac); - fdb->ndm_state = NUD_REACHABLE; - err = cb(&fdb->obj); - if (err) - stored_err = err; - } + if (bridge_port->lagged) + continue; + if (bridge_port->system_port != local_port) + continue; + if (bridge_port->bridge_device->vlan_enabled) + fdb->vid = fid_index; + else + fdb->vid = 0; + ether_addr_copy(fdb->addr, mac); + fdb->ndm_state = NUD_REACHABLE; + err = cb(&fdb->obj); + if (err) + stored_err = err; break; case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG: mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i, - mac, &fid, &lag_id); + mac, &fid_index, + &lag_id); + if (!bridge_port->lagged) + continue; + if (bridge_port->lag_id != lag_id) + continue; tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id); - if (tmp && tmp->local_port == - mlxsw_sp_port->local_port) { - /* LAG records can only point to LAG - * devices or VLAN devices on top. - */ - if (!netif_is_lag_master(orig_dev) && - !is_vlan_dev(orig_dev)) - continue; - if (vport_fid && vport_fid == fid) - fdb->vid = 0; - else if (!vport_fid && - !mlxsw_sp_fid_is_vfid(fid)) - fdb->vid = fid; - else - continue; - ether_addr_copy(fdb->addr, mac); - fdb->ndm_state = NUD_REACHABLE; - err = cb(&fdb->obj); - if (err) - stored_err = err; - } + if (tmp->local_port != + mlxsw_sp_port->local_port) + continue; + if (bridge_port->bridge_device->vlan_enabled) + fdb->vid = fid_index; + else + fdb->vid = 0; + ether_addr_copy(fdb->addr, mac); + fdb->ndm_state = NUD_REACHABLE; + err = cb(&fdb->obj); + if (err) + stored_err = err; break; } } @@ -1254,28 +1909,32 @@ static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_obj_port_vlan *vlan, switchdev_obj_dump_cb_t *cb) { - u16 vid; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *orig_dev = vlan->obj.orig_dev; + struct mlxsw_sp_bridge_port *bridge_port; + struct mlxsw_sp_bridge_vlan *bridge_vlan; int err = 0; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - vlan->flags = 0; - vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port); - vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port); - return cb(&vlan->obj); - } + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); + if (WARN_ON(!bridge_port)) + return -EINVAL; + + if (!bridge_port->bridge_device->vlan_enabled) + return 0; - for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { + list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) { vlan->flags = 0; - if (vid == mlxsw_sp_port->pvid) + if (bridge_vlan->pvid) vlan->flags |= BRIDGE_VLAN_INFO_PVID; - if (test_bit(vid, mlxsw_sp_port->untagged_vlans)) + if (bridge_vlan->egress_untagged) vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - vlan->vid_begin = vid; - vlan->vid_end = vid; + vlan->vid_begin = bridge_vlan->vid; + 
vlan->vid_end = bridge_vlan->vid; err = cb(&vlan->obj); if (err) break; } + return err; } @@ -1286,10 +1945,6 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; - mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port); - if (!mlxsw_sp_port) - return -EINVAL; - switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port, @@ -1297,8 +1952,7 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev, break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port, - SWITCHDEV_OBJ_PORT_FDB(obj), cb, - obj->orig_dev); + SWITCHDEV_OBJ_PORT_FDB(obj), cb); break; default: err = -EOPNOTSUPP; @@ -1316,6 +1970,154 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, }; +static int +mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + + if (is_vlan_dev(bridge_port->dev)) + return -EINVAL; + + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1); + if (WARN_ON(!mlxsw_sp_port_vlan)) + return -EINVAL; + + /* Let VLAN-aware bridge take care of its own VLANs */ + mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); + + return 0; +} + +static void +mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); + /* Make sure untagged frames are allowed to ingress */ + mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); +} + +static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = { + .port_join = mlxsw_sp_bridge_8021q_port_join, + .port_leave = mlxsw_sp_bridge_8021q_port_leave, +}; + +static bool +mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port, + const struct net_device *br_dev) +{ + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + + list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list, + list) { + if (mlxsw_sp_port_vlan->bridge_port && + mlxsw_sp_port_vlan->bridge_port->bridge_device->dev == + br_dev) + return true; + } + + return false; +} + +static int +mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + struct mlxsw_sp_fid *fid; + u16 vid; + + if (!is_vlan_dev(bridge_port->dev)) + return -EINVAL; + vid = vlan_dev_vlan_id(bridge_port->dev); + + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); + if (WARN_ON(!mlxsw_sp_port_vlan)) + return -EINVAL; + fid = mlxsw_sp_port_vlan->fid; + + if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) { + netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n"); + return -EINVAL; + } + + /* Port is no longer usable as a router interface */ + if (fid) + fid->leave(mlxsw_sp_port_vlan); + + return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port); +} + +static void +mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + u16 vid = vlan_dev_vlan_id(bridge_port->dev); + + mlxsw_sp_port_vlan = 
mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); + if (WARN_ON(!mlxsw_sp_port_vlan)) + return; + + mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); +} + +static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = { + .port_join = mlxsw_sp_bridge_8021d_port_join, + .port_leave = mlxsw_sp_bridge_8021d_port_leave, +}; + +int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *brport_dev, + struct net_device *br_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_bridge_port *bridge_port; + int err; + + bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev); + if (IS_ERR(bridge_port)) + return PTR_ERR(bridge_port); + bridge_device = bridge_port->bridge_device; + + err = bridge_device->ops->port_join(bridge_device, bridge_port, + mlxsw_sp_port); + if (err) + goto err_port_join; + + return 0; + +err_port_join: + mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port); + return err; +} + +void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *brport_dev, + struct net_device *br_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_bridge_port *bridge_port; + + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); + if (!bridge_device) + return; + bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev); + if (!bridge_port) + return; + + bridge_device->ops->port_leave(bridge_device, bridge_port, + mlxsw_sp_port); + mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port); +} + static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding, char *mac, u16 vid, struct net_device *dev) @@ -1335,6 +2137,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, char *sfn_pl, int rec_index, bool adding) { + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_port *mlxsw_sp_port; char mac[ETH_ALEN]; u8 local_port; @@ -1349,22 +2154,21 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, goto just_remove; } - if (mlxsw_sp_fid_is_vfid(fid)) { - struct mlxsw_sp_port *mlxsw_sp_vport; + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid); + if (!mlxsw_sp_port_vlan) { + netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n"); + goto just_remove; + } - mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, - fid); - if (!mlxsw_sp_vport) { - netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); - goto just_remove; - } - vid = 0; - /* Override the physical port with the vPort. */ - mlxsw_sp_port = mlxsw_sp_vport; - } else { - vid = fid; + bridge_port = mlxsw_sp_port_vlan->bridge_port; + if (!bridge_port) { + netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n"); + goto just_remove; } + bridge_device = bridge_port->bridge_device; + vid = bridge_device->vlan_enabled ? 
mlxsw_sp_port_vlan->vid : 0; + do_fdb_op: err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, true); @@ -1375,8 +2179,8 @@ do_fdb_op: if (!do_notification) return; - mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, - adding, mac, vid, mlxsw_sp_port->dev); + mlxsw_sp_fdb_call_notifiers(bridge_port->flags & BR_LEARNING_SYNC, + adding, mac, vid, bridge_port->dev); return; just_remove: @@ -1389,8 +2193,10 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, char *sfn_pl, int rec_index, bool adding) { + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_port *mlxsw_sp_port; - struct net_device *dev; char mac[ETH_ALEN]; u16 lag_vid = 0; u16 lag_id; @@ -1405,26 +2211,22 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, goto just_remove; } - if (mlxsw_sp_fid_is_vfid(fid)) { - struct mlxsw_sp_port *mlxsw_sp_vport; - - mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, - fid); - if (!mlxsw_sp_vport) { - netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); - goto just_remove; - } + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid); + if (!mlxsw_sp_port_vlan) { + netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n"); + goto just_remove; + } - lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - dev = mlxsw_sp_vport->dev; - vid = 0; - /* Override the physical port with the vPort. */ - mlxsw_sp_port = mlxsw_sp_vport; - } else { - dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev; - vid = fid; + bridge_port = mlxsw_sp_port_vlan->bridge_port; + if (!bridge_port) { + netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n"); + goto just_remove; } + bridge_device = bridge_port->bridge_device; + vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0; + lag_vid = mlxsw_sp_port_vlan->vid; + do_fdb_op: err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, adding, true); @@ -1435,8 +2237,8 @@ do_fdb_op: if (!do_notification) return; - mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac, - vid, dev); + mlxsw_sp_fdb_call_notifiers(bridge_port->flags & BR_LEARNING_SYNC, + adding, mac, vid, bridge_port->dev); return; just_remove: @@ -1540,8 +2342,12 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) mlxsw_sp->bridge = bridge; bridge->mlxsw_sp = mlxsw_sp; + INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list); INIT_LIST_HEAD(&mlxsw_sp->bridge->mids_list); + bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops; + bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops; + return mlxsw_sp_fdb_init(mlxsw_sp); } @@ -1549,6 +2355,7 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp_fdb_fini(mlxsw_sp); WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list)); + WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); kfree(mlxsw_sp->bridge); }
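
Editor's note (illustration appended after the diff; not part of the commit): the hunks above replace the old single "master bridge"/vPort model with per-bridge-device ops. mlxsw_sp_bridge_device_create() selects bridge_8021q_ops or bridge_8021d_ops exactly once, based on br_vlan_enabled(), and mlxsw_sp_port_bridge_join()/mlxsw_sp_port_bridge_leave() then just call bridge_device->ops->port_join()/port_leave(). The minimal user-space C sketch below models only that dispatch pattern under stated assumptions; every toy_* identifier is hypothetical and does not exist in the driver.

/*
 * Editor's illustrative sketch -- NOT part of the patch above. A minimal,
 * self-contained model of the per-bridge ops dispatch introduced by the
 * patch (ops chosen once per bridge device, then invoked blindly from the
 * join/leave paths). All toy_* names are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_bridge_ops {
	int (*port_join)(const char *port_name);
	void (*port_leave)(const char *port_name);
};

/* VLAN-aware (802.1Q) bridge: the bridge itself manages VLANs/FIDs. */
static int toy_8021q_join(const char *port_name)
{
	printf("%s: join VLAN-aware bridge\n", port_name);
	return 0;
}

static void toy_8021q_leave(const char *port_name)
{
	printf("%s: leave VLAN-aware bridge, restore PVID 1\n", port_name);
}

/* VLAN-unaware (802.1D) bridge: enslaved via a VLAN upper device. */
static int toy_8021d_join(const char *port_name)
{
	printf("%s: join VLAN-unaware bridge\n", port_name);
	return 0;
}

static void toy_8021d_leave(const char *port_name)
{
	printf("%s: leave VLAN-unaware bridge\n", port_name);
}

static const struct toy_bridge_ops toy_8021q_ops = {
	.port_join	= toy_8021q_join,
	.port_leave	= toy_8021q_leave,
};

static const struct toy_bridge_ops toy_8021d_ops = {
	.port_join	= toy_8021d_join,
	.port_leave	= toy_8021d_leave,
};

struct toy_bridge_device {
	bool vlan_enabled;
	const struct toy_bridge_ops *ops;
};

/* Mirrors mlxsw_sp_bridge_device_create(): ops are chosen exactly once. */
static void toy_bridge_device_init(struct toy_bridge_device *bdev,
				   bool vlan_enabled)
{
	bdev->vlan_enabled = vlan_enabled;
	bdev->ops = vlan_enabled ? &toy_8021q_ops : &toy_8021d_ops;
}

int main(void)
{
	struct toy_bridge_device br0, br1;

	toy_bridge_device_init(&br0, true);	/* a br_vlan_enabled() bridge */
	toy_bridge_device_init(&br1, false);

	br0.ops->port_join("swp1");
	br1.ops->port_join("swp2.10");
	br0.ops->port_leave("swp1");
	br1.ops->port_leave("swp2.10");
	return 0;
}

Recording vlan_enabled on the bridge device is also what simplifies the FDB paths in the hunks above: the notification handlers can compute vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0 instead of the removed mlxsw_sp_fid_is_vfid()/vPort special-casing.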