/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
/* Driver identification strings reported to the mlxsw core. */
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
78 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
81 * Packet control type.
82 * 0 - Ethernet control (e.g. EMADs, LACP)
85 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
88 * Packet protocol type. Must be set to 1 (Ethernet).
90 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
92 /* tx_hdr_rx_is_router
93 * Packet is sent from the router. Valid for data packets only.
95 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
98 * Indicates if the 'fid' field is valid and should be used for
99 * forwarding lookup. Valid for data packets only.
101 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
104 * Switch partition ID. Must be set to 0.
106 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
108 /* tx_hdr_control_tclass
109 * Indicates if the packet should use the control TClass and not one
110 * of the data TClasses.
112 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
115 * Egress TClass to be used on the egress device on the egress port.
117 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
120 * Destination local port for unicast packets.
121 * Destination multicast ID for multicast packets.
123 * Control packets are directed to a specific egress port, while data
124 * packets are transmitted through the CPU port (0) into the switch partition,
125 * where forwarding rules are applied.
127 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
130 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
131 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
132 * Valid for data packets only.
134 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
138 * 6 - Control packets
140 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
142 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
143 unsigned int counter_index, u64 *packets,
146 char mgpc_pl[MLXSW_REG_MGPC_LEN];
149 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
150 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
151 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
154 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
155 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
159 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
160 unsigned int counter_index)
162 char mgpc_pl[MLXSW_REG_MGPC_LEN];
164 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
165 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
166 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
169 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
170 unsigned int *p_counter_index)
174 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
178 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
180 goto err_counter_clear;
184 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
189 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
190 unsigned int counter_index)
192 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
196 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
197 const struct mlxsw_tx_info *tx_info)
199 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
201 memset(txhdr, 0, MLXSW_TXHDR_LEN);
203 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
204 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
205 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
206 mlxsw_tx_hdr_swid_set(txhdr, 0);
207 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
208 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
209 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
212 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
214 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
217 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
220 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
224 static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
228 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
231 mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
233 mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
234 sizeof(struct mlxsw_sp_span_entry),
236 if (!mlxsw_sp->span.entries)
239 for (i = 0; i < mlxsw_sp->span.entries_count; i++)
240 INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
245 static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
249 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
250 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
252 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
254 kfree(mlxsw_sp->span.entries);
257 static struct mlxsw_sp_span_entry *
258 mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
260 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
261 struct mlxsw_sp_span_entry *span_entry;
262 char mpat_pl[MLXSW_REG_MPAT_LEN];
263 u8 local_port = port->local_port;
268 /* find a free entry to use */
270 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
271 if (!mlxsw_sp->span.entries[i].used) {
273 span_entry = &mlxsw_sp->span.entries[i];
280 /* create a new port analayzer entry for local_port */
281 mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
282 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
286 span_entry->used = true;
287 span_entry->id = index;
288 span_entry->ref_count = 1;
289 span_entry->local_port = local_port;
293 static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
294 struct mlxsw_sp_span_entry *span_entry)
296 u8 local_port = span_entry->local_port;
297 char mpat_pl[MLXSW_REG_MPAT_LEN];
298 int pa_id = span_entry->id;
300 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
301 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
302 span_entry->used = false;
305 static struct mlxsw_sp_span_entry *
306 mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
308 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
311 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
312 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
314 if (curr->used && curr->local_port == port->local_port)
320 static struct mlxsw_sp_span_entry
321 *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
323 struct mlxsw_sp_span_entry *span_entry;
325 span_entry = mlxsw_sp_span_entry_find(port);
327 /* Already exists, just take a reference */
328 span_entry->ref_count++;
332 return mlxsw_sp_span_entry_create(port);
335 static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
336 struct mlxsw_sp_span_entry *span_entry)
338 WARN_ON(!span_entry->ref_count);
339 if (--span_entry->ref_count == 0)
340 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
344 static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
346 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
347 struct mlxsw_sp_span_inspected_port *p;
350 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
351 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
353 list_for_each_entry(p, &curr->bound_ports_list, list)
354 if (p->local_port == port->local_port &&
355 p->type == MLXSW_SP_SPAN_EGRESS)
/* Shared-buffer size (in cells) needed for egress mirroring at @mtu:
 * 2.5x the MTU, rounded up by one cell.
 */
static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
{
	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
}
367 static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
369 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
370 char sbib_pl[MLXSW_REG_SBIB_LEN];
373 /* If port is egress mirrored, the shared buffer size should be
374 * updated according to the mtu value
376 if (mlxsw_sp_span_is_egress_mirror(port)) {
377 mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
378 mlxsw_sp_span_mtu_to_buffsize(mtu));
379 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
381 netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
389 static struct mlxsw_sp_span_inspected_port *
390 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
391 struct mlxsw_sp_span_entry *span_entry)
393 struct mlxsw_sp_span_inspected_port *p;
395 list_for_each_entry(p, &span_entry->bound_ports_list, list)
396 if (port->local_port == p->local_port)
402 mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
403 struct mlxsw_sp_span_entry *span_entry,
404 enum mlxsw_sp_span_type type)
406 struct mlxsw_sp_span_inspected_port *inspected_port;
407 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
408 char mpar_pl[MLXSW_REG_MPAR_LEN];
409 char sbib_pl[MLXSW_REG_SBIB_LEN];
410 int pa_id = span_entry->id;
413 /* if it is an egress SPAN, bind a shared buffer to it */
414 if (type == MLXSW_SP_SPAN_EGRESS) {
415 mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
416 mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
417 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
419 netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
424 /* bind the port to the SPAN entry */
425 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
426 (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
427 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
429 goto err_mpar_reg_write;
431 inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
432 if (!inspected_port) {
434 goto err_inspected_port_alloc;
436 inspected_port->local_port = port->local_port;
437 inspected_port->type = type;
438 list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
443 err_inspected_port_alloc:
444 if (type == MLXSW_SP_SPAN_EGRESS) {
445 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
446 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
452 mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
453 struct mlxsw_sp_span_entry *span_entry,
454 enum mlxsw_sp_span_type type)
456 struct mlxsw_sp_span_inspected_port *inspected_port;
457 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
458 char mpar_pl[MLXSW_REG_MPAR_LEN];
459 char sbib_pl[MLXSW_REG_SBIB_LEN];
460 int pa_id = span_entry->id;
462 inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
466 /* remove the inspected port */
467 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
468 (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
469 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
471 /* remove the SBIB buffer if it was egress SPAN */
472 if (type == MLXSW_SP_SPAN_EGRESS) {
473 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
474 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
477 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
479 list_del(&inspected_port->list);
480 kfree(inspected_port);
483 static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
484 struct mlxsw_sp_port *to,
485 enum mlxsw_sp_span_type type)
487 struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
488 struct mlxsw_sp_span_entry *span_entry;
491 span_entry = mlxsw_sp_span_entry_get(to);
495 netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
498 err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
505 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
509 static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
510 struct mlxsw_sp_port *to,
511 enum mlxsw_sp_span_type type)
513 struct mlxsw_sp_span_entry *span_entry;
515 span_entry = mlxsw_sp_span_entry_find(to);
517 netdev_err(from->dev, "no span entry found\n");
521 netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
523 mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
526 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
527 bool enable, u32 rate)
529 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
530 char mpsc_pl[MLXSW_REG_MPSC_LEN];
532 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
533 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
536 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
539 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
540 char paos_pl[MLXSW_REG_PAOS_LEN];
542 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
543 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
544 MLXSW_PORT_ADMIN_STATUS_DOWN);
545 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
548 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
551 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
552 char ppad_pl[MLXSW_REG_PPAD_LEN];
554 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
555 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
556 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
559 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
562 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
564 ether_addr_copy(addr, mlxsw_sp->base_mac);
565 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
566 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
569 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
571 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
572 char pmtu_pl[MLXSW_REG_PMTU_LEN];
576 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
577 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
578 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
581 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
586 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
587 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
590 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
593 char pspa_pl[MLXSW_REG_PSPA_LEN];
595 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
596 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
599 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
601 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
603 return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
607 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
610 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
611 char svpe_pl[MLXSW_REG_SVPE_LEN];
613 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
614 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
617 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
618 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
621 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
622 char svfa_pl[MLXSW_REG_SVFA_LEN];
624 mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
626 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
629 int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
630 u16 vid_begin, u16 vid_end,
633 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
637 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
640 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
641 vid_end, learn_enable);
642 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
647 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
648 u16 vid, bool learn_enable)
650 return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
655 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
657 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
658 char sspr_pl[MLXSW_REG_SSPR_LEN];
660 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
661 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
664 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
665 u8 local_port, u8 *p_module,
666 u8 *p_width, u8 *p_lane)
668 char pmlp_pl[MLXSW_REG_PMLP_LEN];
671 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
672 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
675 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
676 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
677 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
681 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
682 u8 module, u8 width, u8 lane)
684 char pmlp_pl[MLXSW_REG_PMLP_LEN];
687 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
688 mlxsw_reg_pmlp_width_set(pmlp_pl, width);
689 for (i = 0; i < width; i++) {
690 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
691 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
694 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
697 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
699 char pmlp_pl[MLXSW_REG_PMLP_LEN];
701 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
702 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
703 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
706 static int mlxsw_sp_port_open(struct net_device *dev)
708 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
711 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
714 netif_start_queue(dev);
718 static int mlxsw_sp_port_stop(struct net_device *dev)
720 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
722 netif_stop_queue(dev);
723 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
726 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
727 struct net_device *dev)
729 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
730 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
731 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
732 const struct mlxsw_tx_info tx_info = {
733 .local_port = mlxsw_sp_port->local_port,
739 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
740 return NETDEV_TX_BUSY;
742 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
743 struct sk_buff *skb_orig = skb;
745 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
747 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
748 dev_kfree_skb_any(skb_orig);
751 dev_consume_skb_any(skb_orig);
754 if (eth_skb_pad(skb)) {
755 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
759 mlxsw_sp_txhdr_construct(skb, &tx_info);
760 /* TX header is consumed by HW on the way so we shouldn't count its
761 * bytes as being sent.
763 len = skb->len - MLXSW_TXHDR_LEN;
765 /* Due to a race we might fail here because of a full queue. In that
766 * unlikely case we simply drop the packet.
768 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
771 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
772 u64_stats_update_begin(&pcpu_stats->syncp);
773 pcpu_stats->tx_packets++;
774 pcpu_stats->tx_bytes += len;
775 u64_stats_update_end(&pcpu_stats->syncp);
777 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
778 dev_kfree_skb_any(skb);
783 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
787 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
789 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
790 struct sockaddr *addr = p;
793 if (!is_valid_ether_addr(addr->sa_data))
794 return -EADDRNOTAVAIL;
796 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
799 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
803 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
804 bool pause_en, bool pfc_en, u16 delay)
806 u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
808 delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
809 MLXSW_SP_PAUSE_DELAY;
811 if (pause_en || pfc_en)
812 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
813 pg_size + delay, pg_size);
815 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
818 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
819 u8 *prio_tc, bool pause_en,
820 struct ieee_pfc *my_pfc)
822 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
823 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
824 u16 delay = !!my_pfc ? my_pfc->delay : 0;
825 char pbmc_pl[MLXSW_REG_PBMC_LEN];
828 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
829 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
833 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
834 bool configure = false;
837 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
838 if (prio_tc[j] == i) {
839 pfc = pfc_en & BIT(j);
847 mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
850 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
853 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
854 int mtu, bool pause_en)
856 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
857 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
858 struct ieee_pfc *my_pfc;
861 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
862 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
864 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
868 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
870 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
871 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
874 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
877 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
879 goto err_span_port_mtu_update;
880 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
882 goto err_port_mtu_set;
887 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
888 err_span_port_mtu_update:
889 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
894 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
895 struct rtnl_link_stats64 *stats)
897 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
898 struct mlxsw_sp_port_pcpu_stats *p;
899 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
904 for_each_possible_cpu(i) {
905 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
907 start = u64_stats_fetch_begin_irq(&p->syncp);
908 rx_packets = p->rx_packets;
909 rx_bytes = p->rx_bytes;
910 tx_packets = p->tx_packets;
911 tx_bytes = p->tx_bytes;
912 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
914 stats->rx_packets += rx_packets;
915 stats->rx_bytes += rx_bytes;
916 stats->tx_packets += tx_packets;
917 stats->tx_bytes += tx_bytes;
918 /* tx_dropped is u32, updated without syncp protection. */
919 tx_dropped += p->tx_dropped;
921 stats->tx_dropped = tx_dropped;
925 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
928 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
935 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
939 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
940 return mlxsw_sp_port_get_sw_stats64(dev, sp);
946 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
947 int prio, char *ppcnt_pl)
949 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
950 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
952 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
953 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
956 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
957 struct rtnl_link_stats64 *stats)
959 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
962 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
968 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
970 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
972 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
974 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
976 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
978 stats->rx_crc_errors =
979 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
980 stats->rx_frame_errors =
981 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
983 stats->rx_length_errors = (
984 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
985 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
986 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
988 stats->rx_errors = (stats->rx_crc_errors +
989 stats->rx_frame_errors + stats->rx_length_errors);
995 static void update_stats_cache(struct work_struct *work)
997 struct mlxsw_sp_port *mlxsw_sp_port =
998 container_of(work, struct mlxsw_sp_port,
999 hw_stats.update_dw.work);
1001 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1004 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1005 mlxsw_sp_port->hw_stats.cache);
1008 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
1009 MLXSW_HW_STATS_UPDATE_TIME);
1012 /* Return the stats from a cache that is updated periodically,
1013 * as this function might get called in an atomic context.
1016 mlxsw_sp_port_get_stats64(struct net_device *dev,
1017 struct rtnl_link_stats64 *stats)
1019 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1021 memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
1024 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1025 u16 vid_end, bool is_member, bool untagged)
1027 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1031 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1035 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1036 vid_end, is_member, untagged);
1037 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1042 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
1044 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
1045 u16 vid, last_visited_vid;
1048 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
1049 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
1052 last_visited_vid = vid;
1053 goto err_port_vid_to_fid_set;
1057 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
1059 last_visited_vid = VLAN_N_VID;
1060 goto err_port_vid_to_fid_set;
1065 err_port_vid_to_fid_set:
1066 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
1067 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
1072 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
1074 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
1078 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
1082 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
1083 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
1092 static struct mlxsw_sp_port *
1093 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1095 struct mlxsw_sp_port *mlxsw_sp_vport;
1097 mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
1098 if (!mlxsw_sp_vport)
1101 /* dev will be set correctly after the VLAN device is linked
1102 * with the real device. In case of bridge SELF invocation, dev
1103 * will remain as is.
1105 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
1106 mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1107 mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
1108 mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
1109 mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
1110 mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
1111 mlxsw_sp_vport->vport.vid = vid;
1113 list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
1115 return mlxsw_sp_vport;
1118 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
1120 list_del(&mlxsw_sp_vport->vport.list);
1121 kfree(mlxsw_sp_vport);
1124 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1125 __be16 __always_unused proto, u16 vid)
1127 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1128 struct mlxsw_sp_port *mlxsw_sp_vport;
1129 bool untagged = vid == 1;
1132 /* VLAN 0 is added to HW filter when device goes up, but it is
1133 * reserved in our case, so simply return.
1138 if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
1141 mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
1142 if (!mlxsw_sp_vport)
1145 /* When adding the first VLAN interface on a bridged port we need to
1146 * transition all the active 802.1Q bridge VLANs to use explicit
1147 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
1149 if (list_is_singular(&mlxsw_sp_port->vports_list)) {
1150 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
1152 goto err_port_vp_mode_trans;
1155 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
1157 goto err_port_add_vid;
1162 if (list_is_singular(&mlxsw_sp_port->vports_list))
1163 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
1164 err_port_vp_mode_trans:
1165 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
1169 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1170 __be16 __always_unused proto, u16 vid)
1172 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1173 struct mlxsw_sp_port *mlxsw_sp_vport;
1174 struct mlxsw_sp_fid *f;
1176 /* VLAN 0 is removed from HW filter when device goes down, but
1177 * it is reserved in our case, so simply return.
1182 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
1183 if (WARN_ON(!mlxsw_sp_vport))
1186 mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
1188 /* Drop FID reference. If this was the last reference the
1189 * resources will be freed.
1191 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
1192 if (f && !WARN_ON(!f->leave))
1193 f->leave(mlxsw_sp_vport);
1195 /* When removing the last VLAN interface on a bridged port we need to
1196 * transition all active 802.1Q bridge VLANs to use VID to FID
1197 * mappings and set port's mode to VLAN mode.
1199 if (list_is_singular(&mlxsw_sp_port->vports_list))
1200 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
1202 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
/* .ndo_get_phys_port_name: "p<module>" for a regular port and
 * "p<module>s<subport>" for a split port, derived from the port mapping.
 */
1207 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1210 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1211 u8 module = mlxsw_sp_port->mapping.module;
1212 u8 width = mlxsw_sp_port->mapping.width;
1213 u8 lane = mlxsw_sp_port->mapping.lane;
/* Module numbers are 0-based in HW but 1-based in the naming scheme. */
1216 if (!mlxsw_sp_port->split)
1217 err = snprintf(name, len, "p%d", module + 1);
1219 err = snprintf(name, len, "p%ds%d", module + 1,
1228 static struct mlxsw_sp_port_mall_tc_entry *
1229 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1230 unsigned long cookie) {
1231 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1233 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1234 if (mall_tc_entry->cookie == cookie)
1235 return mall_tc_entry;
/* Offload a matchall mirror action: resolve the mirred target netdev,
 * verify it is another Spectrum port and program a SPAN session to it.
 * Records the analyzer port and direction in @mirror for later removal.
 */
1241 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1242 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1243 const struct tc_action *a,
1246 struct net *net = dev_net(mlxsw_sp_port->dev);
1247 enum mlxsw_sp_span_type span_type;
1248 struct mlxsw_sp_port *to_port;
1249 struct net_device *to_dev;
/* Target is identified by ifindex in the mirred action; RTNL is held
 * here (presumably via the TC path), so __dev_get_by_index() is safe —
 * NOTE(review): confirm against the caller.
 */
1252 ifindex = tcf_mirred_ifindex(a);
1253 to_dev = __dev_get_by_index(net, ifindex);
1255 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
/* Mirroring is only possible to another port of the same ASIC. */
1259 if (!mlxsw_sp_port_dev_check(to_dev)) {
1260 netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
1263 to_port = netdev_priv(to_dev);
1265 mirror->to_local_port = to_port->local_port;
1266 mirror->ingress = ingress;
1267 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1268 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
1272 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1273 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1275 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1276 enum mlxsw_sp_span_type span_type;
1277 struct mlxsw_sp_port *to_port;
1279 to_port = mlxsw_sp->ports[mirror->to_local_port];
1280 span_type = mirror->ingress ?
1281 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1282 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
/* Offload a matchall sample action: publish the psample group and sampling
 * parameters, then enable the HW sampler.  Only one sampler per port is
 * supported, and the rate is bounded by the MPSC register field.
 */
1286 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1287 struct tc_cls_matchall_offload *cls,
1288 const struct tc_action *a,
/* ->sample is allocated at port creation; bail out if that failed. */
1293 if (!mlxsw_sp_port->sample)
1295 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1296 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1299 if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1300 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
/* Publish the psample group and parameters before enabling HW sampling,
 * so the trap handler never sees a half-initialized state.
 */
1304 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1305 tcf_sample_psample_group(a));
1306 mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1307 mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1308 mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1310 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1312 goto err_port_sample_set;
/* Unwind the published group if the HW enable failed. */
1315 err_port_sample_set:
1316 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
/* Disable HW sampling on the port and unpublish the psample group.
 * Reverse of mlxsw_sp_port_add_cls_matchall_sample().
 */
1321 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1323 if (!mlxsw_sp_port->sample)
/* Rate 1 is a don't-care value while sampling is being disabled. */
1326 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1327 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
/* Install a matchall classifier offload.  Only a single action per filter
 * is supported: either an egress-mirror (SPAN) or a sample action, and
 * only for protocol "all".  On success the entry is tracked on the port's
 * mall_tc_list, keyed by the TC cookie.
 */
1330 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1332 struct tc_cls_matchall_offload *cls,
1335 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1336 const struct tc_action *a;
1340 if (!tc_single_action(cls->exts)) {
1341 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
1345 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1348 mall_tc_entry->cookie = cls->cookie;
1350 tcf_exts_to_list(cls->exts, &actions);
1351 a = list_first_entry(&actions, struct tc_action, list);
/* Dispatch on the (single) action type. */
1353 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1354 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1356 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1357 mirror = &mall_tc_entry->mirror;
1358 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1359 mirror, a, ingress);
1360 } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1361 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1362 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
1369 goto err_add_action;
1371 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
/* Error path: free the untracked entry. */
1375 kfree(mall_tc_entry);
/* Remove a matchall classifier offload previously installed by
 * mlxsw_sp_port_add_cls_matchall(), looked up by its TC cookie.
 */
1379 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1380 struct tc_cls_matchall_offload *cls)
1382 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1384 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1386 if (!mall_tc_entry) {
1387 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1390 list_del(&mall_tc_entry->list);
/* Undo the HW configuration according to the entry type. */
1392 switch (mall_tc_entry->type) {
1393 case MLXSW_SP_PORT_MALL_MIRROR:
1394 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1395 &mall_tc_entry->mirror);
1397 case MLXSW_SP_PORT_MALL_SAMPLE:
1398 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1404 kfree(mall_tc_entry);
/* .ndo_setup_tc entry point: dispatch matchall and flower classifier
 * offload commands.  Direction (ingress/egress) is derived from the
 * qdisc handle's major number.
 */
1407 static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
1408 __be16 proto, struct tc_to_netdev *tc)
1410 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1411 bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
1414 case TC_SETUP_MATCHALL:
1415 switch (tc->cls_mall->command) {
1416 case TC_CLSMATCHALL_REPLACE:
1417 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
1421 case TC_CLSMATCHALL_DESTROY:
1422 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
1428 case TC_SETUP_CLSFLOWER:
1429 switch (tc->cls_flower->command) {
1430 case TC_CLSFLOWER_REPLACE:
1431 return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
1432 proto, tc->cls_flower);
1433 case TC_CLSFLOWER_DESTROY:
1434 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
1437 case TC_CLSFLOWER_STATS:
1438 return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
/* Network device operations for Spectrum front-panel ports.  FDB and
 * bridge operations are delegated to the generic switchdev helpers.
 */
1448 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1449 .ndo_open = mlxsw_sp_port_open,
1450 .ndo_stop = mlxsw_sp_port_stop,
1451 .ndo_start_xmit = mlxsw_sp_port_xmit,
1452 .ndo_setup_tc = mlxsw_sp_setup_tc,
1453 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
1454 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
1455 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
1456 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
1457 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1458 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
1459 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
1460 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
1461 .ndo_fdb_add = switchdev_port_fdb_add,
1462 .ndo_fdb_del = switchdev_port_fdb_del,
1463 .ndo_fdb_dump = switchdev_port_fdb_dump,
1464 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
1465 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
1466 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
1467 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
/* ethtool .get_drvinfo: report driver name/version, firmware revision
 * (major.minor.subminor from the bus info) and the bus device name.
 */
1470 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1471 struct ethtool_drvinfo *drvinfo)
1473 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1474 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1476 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1477 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1478 sizeof(drvinfo->version));
1479 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1481 mlxsw_sp->bus_info->fw_rev.major,
1482 mlxsw_sp->bus_info->fw_rev.minor,
1483 mlxsw_sp->bus_info->fw_rev.subminor);
1484 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1485 sizeof(drvinfo->bus_info));
1488 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1489 struct ethtool_pauseparam *pause)
1491 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1493 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1494 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
/* Program global (non-PFC) pause frame admission via the PFCC register:
 * pprx/pptx enable pause reception/transmission respectively.
 */
1497 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1498 struct ethtool_pauseparam *pause)
1500 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1502 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1503 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1504 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1506 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
/* ethtool .set_pauseparam: global pause is mutually exclusive with PFC,
 * and pause autonegotiation is not supported.  Headroom buffers must be
 * resized before the new pause state takes effect; on failure the
 * headroom is restored from the cached state.
 */
1510 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1511 struct ethtool_pauseparam *pause)
1513 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1514 bool pause_en = pause->tx_pause || pause->rx_pause;
1517 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1518 netdev_err(dev, "PFC already enabled on port\n");
1522 if (pause->autoneg) {
1523 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
/* Size the port headroom for the new pause configuration first. */
1527 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1529 netdev_err(dev, "Failed to configure port's headroom\n");
1533 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1535 netdev_err(dev, "Failed to set PAUSE parameters\n");
1536 goto err_port_pause_configure;
/* Cache the state for get_pauseparam() and future headroom sizing. */
1539 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1540 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
/* Roll the headroom back to match the still-current pause state. */
1544 err_port_pause_configure:
1545 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1546 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
/* Descriptor for one ethtool HW counter: its string name and a getter
 * that extracts the 64-bit value from a raw PPCNT register payload.
 */
1550 struct mlxsw_sp_port_hw_stats {
1551 char str[ETH_GSTRING_LEN];
1552 u64 (*getter)(const char *payload);
/* IEEE 802.3 counter group of the PPCNT register: ethtool string names
 * paired with the auto-generated register field getters.
 */
1555 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1557 .str = "a_frames_transmitted_ok",
1558 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1561 .str = "a_frames_received_ok",
1562 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1565 .str = "a_frame_check_sequence_errors",
1566 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1569 .str = "a_alignment_errors",
1570 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1573 .str = "a_octets_transmitted_ok",
1574 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1577 .str = "a_octets_received_ok",
1578 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1581 .str = "a_multicast_frames_xmitted_ok",
1582 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1585 .str = "a_broadcast_frames_xmitted_ok",
1586 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1589 .str = "a_multicast_frames_received_ok",
1590 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1593 .str = "a_broadcast_frames_received_ok",
1594 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1597 .str = "a_in_range_length_errors",
1598 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1601 .str = "a_out_of_range_length_field",
1602 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1605 .str = "a_frame_too_long_errors",
1606 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1609 .str = "a_symbol_error_during_carrier",
1610 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1613 .str = "a_mac_control_frames_transmitted",
1614 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1617 .str = "a_mac_control_frames_received",
1618 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1621 .str = "a_unsupported_opcodes_received",
1622 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1625 .str = "a_pause_mac_ctrl_frames_received",
1626 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1629 .str = "a_pause_mac_ctrl_frames_xmitted",
1630 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1634 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
/* Per-priority counter group of the PPCNT register; the priority number
 * is appended to each string by mlxsw_sp_port_get_prio_strings().
 */
1636 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1638 .str = "rx_octets_prio",
1639 .getter = mlxsw_reg_ppcnt_rx_octets_get,
1642 .str = "rx_frames_prio",
1643 .getter = mlxsw_reg_ppcnt_rx_frames_get,
1646 .str = "tx_octets_prio",
1647 .getter = mlxsw_reg_ppcnt_tx_octets_get,
1650 .str = "tx_frames_prio",
1651 .getter = mlxsw_reg_ppcnt_tx_frames_get,
1654 .str = "rx_pause_prio",
1655 .getter = mlxsw_reg_ppcnt_rx_pause_get,
1658 .str = "rx_pause_duration_prio",
1659 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1662 .str = "tx_pause_prio",
1663 .getter = mlxsw_reg_ppcnt_tx_pause_get,
1666 .str = "tx_pause_duration_prio",
1667 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1671 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
/* PPCNT reports the TC transmit queue depth in buffer cells; convert it
 * to bytes so the ethtool counter has a meaningful unit.
 */
1673 static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
1675 u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1677 return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
/* Per-traffic-class counter group of the PPCNT register; the TC number
 * is appended to each string by mlxsw_sp_port_get_tc_strings().
 */
1680 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1682 .str = "tc_transmit_queue_tc",
1683 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
1686 .str = "tc_no_buffer_discard_uc_tc",
1687 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1691 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
/* Total ethtool stats: flat IEEE counters plus per-prio and per-TC
 * counters replicated for each of the 8 traffic classes/priorities.
 */
1693 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
1694 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1695 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
1696 IEEE_8021QAZ_MAX_TCS)
/* Append "<name>_<prio>" strings for every per-priority counter to the
 * ethtool strings buffer, advancing the caller's cursor *p.
 */
1698 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1702 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1703 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1704 mlxsw_sp_port_hw_prio_stats[i].str, prio);
1705 *p += ETH_GSTRING_LEN;
/* Append "<name>_<tc>" strings for every per-TC counter to the ethtool
 * strings buffer, advancing the caller's cursor *p.
 */
1709 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
1713 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
1714 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1715 mlxsw_sp_port_hw_tc_stats[i].str, tc);
1716 *p += ETH_GSTRING_LEN;
/* ethtool .get_strings: emit flat IEEE counter names, then the per-prio
 * and per-TC variants.  The order here must match the data layout
 * produced by mlxsw_sp_port_get_stats().
 */
1720 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1721 u32 stringset, u8 *data)
1726 switch (stringset) {
1728 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1729 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1731 p += ETH_GSTRING_LEN;
1734 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1735 mlxsw_sp_port_get_prio_strings(&p, i);
1737 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1738 mlxsw_sp_port_get_tc_strings(&p, i);
/* ethtool .set_phys_id: drive the port beacon LED through the MLCR
 * register.  ACTIVE/INACTIVE map to a beacon-duration value written
 * below (the assignments are outside this excerpt's visible lines).
 */
1744 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1745 enum ethtool_phys_id_state state)
1747 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1748 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1749 char mlcr_pl[MLXSW_REG_MLCR_LEN];
1753 case ETHTOOL_ID_ACTIVE:
1756 case ETHTOOL_ID_INACTIVE:
1763 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1764 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
/* Map a PPCNT counter group to its descriptor table and table length.
 * Unsupported groups are handled by the (elided) default case.
 */
1768 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
1769 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
1772 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
1773 *p_hw_stats = mlxsw_sp_port_hw_stats;
1774 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
1776 case MLXSW_REG_PPCNT_PRIO_CNT:
1777 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
1778 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1780 case MLXSW_REG_PPCNT_TC_CNT:
1781 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
1782 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
/* Fetch one raw PPCNT snapshot for counter group @grp (and priority/TC
 * @prio where applicable) and extract every counter of that group into
 * data[] starting at @data_index, using the per-counter getters.
 */
1791 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
1792 enum mlxsw_reg_ppcnt_grp grp, int prio,
1793 u64 *data, int data_index)
1795 struct mlxsw_sp_port_hw_stats *hw_stats;
1796 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1800 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
1803 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
1804 for (i = 0; i < len; i++)
1805 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
/* ethtool .get_ethtool_stats: gather the flat IEEE group, then the
 * per-priority and per-TC groups for each of the 8 priorities/TCs.
 * data_index advances by each table's length, matching get_strings().
 */
1808 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1809 struct ethtool_stats *stats, u64 *data)
1811 int i, data_index = 0;
1813 /* IEEE 802.3 Counters */
1814 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
1816 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
1818 /* Per-Priority Counters */
1819 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1820 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
1822 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1825 /* Per-TC Counters */
1826 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1827 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
1829 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
/* ethtool .get_sset_count: total number of stats strings/values. */
1833 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1837 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
/* One row of the PTYS <-> ethtool link-mode translation table: a PTYS
 * protocol bitmask, the corresponding ethtool link-mode bit, and
 * (further members elided from this view) the associated speed.
 */
1843 struct mlxsw_sp_port_link_mode {
1844 enum ethtool_link_mode_bit_indices mask_ethtool;
1849 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1851 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
1852 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1856 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1857 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
1858 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1859 .speed = SPEED_1000,
1862 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
1863 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1864 .speed = SPEED_10000,
1867 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1868 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
1869 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
1870 .speed = SPEED_10000,
1873 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1874 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1875 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1876 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
1877 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1878 .speed = SPEED_10000,
1881 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
1882 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
1883 .speed = SPEED_20000,
1886 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
1887 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1888 .speed = SPEED_40000,
1891 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
1892 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1893 .speed = SPEED_40000,
1896 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
1897 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1898 .speed = SPEED_40000,
1901 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
1902 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1903 .speed = SPEED_40000,
1906 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
1907 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1908 .speed = SPEED_25000,
1911 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
1912 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1913 .speed = SPEED_25000,
1916 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1917 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1918 .speed = SPEED_25000,
1921 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1922 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1923 .speed = SPEED_25000,
1926 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
1927 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1928 .speed = SPEED_50000,
1931 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1932 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1933 .speed = SPEED_50000,
1936 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
1937 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1938 .speed = SPEED_50000,
1941 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1942 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
1943 .speed = SPEED_56000,
1946 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1947 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
1948 .speed = SPEED_56000,
1951 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1952 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
1953 .speed = SPEED_56000,
1956 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1957 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
1958 .speed = SPEED_56000,
1961 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
1962 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1963 .speed = SPEED_100000,
1966 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
1967 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1968 .speed = SPEED_100000,
1971 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
1972 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1973 .speed = SPEED_100000,
1976 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
1977 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1978 .speed = SPEED_100000,
1982 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
/* Derive the supported port types (FIBRE / Backplane) from the PTYS
 * capability bits and set them in the ethtool ksettings.
 */
1985 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
1986 struct ethtool_link_ksettings *cmd)
1988 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1989 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1990 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1991 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1992 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1993 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1994 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
1996 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1997 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1998 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1999 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2000 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2001 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2004 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2008 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2009 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2010 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
/* Fill ethtool speed/duplex from the operational PTYS protocol.  When
 * there is no carrier (handling elided from this view) the defaults
 * SPEED_UNKNOWN/DUPLEX_UNKNOWN are reported; all supported modes are
 * full duplex.
 */
2015 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2016 struct ethtool_link_ksettings *cmd)
2018 u32 speed = SPEED_UNKNOWN;
2019 u8 duplex = DUPLEX_UNKNOWN;
/* First table entry matching the oper protocol determines the speed. */
2025 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2026 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2027 speed = mlxsw_sp_port_link_mode[i].speed;
2028 duplex = DUPLEX_FULL;
2033 cmd->base.speed = speed;
2034 cmd->base.duplex = duplex;
/* Classify the operational PTYS protocol into an ethtool connector type
 * (the PORT_* return values fall between the elided lines): optical/
 * SGMII modes, then direct-attach copper, then backplane.
 */
2037 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2039 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2040 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2041 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2042 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2045 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2046 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2047 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2050 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2051 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2052 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2053 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2060 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2065 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2066 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2067 cmd->link_modes.advertising))
2068 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2073 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2078 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2079 if (speed == mlxsw_sp_port_link_mode[i].speed)
2080 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2085 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2090 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2091 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2092 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
/* Fill the "supported" ksettings bitmap: pause/autoneg capabilities,
 * connector types and all link modes from the PTYS capability word.
 */
2097 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2098 struct ethtool_link_ksettings *cmd)
2100 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2101 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2102 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2104 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2105 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
/* Fill the "advertising" ksettings bitmap from the PTYS admin word;
 * only meaningful when autonegotiation is enabled (the early return for
 * !autoneg falls between the elided lines).
 */
2108 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2109 struct ethtool_link_ksettings *cmd)
2114 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2115 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
/* Fill the link-partner advertising bitmap, but only when autoneg
 * completed successfully and the partner reported something.
 */
2119 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2120 struct ethtool_link_ksettings *cmd)
2122 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2125 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2126 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
/* ethtool .get_link_ksettings: query the PTYS register once and derive
 * supported/advertised/lp-advertised modes, autoneg state, connector
 * type and current speed/duplex from it.
 */
2129 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2130 struct ethtool_link_ksettings *cmd)
2132 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2133 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2134 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2135 char ptys_pl[MLXSW_REG_PTYS_LEN];
2140 autoneg = mlxsw_sp_port->link.autoneg;
2141 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2142 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2145 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2148 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2150 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2152 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2153 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2154 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2156 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2157 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
/* Speed/duplex are only valid while the carrier is up. */
2158 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
/* ethtool .set_link_ksettings: compute the new PTYS admin protocol from
 * either the advertised modes (autoneg) or the forced speed, restrict it
 * to the port capability, write it, and apply by toggling the admin
 * state when the interface is running.
 */
2165 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2166 const struct ethtool_link_ksettings *cmd)
2168 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2169 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2170 char ptys_pl[MLXSW_REG_PTYS_LEN];
2171 u32 eth_proto_cap, eth_proto_new;
/* Read current capability to validate the requested configuration. */
2175 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2176 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2179 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2181 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2182 eth_proto_new = autoneg ?
2183 mlxsw_sp_to_ptys_advert_link(cmd) :
2184 mlxsw_sp_to_ptys_speed(cmd->base.speed);
2186 eth_proto_new = eth_proto_new & eth_proto_cap;
2187 if (!eth_proto_new) {
2188 netdev_err(dev, "No supported speed requested\n");
2192 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2194 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
/* PTYS changes only take effect on the next admin-up transition. */
2198 if (!netif_running(dev))
2201 mlxsw_sp_port->link.autoneg = autoneg;
2203 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2204 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
/* ethtool operations for Spectrum front-panel ports. */
2209 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2210 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2211 .get_link = ethtool_op_get_link,
2212 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2213 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
2214 .get_strings = mlxsw_sp_port_get_strings,
2215 .set_phys_id = mlxsw_sp_port_set_phys_id,
2216 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2217 .get_sset_count = mlxsw_sp_port_get_sset_count,
2218 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2219 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
/* Limit the admin (advertised) protocol set to every speed up to the
 * port's maximum, computed as base lane speed times lane width.
 */
2223 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2225 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2226 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2227 char ptys_pl[MLXSW_REG_PTYS_LEN];
2228 u32 eth_proto_admin;
2230 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2231 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2233 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
/* Configure one QEEC scheduling element at hierarchy level @hr:
 * link it to @next_index and set its DWRR enable flag and weight.
 */
2236 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2237 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2238 bool dwrr, u8 dwrr_weight)
2240 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2241 char qeec_pl[MLXSW_REG_QEEC_LEN];
2243 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
/* "de" enables the DWRR/strict-priority configuration field. */
2245 mlxsw_reg_qeec_de_set(qeec_pl, true);
2246 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2247 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2248 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
/* Set the maximum shaper rate on one QEEC scheduling element;
 * MLXSW_REG_QEEC_MAS_DIS as @maxrate disables the shaper.
 */
2251 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2252 enum mlxsw_reg_qeec_hr hr, u8 index,
2253 u8 next_index, u32 maxrate)
2255 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2256 char qeec_pl[MLXSW_REG_QEEC_LEN];
2258 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
/* "mase" enables the max-shaper configuration field. */
2260 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2261 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2262 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
/* Map one switch priority to an egress traffic class via the QTCT
 * register.
 */
2265 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2266 u8 switch_prio, u8 tclass)
2268 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2269 char qtct_pl[MLXSW_REG_QTCT_LEN];
2271 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2273 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
/* Set up the default ETS scheduling hierarchy for a port: one group,
 * one subgroup per TC, one element per TC; disable all max shapers and
 * map every switch priority to TC 0.
 */
2276 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2280 /* Setup the elements hierarcy, so that each TC is linked to
2281 * one subgroup, which are all member in the same group.
2283 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2284 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2288 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2289 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2290 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2295 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2296 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2297 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2303 /* Make sure the max shaper is disabled in all hierarcies that
2306 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2307 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2308 MLXSW_REG_QEEC_MAS_DIS);
2311 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2312 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2313 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2315 MLXSW_REG_QEEC_MAS_DIS);
2319 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2320 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2321 MLXSW_REG_QEEC_HIERARCY_TC,
2323 MLXSW_REG_QEEC_MAS_DIS);
2328 /* Map all priorities to traffic class 0. */
2329 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2330 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
/* Create the vPort backing the default PVID (VLAN 1) by reusing the
 * .ndo_vlan_rx_add_vid path; the protocol argument is unused there.
 */
2338 static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
2340 mlxsw_sp_port->pvid = 1;
2342 return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
/* Destroy the default PVID (VLAN 1) vPort; reverse of
 * mlxsw_sp_port_pvid_vport_create().
 */
2345 static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
2347 return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
/* Create and register one front-panel port:
 * allocate the netdev + per-port state, program SWID / MAC / speed / MTU /
 * buffers / ETS / DCB, create the PVID vPort, then register_netdev().
 * Unwinds via the goto ladder at the bottom on any failure.
 * NOTE(review): many intermediate lines (error checks, braces) are missing
 * from this extraction; comments describe only the visible statements.
 */
2350 static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2351 bool split, u8 module, u8 width, u8 lane)
2353 struct mlxsw_sp_port *mlxsw_sp_port;
2354 struct net_device *dev;
/* netdev private area holds struct mlxsw_sp_port */
2358 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2361 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2362 mlxsw_sp_port = netdev_priv(dev);
2363 mlxsw_sp_port->dev = dev;
2364 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2365 mlxsw_sp_port->local_port = local_port;
2366 mlxsw_sp_port->split = split;
2367 mlxsw_sp_port->mapping.module = module;
2368 mlxsw_sp_port->mapping.width = width;
2369 mlxsw_sp_port->mapping.lane = lane;
2370 mlxsw_sp_port->link.autoneg = 1;
/* VLAN bitmaps: one bit per possible VID */
2371 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
2372 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
2373 if (!mlxsw_sp_port->active_vlans) {
2375 goto err_port_active_vlans_alloc;
2377 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
2378 if (!mlxsw_sp_port->untagged_vlans) {
2380 goto err_port_untagged_vlans_alloc;
2382 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
2383 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
/* per-CPU RX/TX counters, read under u64_stats syncp */
2385 mlxsw_sp_port->pcpu_stats =
2386 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2387 if (!mlxsw_sp_port->pcpu_stats) {
2389 goto err_alloc_stats;
/* packet-sampling (psample) state; may stay unused on this port */
2392 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2394 if (!mlxsw_sp_port->sample) {
2396 goto err_alloc_sample;
/* cache of HW stats, refreshed periodically by update_stats_cache */
2399 mlxsw_sp_port->hw_stats.cache =
2400 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2402 if (!mlxsw_sp_port->hw_stats.cache) {
2404 goto err_alloc_hw_stats;
2406 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2407 &update_stats_cache);
2409 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2410 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
/* move port from the disabled SWID to SWID 0 (Ethernet) */
2412 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2414 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2415 mlxsw_sp_port->local_port);
2416 goto err_port_swid_set;
2419 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2421 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2422 mlxsw_sp_port->local_port);
2423 goto err_dev_addr_init;
2426 netif_carrier_off(dev);
2428 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2429 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2430 dev->hw_features |= NETIF_F_HW_TC;
2433 dev->max_mtu = ETH_MAX_MTU;
2435 /* Each packet needs to have a Tx header (metadata) on top all other
2438 dev->needed_headroom = MLXSW_TXHDR_LEN;
2440 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2442 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2443 mlxsw_sp_port->local_port);
2444 goto err_port_system_port_mapping_set;
2447 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2449 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2450 mlxsw_sp_port->local_port);
2451 goto err_port_speed_by_width_set;
2454 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2456 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2457 mlxsw_sp_port->local_port);
2458 goto err_port_mtu_set;
/* port starts administratively down; ndo_open brings it up */
2461 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2463 goto err_port_admin_status_set;
2465 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2467 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2468 mlxsw_sp_port->local_port);
2469 goto err_port_buffers_init;
2472 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2474 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2475 mlxsw_sp_port->local_port);
2476 goto err_port_ets_init;
2479 /* ETS and buffers must be initialized before DCB. */
2480 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2482 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2483 mlxsw_sp_port->local_port);
2484 goto err_port_dcb_init;
2487 err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
2489 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
2490 mlxsw_sp_port->local_port);
2491 goto err_port_pvid_vport_create;
/* publish the port in the ports[] array before register_netdev so
 * callbacks that fire during registration can look it up
 */
2494 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
2495 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
2496 err = register_netdev(dev);
2498 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2499 mlxsw_sp_port->local_port);
2500 goto err_register_netdev;
2503 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2504 mlxsw_sp_port, dev, mlxsw_sp_port->split,
/* kick off the periodic HW-stats refresh immediately */
2506 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
/* error unwind: reverse order of the setup above */
2509 err_register_netdev:
2510 mlxsw_sp->ports[local_port] = NULL;
2511 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2512 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
2513 err_port_pvid_vport_create:
2514 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2517 err_port_buffers_init:
2518 err_port_admin_status_set:
2520 err_port_speed_by_width_set:
2521 err_port_system_port_mapping_set:
2523 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2525 kfree(mlxsw_sp_port->hw_stats.cache);
2527 kfree(mlxsw_sp_port->sample);
2529 free_percpu(mlxsw_sp_port->pcpu_stats);
2531 kfree(mlxsw_sp_port->untagged_vlans);
2532 err_port_untagged_vlans_alloc:
2533 kfree(mlxsw_sp_port->active_vlans);
2534 err_port_active_vlans_alloc:
/* Wrapper around __mlxsw_sp_port_create() that first registers the port
 * with the core (mlxsw_core_port_init) and unregisters it on failure.
 */
2539 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2540 bool split, u8 module, u8 width, u8 lane)
2544 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2546 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2550 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
2551 module, width, lane);
2553 goto err_port_create;
/* error path: undo the core port registration */
2557 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
/* Tear down one port: mirror image of __mlxsw_sp_port_create(), in
 * reverse order. Cancels the stats work before unregistering so the
 * delayed work cannot run against a dying netdev.
 */
2561 static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2563 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2565 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
2566 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
2567 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
2568 mlxsw_sp->ports[local_port] = NULL;
2569 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2570 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
2571 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2572 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2573 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
2574 kfree(mlxsw_sp_port->hw_stats.cache);
2575 kfree(mlxsw_sp_port->sample);
2576 free_percpu(mlxsw_sp_port->pcpu_stats);
2577 kfree(mlxsw_sp_port->untagged_vlans);
2578 kfree(mlxsw_sp_port->active_vlans);
/* all vPorts should have been destroyed by now */
2579 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
2580 free_netdev(mlxsw_sp_port->dev);
/* Remove the port and then release its core-port registration. */
2583 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2585 __mlxsw_sp_port_remove(mlxsw_sp, local_port);
2586 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
/* True if a port object exists at this local port index. */
2589 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2591 return mlxsw_sp->ports[local_port] != NULL;
/* Remove every instantiated port (index 0 is the CPU port, skipped)
 * and free the ports[] pointer array itself.
 */
2594 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2598 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
2599 if (mlxsw_sp_port_created(mlxsw_sp, i))
2600 mlxsw_sp_port_remove(mlxsw_sp, i);
2601 kfree(mlxsw_sp->ports);
/* Allocate the ports[] array and create a netdev for every local port
 * that has a module mapped. Unwinds all previously created ports on
 * failure. NOTE(review): error-check lines are missing from this
 * extraction; the visible gotos imply the usual if (err) pattern.
 */
2604 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2606 u8 module, width, lane;
2611 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
2612 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2613 if (!mlxsw_sp->ports)
2616 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
2617 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
2620 goto err_port_module_info_get;
/* remember the module for later re-creation after unsplit */
2623 mlxsw_sp->port_to_module[i] = module;
2624 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
2625 module, width, lane);
2627 goto err_port_create;
2632 err_port_module_info_get:
2633 for (i--; i >= 1; i--)
2634 if (mlxsw_sp_port_created(mlxsw_sp, i))
2635 mlxsw_sp_port_remove(mlxsw_sp, i);
2636 kfree(mlxsw_sp->ports);
/* Return the first local port of the cluster containing local_port.
 * Ports are 1-based, hence the (local_port - 1) before the modulo.
 */
2640 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
2642 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
2644 return local_port - offset;
/* Create `count` split ports starting at base_port, each with
 * width = MAX_WIDTH / count and consecutive lanes. Three phases:
 * module map, SWID enable, port create — each unwound in reverse on
 * failure via the goto ladder.
 */
2647 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
2648 u8 module, unsigned int count)
2650 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
2653 for (i = 0; i < count; i++) {
2654 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
2657 goto err_port_module_map;
2660 for (i = 0; i < count; i++) {
2661 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
2663 goto err_port_swid_set;
2666 for (i = 0; i < count; i++) {
2667 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
2668 module, width, i * width);
2670 goto err_port_create;
/* unwind: each ladder rung starts from the loop index that failed */
2676 for (i--; i >= 0; i--)
2677 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2678 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2681 for (i--; i >= 0; i--)
2682 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
2683 MLXSW_PORT_SWID_DISABLED_PORT);
2685 err_port_module_map:
2686 for (i--; i >= 0; i--)
2687 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
/* Re-create the original full-width (unsplit) ports after a split is
 * undone. Only every second local port is used (stride 2), restoring
 * each from the module recorded in port_to_module[]. Best-effort: the
 * visible calls ignore errors.
 */
2691 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2692 u8 base_port, unsigned int count)
2694 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
2697 /* Split by four means we need to re-create two ports, otherwise
2702 for (i = 0; i < count; i++) {
2703 local_port = base_port + i * 2;
2704 module = mlxsw_sp->port_to_module[local_port];
2706 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
2710 for (i = 0; i < count; i++)
2711 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
2713 for (i = 0; i < count; i++) {
2714 local_port = base_port + i * 2;
2715 module = mlxsw_sp->port_to_module[local_port];
2717 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
/* devlink port-split entry point: validate the request (count 2 or 4,
 * port currently at max width, required neighbor slots free), remove the
 * ports occupying the cluster, then create the split ports. On failure
 * the original unsplit ports are restored.
 */
2722 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2725 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2726 struct mlxsw_sp_port *mlxsw_sp_port;
2727 u8 module, cur_width, base_port;
2731 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2732 if (!mlxsw_sp_port) {
2733 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2738 module = mlxsw_sp_port->mapping.module;
2739 cur_width = mlxsw_sp_port->mapping.width;
2741 if (count != 2 && count != 4) {
2742 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2746 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2747 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2751 /* Make sure we have enough slave (even) ports for the split. */
2753 base_port = local_port;
/* split-by-2: only the adjacent port must be free */
2754 if (mlxsw_sp->ports[base_port + 1]) {
2755 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
/* split-by-4: the cluster's odd slots must be free */
2759 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2760 if (mlxsw_sp->ports[base_port + 1] ||
2761 mlxsw_sp->ports[base_port + 3]) {
2762 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2767 for (i = 0; i < count; i++)
2768 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2769 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2771 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2773 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2774 goto err_port_split_create;
2779 err_port_split_create:
2780 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
/* devlink port-unsplit entry point: infer the split count from the
 * current width (width 1 => was split by 4), remove the split ports and
 * re-create the original full-width ports.
 */
2784 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2786 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2787 struct mlxsw_sp_port *mlxsw_sp_port;
2788 u8 cur_width, base_port;
2792 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2793 if (!mlxsw_sp_port) {
2794 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2799 if (!mlxsw_sp_port->split) {
2800 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2804 cur_width = mlxsw_sp_port->mapping.width;
2805 count = cur_width == 1 ? 4 : 2;
2807 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2809 /* Determine which ports to remove. */
/* split-by-2 occupies either the lower or upper half of the cluster */
2810 if (count == 2 && local_port >= base_port + 2)
2811 base_port = base_port + 2;
2813 for (i = 0; i < count; i++)
2814 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2815 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2817 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
/* PUDE (Port Up/Down Event) handler: translate the HW operational status
 * into netif_carrier_on()/off() on the matching netdev.
 */
2822 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2823 char *pude_pl, void *priv)
2825 struct mlxsw_sp *mlxsw_sp = priv;
2826 struct mlxsw_sp_port *mlxsw_sp_port;
2827 enum mlxsw_reg_pude_oper_status status;
2830 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2831 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2835 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2836 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2837 netdev_info(mlxsw_sp_port->dev, "link up\n");
2838 netif_carrier_on(mlxsw_sp_port->dev);
2840 netdev_info(mlxsw_sp_port->dev, "link down\n");
2841 netif_carrier_off(mlxsw_sp_port->dev);
/* RX trap handler: attribute the skb to the ingress port's netdev,
 * bump per-CPU RX counters under the u64_stats seqcount, and hand the
 * packet to the network stack.
 */
2845 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2846 u8 local_port, void *priv)
2848 struct mlxsw_sp *mlxsw_sp = priv;
2849 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2850 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2852 if (unlikely(!mlxsw_sp_port)) {
2853 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2858 skb->dev = mlxsw_sp_port->dev;
2860 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2861 u64_stats_update_begin(&pcpu_stats->syncp);
2862 pcpu_stats->rx_packets++;
2863 pcpu_stats->rx_bytes += skb->len;
2864 u64_stats_update_end(&pcpu_stats->syncp);
2866 skb->protocol = eth_type_trans(skb, skb->dev);
2867 netif_receive_skb(skb);
/* Like the no_mark variant, but set offload_fwd_mark so the bridge
 * knows HW already forwarded this packet and won't duplicate it.
 */
2870 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2873 skb->offload_fwd_mark = 1;
2874 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
/* Sampled-packet trap handler: forward the skb to the psample group
 * configured on the ingress port, honoring the configured truncation
 * size and sampling rate. Drops silently if the port has no sampler.
 */
2877 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
2880 struct mlxsw_sp *mlxsw_sp = priv;
2881 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2882 struct psample_group *psample_group;
2885 if (unlikely(!mlxsw_sp_port)) {
2886 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
2890 if (unlikely(!mlxsw_sp_port->sample)) {
2891 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
2896 size = mlxsw_sp_port->sample->truncate ?
2897 mlxsw_sp_port->sample->trunc_size : skb->len;
/* psample_group is RCU-protected; caller context presumably holds
 * rcu_read_lock — TODO confirm against the full source
 */
2900 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
2903 psample_sample_packet(psample_group, skb, size,
2904 mlxsw_sp_port->dev->ifindex, 0,
2905 mlxsw_sp_port->sample->rate);
/* Convenience wrappers over MLXSW_RXL/MLXSW_EVENTL that prepend the SP_
 * prefix to the trap group and select the mark / no-mark RX handler.
 */
2912 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2913 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
2914 _is_ctrl, SP_##_trap_group, DISCARD)
2916 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2917 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
2918 _is_ctrl, SP_##_trap_group, DISCARD)
2920 #define MLXSW_SP_EVENTL(_func, _trap_id) \
2921 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
/* Static table of all traps/events this driver registers: port events,
 * L2 control protocols (STP/LACP/LLDP/IGMP), L3 exceptions and control
 * (ARP/OSPF/BGP/router errors) and the packet-sampling trap.
 */
2923 static const struct mlxsw_listener mlxsw_sp_listener[] = {
2925 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
2927 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
2928 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
2929 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
2930 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
2931 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
2932 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
2933 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
2934 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
2935 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
2936 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
2937 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
2939 MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2940 MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2941 MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2942 MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
2943 MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
2944 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
2945 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
2946 MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
2947 /* PKT Sample trap */
2948 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
2949 false, SP_IP2ME, DISCARD)
/* Program a QPCR rate policer for each CPU trap group so trapped
 * traffic cannot overwhelm the host CPU. Per-group rates are chosen in
 * the switch cases (values missing from this extraction).
 */
2952 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2954 char qpcr_pl[MLXSW_REG_QPCR_LEN];
2955 enum mlxsw_reg_qpcr_ir_units ir_units;
2956 int max_cpu_policers;
/* nothing to do if the device does not expose the policer resource */
2962 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2965 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2967 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2968 for (i = 0; i < max_cpu_policers; i++) {
2971 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
2972 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
2973 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
2974 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
2978 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
2982 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
2983 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
2984 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
2985 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
2986 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2987 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
2991 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3000 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3002 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
/* Configure each HTGT trap group with a priority, traffic class and the
 * policer programmed by mlxsw_sp_cpu_policers_set(). Groups whose
 * policer id would exceed the device's policer range are rejected
 * (unless the group uses the invalid/no-policer sentinel).
 */
3010 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
3012 char htgt_pl[MLXSW_REG_HTGT_LEN];
3013 enum mlxsw_reg_htgt_trap_group i;
3014 int max_cpu_policers;
3015 int max_trap_groups;
3020 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3023 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
3024 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3026 for (i = 0; i < max_trap_groups; i++) {
3029 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3030 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3031 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3032 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3036 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3037 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3041 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3042 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3046 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3050 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3051 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3052 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
/* events are not rate-limited: no policer attached */
3056 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
3057 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3058 tc = MLXSW_REG_HTGT_DEFAULT_TC;
3059 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
3065 if (max_cpu_policers <= policer_id &&
3066 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3069 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
3070 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
/* Trap subsystem init: program CPU policers and trap groups, then
 * register every listener from mlxsw_sp_listener[]. Already-registered
 * listeners are unwound on failure.
 */
3078 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3083 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3087 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3091 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3092 err = mlxsw_core_trap_register(mlxsw_sp->core,
3093 &mlxsw_sp_listener[i],
3096 goto err_listener_register;
3101 err_listener_register:
3102 for (i--; i >= 0; i--) {
3103 mlxsw_core_trap_unregister(mlxsw_sp->core,
3104 &mlxsw_sp_listener[i],
/* Unregister every trap listener registered by mlxsw_sp_traps_init(). */
3110 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3114 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3115 mlxsw_core_trap_unregister(mlxsw_sp->core,
3116 &mlxsw_sp_listener[i],
/* Program one SFGC flood-group entry: pick the flood table (UC/MC/BC)
 * from the traffic type and the table addressing mode (FID vs
 * FID-offset) from the bridge type, then write the register.
 */
3121 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
3122 enum mlxsw_reg_sfgc_type type,
3123 enum mlxsw_reg_sfgc_bridge_type bridge_type)
3125 enum mlxsw_flood_table_type table_type;
3126 enum mlxsw_sp_flood_table flood_table;
3127 char sfgc_pl[MLXSW_REG_SFGC_LEN];
3129 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
3130 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
3132 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3135 case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
3136 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
3138 case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
3139 flood_table = MLXSW_SP_FLOOD_TABLE_MC;
/* everything else (broadcast & friends) floods via the BC table */
3142 flood_table = MLXSW_SP_FLOOD_TABLE_BC;
3145 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
3147 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
/* Initialize flooding for every SFGC traffic type, for both the VLAN-
 * unaware (vFID) and VLAN-aware (802.1Q FID) bridge models.
 */
3150 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
3154 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
3155 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
3158 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3159 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
3163 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3164 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
/* LAG init: program the SLCR hash over L2/L3/L4 fields, then allocate
 * the per-LAG upper-device tracking array sized by the device's MAX_LAG
 * resource. Fails early if the LAG resources are not exposed.
 */
3172 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3174 char slcr_pl[MLXSW_REG_SLCR_LEN];
3177 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3178 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3179 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3180 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3181 MLXSW_REG_SLCR_LAG_HASH_SIP |
3182 MLXSW_REG_SLCR_LAG_HASH_DIP |
3183 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3184 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3185 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
3186 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3190 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3191 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
3194 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
3195 sizeof(struct mlxsw_sp_upper),
3197 if (!mlxsw_sp->lags)
/* Release the per-LAG tracking array allocated in mlxsw_sp_lag_init(). */
3203 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3205 kfree(mlxsw_sp->lags);
/* Configure the EMAD trap group (management datagrams) with default
 * priority/TC and no policer — needed before any other register access.
 */
3208 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3210 char htgt_pl[MLXSW_REG_HTGT_LEN];
3212 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3213 MLXSW_REG_HTGT_INVALID_POLICER,
3214 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3215 MLXSW_REG_HTGT_DEFAULT_TC);
3216 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
/* Driver-wide init callback invoked by mlxsw core: brings up every
 * subsystem in dependency order (traps, flood, buffers, LAG, switchdev,
 * router, SPAN, ACL, counter pool) and finally creates the ports.
 * The goto ladder at the bottom tears subsystems down in reverse order.
 */
3219 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3220 const struct mlxsw_bus_info *mlxsw_bus_info)
3222 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3225 mlxsw_sp->core = mlxsw_core;
3226 mlxsw_sp->bus_info = mlxsw_bus_info;
3227 INIT_LIST_HEAD(&mlxsw_sp->fids);
3228 INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
3229 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
3231 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3233 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3237 err = mlxsw_sp_traps_init(mlxsw_sp);
3239 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3243 err = mlxsw_sp_flood_init(mlxsw_sp);
3245 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
3246 goto err_flood_init;
3249 err = mlxsw_sp_buffers_init(mlxsw_sp);
3251 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3252 goto err_buffers_init;
3255 err = mlxsw_sp_lag_init(mlxsw_sp);
3257 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3261 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3263 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3264 goto err_switchdev_init;
3267 err = mlxsw_sp_router_init(mlxsw_sp);
3269 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3270 goto err_router_init;
3273 err = mlxsw_sp_span_init(mlxsw_sp);
3275 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3279 err = mlxsw_sp_acl_init(mlxsw_sp);
3281 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3285 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3287 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3288 goto err_counter_pool_init;
/* ports last: they depend on every subsystem above */
3291 err = mlxsw_sp_ports_create(mlxsw_sp);
3293 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3294 goto err_ports_create;
/* error unwind: reverse of the init order above */
3300 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3301 err_counter_pool_init:
3302 mlxsw_sp_acl_fini(mlxsw_sp);
3304 mlxsw_sp_span_fini(mlxsw_sp);
3306 mlxsw_sp_router_fini(mlxsw_sp);
3308 mlxsw_sp_switchdev_fini(mlxsw_sp);
3310 mlxsw_sp_lag_fini(mlxsw_sp);
3312 mlxsw_sp_buffers_fini(mlxsw_sp);
3315 mlxsw_sp_traps_fini(mlxsw_sp);
/* Driver-wide teardown: reverse of mlxsw_sp_init(). The trailing
 * WARN_ONs check that all vFIDs/FIDs were released by the time the
 * driver goes away.
 */
3319 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3321 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3323 mlxsw_sp_ports_remove(mlxsw_sp);
3324 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3325 mlxsw_sp_acl_fini(mlxsw_sp);
3326 mlxsw_sp_span_fini(mlxsw_sp);
3327 mlxsw_sp_router_fini(mlxsw_sp);
3328 mlxsw_sp_switchdev_fini(mlxsw_sp);
3329 mlxsw_sp_lag_fini(mlxsw_sp);
3330 mlxsw_sp_buffers_fini(mlxsw_sp);
3331 mlxsw_sp_traps_fini(mlxsw_sp);
3332 WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
3333 WARN_ON(!list_empty(&mlxsw_sp->fids));
/* Device configuration profile passed to the core at init time: flood
 * table sizes, KVD (key-value database) partitioning and SWID type.
 */
3336 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
3337 .used_max_vepa_channels = 1,
3338 .max_vepa_channels = 0,
3340 .max_mid = MLXSW_SP_MID_MAX,
3343 .used_flood_tables = 1,
3344 .used_flood_mode = 1,
3346 .max_fid_offset_flood_tables = 3,
3347 .fid_offset_flood_table_size = VLAN_N_VID - 1,
3348 .max_fid_flood_tables = 3,
3349 .fid_flood_table_size = MLXSW_SP_VFID_MAX,
3350 .used_max_ib_mc = 1,
3354 .used_kvd_split_data = 1,
3355 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
3356 .kvd_hash_single_parts = 2,
3357 .kvd_hash_double_parts = 1,
3358 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
3362 .type = MLXSW_PORT_SWID_TYPE_ETH,
3365 .resource_query_enable = 1,
/* mlxsw core driver ops for Spectrum: lifecycle, devlink port split,
 * shared-buffer (sb_*) devlink callbacks and TX header construction.
 */
3368 static struct mlxsw_driver mlxsw_sp_driver = {
3369 .kind = mlxsw_sp_driver_name,
3370 .priv_size = sizeof(struct mlxsw_sp),
3371 .init = mlxsw_sp_init,
3372 .fini = mlxsw_sp_fini,
3373 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
3374 .port_split = mlxsw_sp_port_split,
3375 .port_unsplit = mlxsw_sp_port_unsplit,
3376 .sb_pool_get = mlxsw_sp_sb_pool_get,
3377 .sb_pool_set = mlxsw_sp_sb_pool_set,
3378 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3379 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3380 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3381 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3382 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3383 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3384 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3385 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3386 .txhdr_construct = mlxsw_sp_txhdr_construct,
3387 .txhdr_len = MLXSW_TXHDR_LEN,
3388 .profile = &mlxsw_sp_config_profile,
/* Identify our netdevs by their ops pointer — cheap and unambiguous. */
3391 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3393 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
/* netdev_walk_all_lower_dev() callback: record the first lower device
 * that is an mlxsw_sp port into *data.
 */
3396 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
3398 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
3401 if (mlxsw_sp_port_dev_check(lower_dev)) {
3402 *p_mlxsw_sp_port = netdev_priv(lower_dev);
/* Find the mlxsw_sp port underlying dev (dev itself, or a lower dev
 * reached through the netdev hierarchy). Returns NULL if none.
 */
3409 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3411 struct mlxsw_sp_port *mlxsw_sp_port;
3413 if (mlxsw_sp_port_dev_check(dev))
3414 return netdev_priv(dev);
3416 mlxsw_sp_port = NULL;
3417 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3419 return mlxsw_sp_port;
/* Resolve dev to its switch instance, or NULL if dev is unrelated. */
3422 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3424 struct mlxsw_sp_port *mlxsw_sp_port;
3426 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3427 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
/* RCU variant of mlxsw_sp_port_dev_lower_find(); caller presumably
 * holds rcu_read_lock — TODO confirm against callers in the full file.
 */
3430 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3432 struct mlxsw_sp_port *mlxsw_sp_port;
3434 if (mlxsw_sp_port_dev_check(dev))
3435 return netdev_priv(dev);
3437 mlxsw_sp_port = NULL;
3438 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3441 return mlxsw_sp_port;
/* Like the RCU find, but take a reference on the port's netdev so the
 * caller may use it outside the RCU section; release with
 * mlxsw_sp_port_dev_put().
 */
3444 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3446 struct mlxsw_sp_port *mlxsw_sp_port;
3449 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3451 dev_hold(mlxsw_sp_port->dev);
3453 return mlxsw_sp_port;
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
3456 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3458 dev_put(mlxsw_sp_port->dev);
/* True if lag_port is a member of the FID: for vFIDs look for a vPort
 * bound to it, for regular FIDs test the active-VLANs bitmap.
 */
3461 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3464 if (mlxsw_sp_fid_is_vfid(fid))
3465 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3467 return test_bit(fid, lag_port->active_vlans);
/* Decide whether leaving a FID requires an FDB flush. Non-lagged ports
 * always flush; for a LAG member, skip the flush while any *other*
 * member of the same LAG is still in the FID (its FDB entries are
 * keyed by LAG id, so flushing would disrupt the remaining members).
 */
3470 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
3473 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3474 u8 local_port = mlxsw_sp_port->local_port;
3475 u16 lag_id = mlxsw_sp_port->lag_id;
3476 u64 max_lag_members;
3479 if (!mlxsw_sp_port->lagged)
3482 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3484 for (i = 0; i < max_lag_members; i++) {
3485 struct mlxsw_sp_port *lag_port;
3487 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
3488 if (!lag_port || lag_port->local_port == local_port)
3490 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
/* Flush all FDB entries learned on this (non-LAG) port within the given
 * FID via the SFDF register.
 */
3498 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3501 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3502 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3504 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
3505 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3506 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
3507 mlxsw_sp_port->local_port);
3509 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
3510 mlxsw_sp_port->local_port, fid);
3512 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
/* LAG counterpart of the per-port flush: flush FDB entries keyed by the
 * port's LAG id within the given FID.
 */
3516 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3519 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3520 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3522 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
3523 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3524 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
3526 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
3527 mlxsw_sp_port->lag_id, fid);
3529 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
/* Public FDB flush entry point: check whether a flush is needed, then
 * dispatch to the LAG-keyed or port-keyed variant.
 */
3532 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
3534 if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3537 if (mlxsw_sp_port->lagged)
3538 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
3541 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
/* Drop one reference from every FID after the master bridge disappears,
 * destroying FIDs whose count hits zero.
 */
3544 static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
3546 struct mlxsw_sp_fid *f, *tmp;
3548 list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
3549 if (--f->ref_count == 0)
3550 mlxsw_sp_fid_destroy(mlxsw_sp, f);
/* Only one master bridge device may be offloaded per switch; allow
 * br_dev if no bridge is set yet or it is the same bridge.
 */
3555 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3556 struct net_device *br_dev)
3558 return !mlxsw_sp->master_bridge.dev ||
3559 mlxsw_sp->master_bridge.dev == br_dev;
/* Record/refcount the master bridge device for this switch. */
3562 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
3563 struct net_device *br_dev)
3565 mlxsw_sp->master_bridge.dev = br_dev;
3566 mlxsw_sp->master_bridge.ref_count++;
/* Drop one master-bridge reference; when the last port leaves, forget
 * the bridge and release any FIDs still referenced by upper VLAN
 * devices (see comment below).
 */
3569 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
3571 if (--mlxsw_sp->master_bridge.ref_count == 0) {
3572 mlxsw_sp->master_bridge.dev = NULL;
3573 /* It's possible upper VLAN devices are still holding
3574 * references to underlying FIDs. Drop the reference
3575 * and release the resources if it was the last one.
3576 * If it wasn't, then something bad happened.
3578 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
/* Enslave the port to a bridge: remove the implicit VID-1 VLAN
 * interface, record the master bridge, and enable bridge-mode defaults
 * (learning, sync, UC/MC flooding).
 */
3582 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
3583 struct net_device *br_dev)
3585 struct net_device *dev = mlxsw_sp_port->dev;
3588 /* When port is not bridged untagged packets are tagged with
3589 * PVID=VID=1, thereby creating an implicit VLAN interface in
3590 * the device. Remove it and let bridge code take care of its
3593 err = mlxsw_sp_port_kill_vid(dev, 0, 1);
3597 mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
3599 mlxsw_sp_port->learning = 1;
3600 mlxsw_sp_port->learning_sync = 1;
3601 mlxsw_sp_port->uc_flood = 1;
3602 mlxsw_sp_port->mc_flood = 1;
3603 mlxsw_sp_port->mc_router = 0;
3604 mlxsw_sp_port->mc_disabled = 1;
3605 mlxsw_sp_port->bridged = 1;
/* Release the port from its bridge: restore PVID 1, drop the master
 * bridge reference, clear bridge-mode flags and re-add the implicit
 * VID-1 VLAN interface for standalone operation.
 */
3610 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3612 struct net_device *dev = mlxsw_sp_port->dev;
3614 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
3616 mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
3618 mlxsw_sp_port->learning = 0;
3619 mlxsw_sp_port->learning_sync = 0;
3620 mlxsw_sp_port->uc_flood = 0;
3621 mlxsw_sp_port->mc_flood = 0;
3622 mlxsw_sp_port->mc_router = 0;
3623 mlxsw_sp_port->bridged = 0;
3625 /* Add implicit VLAN interface in the device, so that untagged
3626 * packets will be classified to the default vFID.
3628 mlxsw_sp_port_add_vid(dev, 0, 1);
/* Create a LAG object in HW via the SLDR register. */
3631 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3633 char sldr_pl[MLXSW_REG_SLDR_LEN];
3635 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3636 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
/* Destroy a HW LAG object via the SLDR register. */
3639 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3641 char sldr_pl[MLXSW_REG_SLDR_LEN];
3643 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3644 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
/* Add the port to a LAG's collector at the given member index (SLCOR). */
3647 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3648 u16 lag_id, u8 port_index)
3650 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3651 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3653 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3654 lag_id, port_index);
3655 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
/* Remove the port from a LAG's collector (SLCOR). */
3658 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3661 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3662 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3664 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3666 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3669 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3672 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3673 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3675 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3677 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3680 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3683 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3684 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3686 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3688 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3691 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3692 struct net_device *lag_dev,
3695 struct mlxsw_sp_upper *lag;
3696 int free_lag_id = -1;
3700 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3701 for (i = 0; i < max_lag; i++) {
3702 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3703 if (lag->ref_count) {
3704 if (lag->dev == lag_dev) {
3708 } else if (free_lag_id < 0) {
3712 if (free_lag_id < 0)
3714 *p_lag_id = free_lag_id;
3719 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3720 struct net_device *lag_dev,
3721 struct netdev_lag_upper_info *lag_upper_info)
3725 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3727 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3732 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3733 u16 lag_id, u8 *p_port_index)
3735 u64 max_lag_members;
3738 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3740 for (i = 0; i < max_lag_members; i++) {
3741 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3750 mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3751 struct net_device *lag_dev, u16 lag_id)
3753 struct mlxsw_sp_port *mlxsw_sp_vport;
3754 struct mlxsw_sp_fid *f;
3756 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3757 if (WARN_ON(!mlxsw_sp_vport))
3760 /* If vPort is assigned a RIF, then leave it since it's no
3763 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3765 f->leave(mlxsw_sp_vport);
3767 mlxsw_sp_vport->lag_id = lag_id;
3768 mlxsw_sp_vport->lagged = 1;
3769 mlxsw_sp_vport->dev = lag_dev;
3773 mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3775 struct mlxsw_sp_port *mlxsw_sp_vport;
3776 struct mlxsw_sp_fid *f;
3778 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3779 if (WARN_ON(!mlxsw_sp_vport))
3782 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3784 f->leave(mlxsw_sp_vport);
3786 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3787 mlxsw_sp_vport->lagged = 0;
3790 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3791 struct net_device *lag_dev)
3793 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3794 struct mlxsw_sp_upper *lag;
3799 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3802 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3803 if (!lag->ref_count) {
3804 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3810 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3813 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3815 goto err_col_port_add;
3816 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
3818 goto err_col_port_enable;
3820 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3821 mlxsw_sp_port->local_port);
3822 mlxsw_sp_port->lag_id = lag_id;
3823 mlxsw_sp_port->lagged = 1;
3826 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
3830 err_col_port_enable:
3831 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3833 if (!lag->ref_count)
3834 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3838 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
3839 struct net_device *lag_dev)
3841 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3842 u16 lag_id = mlxsw_sp_port->lag_id;
3843 struct mlxsw_sp_upper *lag;
3845 if (!mlxsw_sp_port->lagged)
3847 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3848 WARN_ON(lag->ref_count == 0);
3850 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
3851 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3853 if (mlxsw_sp_port->bridged) {
3854 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
3855 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
3858 if (lag->ref_count == 1)
3859 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3861 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3862 mlxsw_sp_port->local_port);
3863 mlxsw_sp_port->lagged = 0;
3866 mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
3869 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3872 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3873 char sldr_pl[MLXSW_REG_SLDR_LEN];
3875 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3876 mlxsw_sp_port->local_port);
3877 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3880 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3883 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3884 char sldr_pl[MLXSW_REG_SLDR_LEN];
3886 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3887 mlxsw_sp_port->local_port);
3888 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3891 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3892 bool lag_tx_enabled)
3895 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3896 mlxsw_sp_port->lag_id);
3898 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3899 mlxsw_sp_port->lag_id);
3902 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3903 struct netdev_lag_lower_state_info *info)
3905 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
3908 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3909 struct net_device *vlan_dev)
3911 struct mlxsw_sp_port *mlxsw_sp_vport;
3912 u16 vid = vlan_dev_vlan_id(vlan_dev);
3914 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3915 if (WARN_ON(!mlxsw_sp_vport))
3918 mlxsw_sp_vport->dev = vlan_dev;
3923 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
3924 struct net_device *vlan_dev)
3926 struct mlxsw_sp_port *mlxsw_sp_vport;
3927 u16 vid = vlan_dev_vlan_id(vlan_dev);
3929 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3930 if (WARN_ON(!mlxsw_sp_vport))
3933 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3936 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3937 unsigned long event, void *ptr)
3939 struct netdev_notifier_changeupper_info *info;
3940 struct mlxsw_sp_port *mlxsw_sp_port;
3941 struct net_device *upper_dev;
3942 struct mlxsw_sp *mlxsw_sp;
3945 mlxsw_sp_port = netdev_priv(dev);
3946 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3950 case NETDEV_PRECHANGEUPPER:
3951 upper_dev = info->upper_dev;
3952 if (!is_vlan_dev(upper_dev) &&
3953 !netif_is_lag_master(upper_dev) &&
3954 !netif_is_bridge_master(upper_dev))
3958 /* HW limitation forbids to put ports to multiple bridges. */
3959 if (netif_is_bridge_master(upper_dev) &&
3960 !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
3962 if (netif_is_lag_master(upper_dev) &&
3963 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
3966 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
3968 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
3969 !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
3972 case NETDEV_CHANGEUPPER:
3973 upper_dev = info->upper_dev;
3974 if (is_vlan_dev(upper_dev)) {
3976 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
3979 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
3981 } else if (netif_is_bridge_master(upper_dev)) {
3983 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
3986 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
3987 } else if (netif_is_lag_master(upper_dev)) {
3989 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
3992 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4004 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4005 unsigned long event, void *ptr)
4007 struct netdev_notifier_changelowerstate_info *info;
4008 struct mlxsw_sp_port *mlxsw_sp_port;
4011 mlxsw_sp_port = netdev_priv(dev);
4015 case NETDEV_CHANGELOWERSTATE:
4016 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4017 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4018 info->lower_state_info);
4020 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4028 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
4029 unsigned long event, void *ptr)
4032 case NETDEV_PRECHANGEUPPER:
4033 case NETDEV_CHANGEUPPER:
4034 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
4035 case NETDEV_CHANGELOWERSTATE:
4036 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
4042 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4043 unsigned long event, void *ptr)
4045 struct net_device *dev;
4046 struct list_head *iter;
4049 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4050 if (mlxsw_sp_port_dev_check(dev)) {
4051 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
4060 static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
4061 struct net_device *vlan_dev)
4063 u16 fid = vlan_dev_vlan_id(vlan_dev);
4064 struct mlxsw_sp_fid *f;
4066 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4068 f = mlxsw_sp_fid_create(mlxsw_sp, fid);
4078 static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
4079 struct net_device *vlan_dev)
4081 u16 fid = vlan_dev_vlan_id(vlan_dev);
4082 struct mlxsw_sp_fid *f;
4084 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4086 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
4087 if (f && --f->ref_count == 0)
4088 mlxsw_sp_fid_destroy(mlxsw_sp, f);
4091 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4092 unsigned long event, void *ptr)
4094 struct netdev_notifier_changeupper_info *info;
4095 struct net_device *upper_dev;
4096 struct mlxsw_sp *mlxsw_sp;
4099 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4106 case NETDEV_PRECHANGEUPPER:
4107 upper_dev = info->upper_dev;
4108 if (!is_vlan_dev(upper_dev))
4110 if (is_vlan_dev(upper_dev) &&
4111 br_dev != mlxsw_sp->master_bridge.dev)
4114 case NETDEV_CHANGEUPPER:
4115 upper_dev = info->upper_dev;
4116 if (is_vlan_dev(upper_dev)) {
4118 err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
4121 mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
4133 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
4135 return find_first_zero_bit(mlxsw_sp->vfids.mapped,
4139 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
4141 char sfmr_pl[MLXSW_REG_SFMR_LEN];
4143 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
4144 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
4147 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
4149 static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
4150 struct net_device *br_dev)
4152 struct device *dev = mlxsw_sp->bus_info->dev;
4153 struct mlxsw_sp_fid *f;
4157 vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
4158 if (vfid == MLXSW_SP_VFID_MAX) {
4159 dev_err(dev, "No available vFIDs\n");
4160 return ERR_PTR(-ERANGE);
4163 fid = mlxsw_sp_vfid_to_fid(vfid);
4164 err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
4166 dev_err(dev, "Failed to create FID=%d\n", fid);
4167 return ERR_PTR(err);
4170 f = kzalloc(sizeof(*f), GFP_KERNEL);
4172 goto err_allocate_vfid;
4174 f->leave = mlxsw_sp_vport_vfid_leave;
4178 list_add(&f->list, &mlxsw_sp->vfids.list);
4179 set_bit(vfid, mlxsw_sp->vfids.mapped);
4184 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
4185 return ERR_PTR(-ENOMEM);
4188 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
4189 struct mlxsw_sp_fid *f)
4191 u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
4194 clear_bit(vfid, mlxsw_sp->vfids.mapped);
4198 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
4202 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
4205 static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
4208 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
4209 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4211 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
4215 static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4216 struct net_device *br_dev)
4218 struct mlxsw_sp_fid *f;
4221 f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
4223 f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
4228 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
4230 goto err_vport_flood_set;
4232 err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
4234 goto err_vport_fid_map;
4236 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
4239 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
4244 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4245 err_vport_flood_set:
4247 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
4251 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
4253 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
4255 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
4257 mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
4259 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4261 mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
4263 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
4264 if (--f->ref_count == 0)
4265 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
4268 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4269 struct net_device *br_dev)
4271 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
4272 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4273 struct net_device *dev = mlxsw_sp_vport->dev;
4276 if (f && !WARN_ON(!f->leave))
4277 f->leave(mlxsw_sp_vport);
4279 err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
4281 netdev_err(dev, "Failed to join vFID\n");
4285 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
4287 netdev_err(dev, "Failed to enable learning\n");
4288 goto err_port_vid_learning_set;
4291 mlxsw_sp_vport->learning = 1;
4292 mlxsw_sp_vport->learning_sync = 1;
4293 mlxsw_sp_vport->uc_flood = 1;
4294 mlxsw_sp_vport->mc_flood = 1;
4295 mlxsw_sp_vport->mc_router = 0;
4296 mlxsw_sp_vport->mc_disabled = 1;
4297 mlxsw_sp_vport->bridged = 1;
4301 err_port_vid_learning_set:
4302 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
4306 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
4308 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4310 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
4312 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
4314 mlxsw_sp_vport->learning = 0;
4315 mlxsw_sp_vport->learning_sync = 0;
4316 mlxsw_sp_vport->uc_flood = 0;
4317 mlxsw_sp_vport->mc_flood = 0;
4318 mlxsw_sp_vport->mc_router = 0;
4319 mlxsw_sp_vport->bridged = 0;
4323 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
4324 const struct net_device *br_dev)
4326 struct mlxsw_sp_port *mlxsw_sp_vport;
4328 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
4330 struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
4332 if (dev && dev == br_dev)
4339 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
4340 unsigned long event, void *ptr,
4343 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4344 struct netdev_notifier_changeupper_info *info = ptr;
4345 struct mlxsw_sp_port *mlxsw_sp_vport;
4346 struct net_device *upper_dev;
4349 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
4350 if (!mlxsw_sp_vport)
4354 case NETDEV_PRECHANGEUPPER:
4355 upper_dev = info->upper_dev;
4356 if (!netif_is_bridge_master(upper_dev))
4360 /* We can't have multiple VLAN interfaces configured on
4361 * the same port and being members in the same bridge.
4363 if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
4367 case NETDEV_CHANGEUPPER:
4368 upper_dev = info->upper_dev;
4369 if (netif_is_bridge_master(upper_dev)) {
4371 err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
4374 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
4385 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
4386 unsigned long event, void *ptr,
4389 struct net_device *dev;
4390 struct list_head *iter;
4393 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4394 if (mlxsw_sp_port_dev_check(dev)) {
4395 ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
4405 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4406 unsigned long event, void *ptr)
4408 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4409 u16 vid = vlan_dev_vlan_id(vlan_dev);
4411 if (mlxsw_sp_port_dev_check(real_dev))
4412 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
4414 else if (netif_is_lag_master(real_dev))
4415 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
4421 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
4422 unsigned long event, void *ptr)
4424 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4427 if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
4428 err = mlxsw_sp_netdevice_router_port_event(dev);
4429 else if (mlxsw_sp_port_dev_check(dev))
4430 err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
4431 else if (netif_is_lag_master(dev))
4432 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
4433 else if (netif_is_bridge_master(dev))
4434 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
4435 else if (is_vlan_dev(dev))
4436 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
4438 return notifier_from_errno(err);
4441 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
4442 .notifier_call = mlxsw_sp_netdevice_event,
4445 static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
4446 .notifier_call = mlxsw_sp_inetaddr_event,
4447 .priority = 10, /* Must be called before FIB notifier block */
4450 static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
4451 .notifier_call = mlxsw_sp_router_netevent_event,
4454 static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
4455 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
4459 static struct pci_driver mlxsw_sp_pci_driver = {
4460 .name = mlxsw_sp_driver_name,
4461 .id_table = mlxsw_sp_pci_id_table,
4464 static int __init mlxsw_sp_module_init(void)
4468 register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4469 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4470 register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4472 err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4474 goto err_core_driver_register;
4476 err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
4478 goto err_pci_driver_register;
4482 err_pci_driver_register:
4483 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4484 err_core_driver_register:
4485 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4486 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4487 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4491 static void __exit mlxsw_sp_module_exit(void)
4493 mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
4494 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4495 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4496 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4497 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4500 module_init(mlxsw_sp_module_init);
4501 module_exit(mlxsw_sp_module_exit);
4503 MODULE_LICENSE("Dual BSD/GPL");
4504 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4505 MODULE_DESCRIPTION("Mellanox Spectrum driver");
4506 MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);