2 * drivers/net/ethernet/mellanox/mlxsw/core.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/device.h>
40 #include <linux/export.h>
41 #include <linux/err.h>
42 #include <linux/if_link.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/u64_stats_sync.h>
46 #include <linux/netdevice.h>
47 #include <linux/wait.h>
48 #include <linux/skbuff.h>
49 #include <linux/etherdevice.h>
50 #include <linux/types.h>
51 #include <linux/string.h>
52 #include <linux/gfp.h>
53 #include <linux/random.h>
54 #include <linux/jiffies.h>
55 #include <linux/mutex.h>
56 #include <linux/rcupdate.h>
57 #include <linux/slab.h>
58 #include <asm/byteorder.h>
59 #include <net/devlink.h>
69 static LIST_HEAD(mlxsw_core_driver_list);
70 static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
72 static const char mlxsw_core_driver_name[] = "mlxsw_core";
74 static struct dentry *mlxsw_core_dbg_root;
76 struct mlxsw_core_pcpu_stats {
77 u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
78 u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
79 u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
80 u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
81 struct u64_stats_sync syncp;
82 u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
83 u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
89 struct mlxsw_driver *driver;
90 const struct mlxsw_bus *bus;
92 const struct mlxsw_bus_info *bus_info;
93 struct list_head rx_listener_list;
94 struct list_head event_listener_list;
96 struct sk_buff *resp_skb;
98 wait_queue_head_t wait;
100 struct mutex lock; /* One EMAD transaction at a time. */
103 struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
104 struct dentry *dbg_dir;
106 struct debugfs_blob_wrapper vsd_blob;
107 struct debugfs_blob_wrapper psid_blob;
110 u8 *mapping; /* lag_id+port_index to local_port mapping */
112 struct mlxsw_hwmon *hwmon;
113 unsigned long driver_priv[0];
114 /* driver_priv has to be always the last item */
117 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
119 return mlxsw_core->driver_priv;
121 EXPORT_SYMBOL(mlxsw_core_driver_priv);
123 struct mlxsw_rx_listener_item {
124 struct list_head list;
125 struct mlxsw_rx_listener rxl;
129 struct mlxsw_event_listener_item {
130 struct list_head list;
131 struct mlxsw_event_listener el;
140 * Destination MAC in EMAD's Ethernet header.
141 * Must be set to 01:02:c9:00:00:01
143 MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
146 * Source MAC in EMAD's Ethernet header.
147 * Must be set to 00:02:c9:01:02:03
149 MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
151 /* emad_eth_hdr_ethertype
152 * Ethertype in EMAD's Ethernet header.
153 * Must be set to 0x8932
155 MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
157 /* emad_eth_hdr_mlx_proto
159 * Must be set to 0x0.
161 MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
164 * Mellanox protocol version.
165 * Must be set to 0x0.
167 MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
171 * Must be set to 0x1 (operation TLV).
173 MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
176 * Length of the operation TLV in u32.
177 * Must be set to 0x4.
179 MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
182 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
183 * EMAD. DR TLV must follow.
185 * Note: Currently not supported and must not be set.
187 MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
189 /* emad_op_tlv_status
190 * Returned status in case of EMAD response. Must be set to 0 in case
193 * 0x1 - device is busy. Requester should retry
194 * 0x2 - Mellanox protocol version not supported
196 * 0x4 - register not supported
197 * 0x5 - operation class not supported
198 * 0x6 - EMAD method not supported
199 * 0x7 - bad parameter (e.g. port out of range)
200 * 0x8 - resource not available
201 * 0x9 - message receipt acknowledgment. Requester should retry
202 * 0x70 - internal error
204 MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
206 /* emad_op_tlv_register_id
207 * Register ID of register within register TLV.
209 MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
212 * Response bit. Setting to 1 indicates Response, otherwise request.
214 MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
216 /* emad_op_tlv_method
220 * 0x3 - send (currently not supported)
223 MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
226 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
228 MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
231 * EMAD transaction ID. Used for pairing request and response EMADs.
233 MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
237 * Must be set to 0x3 (register TLV).
239 MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
242 * Length of the register TLV in u32.
244 MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
248 * Must be set to 0x0 (end TLV).
250 MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
253 * Length of the end TLV in u32.
256 MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
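/* Editorial overview (not part of the original source): an EMAD frame as
 * assembled by mlxsw_emad_construct() below is laid out front to back as
 *
 *   Ethernet header | op TLV | reg TLV header + register payload | end TLV
 *
 * where the op TLV spans MLXSW_EMAD_OP_TLV_LEN u32, the reg TLV adds one
 * u32 header in front of the register payload and the end TLV spans
 * MLXSW_EMAD_END_TLV_LEN u32, matching the item definitions above.
 */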
258 enum mlxsw_core_reg_access_type {
259 MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
260 MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
263 static inline const char *
264 mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
267 case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
269 case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
275 static void mlxsw_emad_pack_end_tlv(char *end_tlv)
277 mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
278 mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
281 static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
282 const struct mlxsw_reg_info *reg,
285 mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
286 mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
287 memcpy(reg_tlv + sizeof(u32), payload, reg->len);
290 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
291 const struct mlxsw_reg_info *reg,
292 enum mlxsw_core_reg_access_type type,
293 struct mlxsw_core *mlxsw_core)
295 mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
296 mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
297 mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
298 mlxsw_emad_op_tlv_status_set(op_tlv, 0);
299 mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
300 mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
301 if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
302 mlxsw_emad_op_tlv_method_set(op_tlv,
303 MLXSW_EMAD_OP_TLV_METHOD_QUERY);
305 mlxsw_emad_op_tlv_method_set(op_tlv,
306 MLXSW_EMAD_OP_TLV_METHOD_WRITE);
307 mlxsw_emad_op_tlv_class_set(op_tlv,
308 MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
309 mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
312 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
314 char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
316 mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
317 mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
318 mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
319 mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
320 mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
322 skb_reset_mac_header(skb);
327 static void mlxsw_emad_construct(struct sk_buff *skb,
328 const struct mlxsw_reg_info *reg,
330 enum mlxsw_core_reg_access_type type,
331 struct mlxsw_core *mlxsw_core)
335 buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
336 mlxsw_emad_pack_end_tlv(buf);
338 buf = skb_push(skb, reg->len + sizeof(u32));
339 mlxsw_emad_pack_reg_tlv(buf, reg, payload);
341 buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
342 mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
344 mlxsw_emad_construct_eth_hdr(skb);
347 static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
349 return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
352 static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
354 return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
355 MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
358 static char *mlxsw_emad_reg_payload(const char *op_tlv)
360 return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
363 static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
367 op_tlv = mlxsw_emad_op_tlv(skb);
368 return mlxsw_emad_op_tlv_tid_get(op_tlv);
371 static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
375 op_tlv = mlxsw_emad_op_tlv(skb);
376 return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
379 #define MLXSW_EMAD_TIMEOUT_MS 200
381 static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
383 const struct mlxsw_tx_info *tx_info)
388 mlxsw_core->emad.trans_active = true;
390 err = mlxsw_core_skb_transmit(mlxsw_core, skb, tx_info);
392 dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
393 mlxsw_core->emad.tid);
395 goto trans_inactive_out;
398 ret = wait_event_timeout(mlxsw_core->emad.wait,
399 !(mlxsw_core->emad.trans_active),
400 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
402 dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
403 mlxsw_core->emad.tid);
405 goto trans_inactive_out;
411 mlxsw_core->emad.trans_active = false;
415 static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
418 enum mlxsw_emad_op_tlv_status status;
421 status = mlxsw_emad_op_tlv_status_get(op_tlv);
422 tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
425 case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
427 case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
428 case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
429 dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
430 tid, status, mlxsw_emad_op_tlv_status_str(status));
432 case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
433 case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
434 case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
435 case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
436 case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
437 case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
438 case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
439 case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
441 dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
442 tid, status, mlxsw_emad_op_tlv_status_str(status));
447 static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
450 return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
453 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
455 const struct mlxsw_tx_info *tx_info)
457 struct sk_buff *trans_skb;
463 /* We copy the EMAD to a new skb, since we might need
464 * to retransmit it in case of failure.
466 trans_skb = skb_copy(skb, GFP_KERNEL);
472 err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
474 struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
476 err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
478 dev_kfree_skb(resp_skb);
479 if (!err || err != -EAGAIN)
482 if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
487 mlxsw_core->emad.tid++;
491 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
494 struct mlxsw_core *mlxsw_core = priv;
496 if (mlxsw_emad_is_resp(skb) &&
497 mlxsw_core->emad.trans_active &&
498 mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
499 mlxsw_core->emad.resp_skb = skb;
500 mlxsw_core->emad.trans_active = false;
501 wake_up(&mlxsw_core->emad.wait);
507 static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
508 .func = mlxsw_emad_rx_listener_func,
509 .local_port = MLXSW_PORT_DONT_CARE,
510 .trap_id = MLXSW_TRAP_ID_ETHEMAD,
513 static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
515 char htgt_pl[MLXSW_REG_HTGT_LEN];
516 char hpkt_pl[MLXSW_REG_HPKT_LEN];
519 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
520 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
524 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
525 MLXSW_TRAP_ID_ETHEMAD);
526 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
529 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
533 /* Set the upper 32 bits of the transaction ID field to a random
534 * number. This allows us to discard EMADs addressed to other
537 get_random_bytes(&mlxsw_core->emad.tid, 4);
538 mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
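/* Illustrative note (added by the editor): the transaction ID thus has a
 * random upper half and a running counter in the lower half. If, for
 * example, get_random_bytes() yields 0x1a2b3c4d, the first transaction is
 * sent with tid 0x1a2b3c4d00000000 and the lower 32 bits are incremented
 * once per completed transaction.
 */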
540 init_waitqueue_head(&mlxsw_core->emad.wait);
541 mlxsw_core->emad.trans_active = false;
542 mutex_init(&mlxsw_core->emad.lock);
544 err = mlxsw_core_rx_listener_register(mlxsw_core,
545 &mlxsw_emad_rx_listener,
550 err = mlxsw_emad_traps_set(mlxsw_core);
552 goto err_emad_trap_set;
554 mlxsw_core->emad.use_emad = true;
559 mlxsw_core_rx_listener_unregister(mlxsw_core,
560 &mlxsw_emad_rx_listener,
565 static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
567 char hpkt_pl[MLXSW_REG_HPKT_LEN];
569 mlxsw_core->emad.use_emad = false;
570 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
571 MLXSW_TRAP_ID_ETHEMAD);
572 mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
574 mlxsw_core_rx_listener_unregister(mlxsw_core,
575 &mlxsw_emad_rx_listener,
579 static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
585 emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
586 (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
587 sizeof(u32) + mlxsw_core->driver->txhdr_len);
588 if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
591 skb = netdev_alloc_skb(NULL, emad_len);
594 memset(skb->data, 0, emad_len);
595 skb_reserve(skb, emad_len);
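/* Editorial note: reserving the whole emad_len means the buffer is filled
 * back to front - mlxsw_emad_construct() and the bus driver's
 * txhdr_construct() each skb_push() their part in front of what is already
 * there.
 */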
604 static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
606 struct mlxsw_core *mlxsw_core = file->private;
607 struct mlxsw_core_pcpu_stats *p;
608 u64 rx_packets, rx_bytes;
609 u64 tmp_rx_packets, tmp_rx_bytes;
610 u32 rx_dropped, rx_invalid;
614 static const char hdr[] =
615 " NUM RX_PACKETS RX_BYTES RX_DROPPED\n";
617 seq_printf(file, hdr);
618 for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
622 for_each_possible_cpu(j) {
623 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
625 start = u64_stats_fetch_begin(&p->syncp);
626 tmp_rx_packets = p->trap_rx_packets[i];
627 tmp_rx_bytes = p->trap_rx_bytes[i];
628 } while (u64_stats_fetch_retry(&p->syncp, start));
630 rx_packets += tmp_rx_packets;
631 rx_bytes += tmp_rx_bytes;
632 rx_dropped += p->trap_rx_dropped[i];
634 seq_printf(file, "trap %3d %12llu %12llu %10u\n",
635 i, rx_packets, rx_bytes, rx_dropped);
638 for_each_possible_cpu(j) {
639 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
640 rx_invalid += p->trap_rx_invalid;
642 seq_printf(file, "trap INV %10u\n",
645 for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
649 for_each_possible_cpu(j) {
650 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
652 start = u64_stats_fetch_begin(&p->syncp);
653 tmp_rx_packets = p->port_rx_packets[i];
654 tmp_rx_bytes = p->port_rx_bytes[i];
655 } while (u64_stats_fetch_retry(&p->syncp, start));
657 rx_packets += tmp_rx_packets;
658 rx_bytes += tmp_rx_bytes;
659 rx_dropped += p->port_rx_dropped[i];
661 seq_printf(file, "port %3d %12llu %12llu %10u\n",
662 i, rx_packets, rx_bytes, rx_dropped);
665 for_each_possible_cpu(j) {
666 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
667 rx_invalid += p->port_rx_invalid;
669 seq_printf(file, "port INV %10u\n",
674 static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
676 struct mlxsw_core *mlxsw_core = inode->i_private;
678 return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
681 static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
682 .owner = THIS_MODULE,
683 .open = mlxsw_core_rx_stats_dbg_open,
684 .release = single_release,
689 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
690 const char *buf, size_t size)
692 __be32 *m = (__be32 *) buf;
694 int count = size / sizeof(__be32);
696 for (i = count - 1; i >= 0; i--)
701 for (i = 0; i < count; i += 4)
702 dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
703 i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
704 be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
707 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
709 spin_lock(&mlxsw_core_driver_list_lock);
710 list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
711 spin_unlock(&mlxsw_core_driver_list_lock);
714 EXPORT_SYMBOL(mlxsw_core_driver_register);
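/* Usage sketch (illustrative only; the names below are placeholders): a
 * switch driver registers itself with the core roughly as follows, using
 * only fields that core.c consumes:
 *
 *	static struct mlxsw_driver my_driver = {
 *		.kind			= "my_kind",
 *		.owner			= THIS_MODULE,
 *		.priv_size		= sizeof(struct my_priv),
 *		.init			= my_init,
 *		.fini			= my_fini,
 *		.txhdr_construct	= my_txhdr_construct,
 *		.txhdr_len		= MY_TXHDR_LEN,
 *		.profile		= &my_config_profile,
 *	};
 *
 * Its module_init() then calls mlxsw_core_driver_register(&my_driver) and
 * its module_exit() calls mlxsw_core_driver_unregister(&my_driver).
 */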
716 void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
718 spin_lock(&mlxsw_core_driver_list_lock);
719 list_del(&mlxsw_driver->list);
720 spin_unlock(&mlxsw_core_driver_list_lock);
722 EXPORT_SYMBOL(mlxsw_core_driver_unregister);
724 static struct mlxsw_driver *__driver_find(const char *kind)
726 struct mlxsw_driver *mlxsw_driver;
728 list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
729 if (strcmp(mlxsw_driver->kind, kind) == 0)
735 static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
737 struct mlxsw_driver *mlxsw_driver;
739 spin_lock(&mlxsw_core_driver_list_lock);
740 mlxsw_driver = __driver_find(kind);
742 spin_unlock(&mlxsw_core_driver_list_lock);
743 request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
744 spin_lock(&mlxsw_core_driver_list_lock);
745 mlxsw_driver = __driver_find(kind);
748 if (!try_module_get(mlxsw_driver->owner))
752 spin_unlock(&mlxsw_core_driver_list_lock);
756 static void mlxsw_core_driver_put(const char *kind)
758 struct mlxsw_driver *mlxsw_driver;
760 spin_lock(&mlxsw_core_driver_list_lock);
761 mlxsw_driver = __driver_find(kind);
762 spin_unlock(&mlxsw_core_driver_list_lock);
765 module_put(mlxsw_driver->owner);
768 static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
770 const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
772 mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
773 mlxsw_core_dbg_root);
774 if (!mlxsw_core->dbg_dir)
776 debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
777 mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
778 mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
779 mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
780 debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
781 &mlxsw_core->dbg.vsd_blob);
782 mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
783 mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
784 debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
785 &mlxsw_core->dbg.psid_blob);
789 static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
791 debugfs_remove_recursive(mlxsw_core->dbg_dir);
794 static int mlxsw_devlink_port_split(struct devlink *devlink,
795 unsigned int port_index,
798 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
800 if (port_index >= MLXSW_PORT_MAX_PORTS)
802 if (!mlxsw_core->driver->port_split)
804 return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
807 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
808 unsigned int port_index)
810 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
812 if (port_index >= MLXSW_PORT_MAX_PORTS)
814 if (!mlxsw_core->driver->port_unsplit)
816 return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
819 static const struct devlink_ops mlxsw_devlink_ops = {
820 .port_split = mlxsw_devlink_port_split,
821 .port_unsplit = mlxsw_devlink_port_unsplit,
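/* Editorial note: these callbacks back the devlink port split/unsplit
 * operations; with a devlink userspace tool that supports them, this
 * corresponds to commands along the lines of (illustrative syntax):
 *
 *	devlink port split pci/0000:03:00.0/1 count 2
 *	devlink port unsplit pci/0000:03:00.0/1
 */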
824 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
825 const struct mlxsw_bus *mlxsw_bus,
828 const char *device_kind = mlxsw_bus_info->device_kind;
829 struct mlxsw_core *mlxsw_core;
830 struct mlxsw_driver *mlxsw_driver;
831 struct devlink *devlink;
835 mlxsw_driver = mlxsw_core_driver_get(device_kind);
838 alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
839 devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
842 goto err_devlink_alloc;
845 mlxsw_core = devlink_priv(devlink);
846 INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
847 INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
848 mlxsw_core->driver = mlxsw_driver;
849 mlxsw_core->bus = mlxsw_bus;
850 mlxsw_core->bus_priv = bus_priv;
851 mlxsw_core->bus_info = mlxsw_bus_info;
853 mlxsw_core->pcpu_stats =
854 netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
855 if (!mlxsw_core->pcpu_stats) {
857 goto err_alloc_stats;
860 if (mlxsw_driver->profile->used_max_lag &&
861 mlxsw_driver->profile->used_max_port_per_lag) {
862 alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
863 mlxsw_driver->profile->max_port_per_lag;
864 mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
865 if (!mlxsw_core->lag.mapping) {
867 goto err_alloc_lag_mapping;
871 err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
875 err = mlxsw_emad_init(mlxsw_core);
879 err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
883 err = devlink_register(devlink, mlxsw_bus_info->dev);
885 goto err_devlink_register;
887 err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
889 goto err_driver_init;
891 err = mlxsw_core_debugfs_init(mlxsw_core);
893 goto err_debugfs_init;
898 mlxsw_core->driver->fini(mlxsw_core);
900 devlink_unregister(devlink);
901 err_devlink_register:
903 mlxsw_emad_fini(mlxsw_core);
905 mlxsw_bus->fini(bus_priv);
907 kfree(mlxsw_core->lag.mapping);
908 err_alloc_lag_mapping:
909 free_percpu(mlxsw_core->pcpu_stats);
911 devlink_free(devlink);
913 mlxsw_core_driver_put(device_kind);
916 EXPORT_SYMBOL(mlxsw_core_bus_device_register);
918 void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
920 const char *device_kind = mlxsw_core->bus_info->device_kind;
921 struct devlink *devlink = priv_to_devlink(mlxsw_core);
923 mlxsw_core_debugfs_fini(mlxsw_core);
924 mlxsw_core->driver->fini(mlxsw_core);
925 devlink_unregister(devlink);
926 mlxsw_emad_fini(mlxsw_core);
927 mlxsw_core->bus->fini(mlxsw_core->bus_priv);
928 kfree(mlxsw_core->lag.mapping);
929 free_percpu(mlxsw_core->pcpu_stats);
930 devlink_free(devlink);
931 mlxsw_core_driver_put(device_kind);
933 EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
935 bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
936 const struct mlxsw_tx_info *tx_info)
938 return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
941 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
943 int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
944 const struct mlxsw_tx_info *tx_info)
946 return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
949 EXPORT_SYMBOL(mlxsw_core_skb_transmit);
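/* Usage sketch (illustrative only): a port driver's ndo_start_xmit() is
 * expected to use this pair along these lines:
 *
 *	if (mlxsw_core_skb_transmit_busy(mlxsw_core, &tx_info))
 *		return NETDEV_TX_BUSY;
 *	... build the tx header ...
 *	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &tx_info);
 */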
951 static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
952 const struct mlxsw_rx_listener *rxl_b)
954 return (rxl_a->func == rxl_b->func &&
955 rxl_a->local_port == rxl_b->local_port &&
956 rxl_a->trap_id == rxl_b->trap_id);
959 static struct mlxsw_rx_listener_item *
960 __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
961 const struct mlxsw_rx_listener *rxl,
964 struct mlxsw_rx_listener_item *rxl_item;
966 list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
967 if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
968 rxl_item->priv == priv)
974 int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
975 const struct mlxsw_rx_listener *rxl,
978 struct mlxsw_rx_listener_item *rxl_item;
980 rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
983 rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
986 rxl_item->rxl = *rxl;
987 rxl_item->priv = priv;
989 list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
992 EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
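/* Usage sketch (illustrative only; mlxsw_emad_rx_listener above is a
 * concrete in-file instance): callers describe what they want to receive
 * with a func/local_port/trap_id triple and register it together with a
 * private pointer:
 *
 *	static const struct mlxsw_rx_listener my_rxl = {
 *		.func		= my_rx_func,
 *		.local_port	= MLXSW_PORT_DONT_CARE,
 *		.trap_id	= MLXSW_TRAP_ID_ETHEMAD,
 *	};
 *
 *	err = mlxsw_core_rx_listener_register(mlxsw_core, &my_rxl, my_priv);
 *
 * The exact same rxl/priv pair must later be passed to
 * mlxsw_core_rx_listener_unregister().
 */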
994 void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
995 const struct mlxsw_rx_listener *rxl,
998 struct mlxsw_rx_listener_item *rxl_item;
1000 rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
1003 list_del_rcu(&rxl_item->list);
1007 EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
1009 static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
1012 struct mlxsw_event_listener_item *event_listener_item = priv;
1013 struct mlxsw_reg_info reg;
1015 char *op_tlv = mlxsw_emad_op_tlv(skb);
1016 char *reg_tlv = mlxsw_emad_reg_tlv(skb);
1018 reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
1019 reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
1020 payload = mlxsw_emad_reg_payload(op_tlv);
1021 event_listener_item->el.func(&reg, payload, event_listener_item->priv);
1025 static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
1026 const struct mlxsw_event_listener *el_b)
1028 return (el_a->func == el_b->func &&
1029 el_a->trap_id == el_b->trap_id);
1032 static struct mlxsw_event_listener_item *
1033 __find_event_listener_item(struct mlxsw_core *mlxsw_core,
1034 const struct mlxsw_event_listener *el,
1037 struct mlxsw_event_listener_item *el_item;
1039 list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
1040 if (__is_event_listener_equal(&el_item->el, el) &&
1041 el_item->priv == priv)
1047 int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
1048 const struct mlxsw_event_listener *el,
1052 struct mlxsw_event_listener_item *el_item;
1053 const struct mlxsw_rx_listener rxl = {
1054 .func = mlxsw_core_event_listener_func,
1055 .local_port = MLXSW_PORT_DONT_CARE,
1056 .trap_id = el->trap_id,
1059 el_item = __find_event_listener_item(mlxsw_core, el, priv);
1062 el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
1066 el_item->priv = priv;
1068 err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
1070 goto err_rx_listener_register;
1072 /* No reason to save item if we did not manage to register an RX
1075 list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
1079 err_rx_listener_register:
1083 EXPORT_SYMBOL(mlxsw_core_event_listener_register);
1085 void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
1086 const struct mlxsw_event_listener *el,
1089 struct mlxsw_event_listener_item *el_item;
1090 const struct mlxsw_rx_listener rxl = {
1091 .func = mlxsw_core_event_listener_func,
1092 .local_port = MLXSW_PORT_DONT_CARE,
1093 .trap_id = el->trap_id,
1096 el_item = __find_event_listener_item(mlxsw_core, el, priv);
1099 mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
1100 list_del(&el_item->list);
1103 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
1105 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
1106 const struct mlxsw_reg_info *reg,
1108 enum mlxsw_core_reg_access_type type)
1112 struct sk_buff *skb;
1113 struct mlxsw_tx_info tx_info = {
1114 .local_port = MLXSW_PORT_CPU_PORT,
1118 skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
1122 mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
1123 mlxsw_core->driver->txhdr_construct(skb, &tx_info);
1125 dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
1126 mlxsw_core->emad.tid);
1127 mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
1129 err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
1131 op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
1132 memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
1135 dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
1136 mlxsw_core->emad.tid - 1);
1137 mlxsw_core_buf_dump_dbg(mlxsw_core,
1138 mlxsw_core->emad.resp_skb->data,
1139 mlxsw_core->emad.resp_skb->len);
1141 dev_kfree_skb(mlxsw_core->emad.resp_skb);
1147 static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
1148 const struct mlxsw_reg_info *reg,
1150 enum mlxsw_core_reg_access_type type)
1153 char *in_mbox, *out_mbox, *tmp;
1155 in_mbox = mlxsw_cmd_mbox_alloc();
1159 out_mbox = mlxsw_cmd_mbox_alloc();
1165 mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
1166 tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
1167 mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
1171 err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
1173 err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
1174 if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
1179 memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
1182 mlxsw_core->emad.tid++;
1183 mlxsw_cmd_mbox_free(out_mbox);
1185 mlxsw_cmd_mbox_free(in_mbox);
1189 static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
1190 const struct mlxsw_reg_info *reg,
1192 enum mlxsw_core_reg_access_type type)
1197 if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
1198 dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
1199 reg->id, mlxsw_reg_id_str(reg->id),
1200 mlxsw_core_reg_access_type_str(type));
1204 cur_tid = mlxsw_core->emad.tid;
1205 dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
1206 cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
1207 mlxsw_core_reg_access_type_str(type));
1209 /* During initialization the EMAD interface is not available to us,
1210 * so we default to the command interface. We switch to the EMAD
1211 * interface after setting the appropriate traps.
1213 if (!mlxsw_core->emad.use_emad)
1214 err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
1217 err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
1221 dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
1222 cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
1223 mlxsw_core_reg_access_type_str(type));
1225 mutex_unlock(&mlxsw_core->emad.lock);
1229 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
1230 const struct mlxsw_reg_info *reg, char *payload)
1232 return mlxsw_core_reg_access(mlxsw_core, reg, payload,
1233 MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
1235 EXPORT_SYMBOL(mlxsw_reg_query);
1237 int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
1238 const struct mlxsw_reg_info *reg, char *payload)
1240 return mlxsw_core_reg_access(mlxsw_core, reg, payload,
1241 MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
1243 EXPORT_SYMBOL(mlxsw_reg_write);
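/* Usage sketch (illustrative only, mirroring mlxsw_emad_traps_set() above):
 * the caller packs the register payload with the corresponding helper from
 * reg.h and passes it together with the register description:
 *
 *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
 *
 *	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
 *			    MLXSW_TRAP_ID_ETHEMAD);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
 *
 * mlxsw_reg_query() takes the same arguments and fills the payload buffer
 * with the values read back from the device.
 */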
1245 void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1246 struct mlxsw_rx_info *rx_info)
1248 struct mlxsw_rx_listener_item *rxl_item;
1249 const struct mlxsw_rx_listener *rxl;
1250 struct mlxsw_core_pcpu_stats *pcpu_stats;
1254 if (rx_info->is_lag) {
1255 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
1256 __func__, rx_info->u.lag_id,
1258 /* Upper layer does not care if the skb came from LAG or not,
1259 * so just get the local_port for the lag port and push it up.
1261 local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
1263 rx_info->lag_port_index);
1265 local_port = rx_info->u.sys_port;
1268 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
1269 __func__, local_port, rx_info->trap_id);
1271 if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
1272 (local_port >= MLXSW_PORT_MAX_PORTS))
1276 list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
1277 rxl = &rxl_item->rxl;
1278 if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
1279 rxl->local_port == local_port) &&
1280 rxl->trap_id == rx_info->trap_id) {
1289 pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
1290 u64_stats_update_begin(&pcpu_stats->syncp);
1291 pcpu_stats->port_rx_packets[local_port]++;
1292 pcpu_stats->port_rx_bytes[local_port] += skb->len;
1293 pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
1294 pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
1295 u64_stats_update_end(&pcpu_stats->syncp);
1297 rxl->func(skb, local_port, rxl_item->priv);
1301 if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
1302 this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
1304 this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
1305 if (local_port >= MLXSW_PORT_MAX_PORTS)
1306 this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
1308 this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
1311 EXPORT_SYMBOL(mlxsw_core_skb_receive);
1313 static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
1314 u16 lag_id, u8 port_index)
1316 return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
1320 void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
1321 u16 lag_id, u8 port_index, u8 local_port)
1323 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1324 lag_id, port_index);
1326 mlxsw_core->lag.mapping[index] = local_port;
1328 EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
1330 u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
1331 u16 lag_id, u8 port_index)
1333 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1334 lag_id, port_index);
1336 return mlxsw_core->lag.mapping[index];
1338 EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
1340 void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
1341 u16 lag_id, u8 local_port)
1345 for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
1346 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1349 if (mlxsw_core->lag.mapping[index] == local_port)
1350 mlxsw_core->lag.mapping[index] = 0;
1353 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
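/* Illustrative example (hypothetical numbers): with
 * profile->max_port_per_lag == 16, the entry for lag_id 2 and port_index 3
 * lives at lag.mapping[2 * 16 + 3] == lag.mapping[35]; set, get and clear
 * above all address the same flat array slot.
 */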
1355 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
1356 struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
1357 struct net_device *dev, bool split, u32 split_group)
1359 struct devlink *devlink = priv_to_devlink(mlxsw_core);
1360 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1363 devlink_port_split_set(devlink_port, split_group);
1364 devlink_port_type_eth_set(devlink_port, dev);
1365 return devlink_port_register(devlink, devlink_port, local_port);
1367 EXPORT_SYMBOL(mlxsw_core_port_init);
1369 void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
1371 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1373 devlink_port_unregister(devlink_port);
1375 EXPORT_SYMBOL(mlxsw_core_port_fini);
1377 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
1378 u32 in_mod, bool out_mbox_direct,
1379 char *in_mbox, size_t in_mbox_size,
1380 char *out_mbox, size_t out_mbox_size)
1385 BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
1386 if (!mlxsw_core->bus->cmd_exec)
1389 dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
1390 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
1392 dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
1393 mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
1396 err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
1397 opcode_mod, in_mod, out_mbox_direct,
1398 in_mbox, in_mbox_size,
1399 out_mbox, out_mbox_size, &status);
1401 if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
1402 dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
1403 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
1404 in_mod, status, mlxsw_cmd_status_str(status));
1405 } else if (err == -ETIMEDOUT) {
1406 dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
1407 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
1411 if (!err && out_mbox) {
1412 dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
1413 mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
1417 EXPORT_SYMBOL(mlxsw_cmd_exec);
1419 static int __init mlxsw_core_module_init(void)
1421 mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
1422 if (!mlxsw_core_dbg_root)
1427 static void __exit mlxsw_core_module_exit(void)
1429 debugfs_remove_recursive(mlxsw_core_dbg_root);
1432 module_init(mlxsw_core_module_init);
1433 module_exit(mlxsw_core_module_exit);
1435 MODULE_LICENSE("Dual BSD/GPL");
1436 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1437 MODULE_DESCRIPTION("Mellanox switch device core driver");