1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/netdevice.h>
19 #include <linux/net_tstamp.h>
20 #include <linux/pci.h>
21 #include "liquidio_common.h"
22 #include "octeon_droq.h"
23 #include "octeon_iq.h"
24 #include "response_manager.h"
25 #include "octeon_device.h"
26 #include "octeon_nic.h"
27 #include "octeon_main.h"
28 #include "octeon_network.h"
29 #include "cn66xx_regs.h"
30 #include "cn66xx_device.h"
31 #include "cn23xx_pf_device.h"
32 #include "cn23xx_vf_device.h"
33
34 static int octnet_get_link_stats(struct net_device *netdev);
35
36 struct oct_intrmod_context {
37         int octeon_id;
38         wait_queue_head_t wc;
39         int cond;
40         int status;
41 };
42
43 struct oct_intrmod_resp {
44         u64     rh;
45         struct oct_intrmod_cfg intrmod;
46         u64     status;
47 };
48
49 struct oct_mdio_cmd_context {
50         int octeon_id;
51         wait_queue_head_t wc;
52         int cond;
53 };
54
55 struct oct_mdio_cmd_resp {
56         u64 rh;
57         struct oct_mdio_cmd resp;
58         u64 status;
59 };
60
61 #define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
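
/* The *_context structures above pair a wait queue with a condition flag so
 * the issuing thread can sleep until the firmware response lands in the
 * matching *_resp buffer.  A condensed sketch of the pattern used throughout
 * this file (issuer first, then the completion callback; error handling
 * omitted):
 *
 *	WRITE_ONCE(ctx->cond, 0);
 *	init_waitqueue_head(&ctx->wc);
 *	octeon_send_soft_command(oct_dev, sc);
 *	sleep_cond(&ctx->wc, &ctx->cond);
 *
 *	// in the callback, once the response is ready:
 *	WRITE_ONCE(ctx->cond, 1);
 *	wake_up_interruptible(&ctx->wc);
 */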
62
63 /* Octeon's interface mode of operation */
64 enum {
65         INTERFACE_MODE_DISABLED,
66         INTERFACE_MODE_RGMII,
67         INTERFACE_MODE_GMII,
68         INTERFACE_MODE_SPI,
69         INTERFACE_MODE_PCIE,
70         INTERFACE_MODE_XAUI,
71         INTERFACE_MODE_SGMII,
72         INTERFACE_MODE_PICMG,
73         INTERFACE_MODE_NPI,
74         INTERFACE_MODE_LOOP,
75         INTERFACE_MODE_SRIO,
76         INTERFACE_MODE_ILK,
77         INTERFACE_MODE_RXAUI,
78         INTERFACE_MODE_QSGMII,
79         INTERFACE_MODE_AGL,
80         INTERFACE_MODE_XLAUI,
81         INTERFACE_MODE_XFI,
82         INTERFACE_MODE_10G_KR,
83         INTERFACE_MODE_40G_KR4,
84         INTERFACE_MODE_MIXED,
85 };
86
87 #define OCT_ETHTOOL_REGDUMP_LEN  4096
88 #define OCT_ETHTOOL_REGDUMP_LEN_23XX  (4096 * 11)
89 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF  (4096 * 2)
90 #define OCT_ETHTOOL_REGSVER  1
91
92 /* statistics of PF */
93 static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
94         "rx_packets",
95         "tx_packets",
96         "rx_bytes",
97         "tx_bytes",
98         "rx_errors",    /*jabber_err+l2_err+frame_err */
99         "tx_errors",    /*fw_err_pko+fw_err_link+fw_err_drop */
100         "rx_dropped",   /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd +
101                          *st->fromwire.dmac_drop + st->fromwire.fw_err_drop
102                          */
103         "tx_dropped",
104
105         "tx_total_sent",
106         "tx_total_fwd",
107         "tx_err_pko",
108         "tx_err_link",
109         "tx_err_drop",
110
111         "tx_tso",
112         "tx_tso_packets",
113         "tx_tso_err",
114         "tx_vxlan",
115
116         "mac_tx_total_pkts",
117         "mac_tx_total_bytes",
118         "mac_tx_mcast_pkts",
119         "mac_tx_bcast_pkts",
120         "mac_tx_ctl_packets",   /*oct->link_stats.fromhost.ctl_sent */
121         "mac_tx_total_collisions",
122         "mac_tx_one_collision",
123         "mac_tx_multi_collison",
124         "mac_tx_max_collision_fail",
125         "mac_tx_max_deferal_fail",
126         "mac_tx_fifo_err",
127         "mac_tx_runts",
128
129         "rx_total_rcvd",
130         "rx_total_fwd",
131         "rx_jabber_err",
132         "rx_l2_err",
133         "rx_frame_err",
134         "rx_err_pko",
135         "rx_err_link",
136         "rx_err_drop",
137
138         "rx_vxlan",
139         "rx_vxlan_err",
140
141         "rx_lro_pkts",
142         "rx_lro_bytes",
143         "rx_total_lro",
144
145         "rx_lro_aborts",
146         "rx_lro_aborts_port",
147         "rx_lro_aborts_seq",
148         "rx_lro_aborts_tsval",
149         "rx_lro_aborts_timer",
150         "rx_fwd_rate",
151
152         "mac_rx_total_rcvd",
153         "mac_rx_bytes",
154         "mac_rx_total_bcst",
155         "mac_rx_total_mcst",
156         "mac_rx_runts",
157         "mac_rx_ctl_packets",
158         "mac_rx_fifo_err",
159         "mac_rx_dma_drop",
160         "mac_rx_fcs_err",
161
162         "link_state_changes",
163 };
164
165 /* statistics of VF */
166 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
167         "rx_packets",
168         "tx_packets",
169         "rx_bytes",
170         "tx_bytes",
171         "rx_errors", /* jabber_err + l2_err+frame_err */
172         "tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */
173         "rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */
174         "tx_dropped",
175         "link_state_changes",
176 };
177
178 /* statistics of host tx queue */
179 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
180         "packets",              /*oct->instr_queue[iq_no]->stats.tx_done*/
181         "bytes",                /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
182         "dropped",
183         "iq_busy",
184         "sgentry_sent",
185
186         "fw_instr_posted",
187         "fw_instr_processed",
188         "fw_instr_dropped",
189         "fw_bytes_sent",
190
191         "tso",
192         "vxlan",
193         "txq_restart",
194 };
195
196 /* statistics of host rx queue */
197 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
198         "packets",              /*oct->droq[oq_no]->stats.rx_pkts_received */
199         "bytes",                /*oct->droq[oq_no]->stats.rx_bytes_received */
200         "dropped",              /*oct->droq[oq_no]->stats.rx_dropped+
201                                  *oct->droq[oq_no]->stats.dropped_nodispatch+
202                                  *oct->droq[oq_no]->stats.dropped_toomany+
203                                  *oct->droq[oq_no]->stats.dropped_nomem
204                                  */
205         "dropped_nomem",
206         "dropped_toomany",
207         "fw_dropped",
208         "fw_pkts_received",
209         "fw_bytes_received",
210         "fw_dropped_nodispatch",
211
212         "vxlan",
213         "buffer_alloc_failure",
214 };
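
/* The per-queue tables above are emitted once for every active queue by
 * lio_get_strings()/lio_vf_get_strings(), after the global table, so the
 * "ethtool -S" entry count reported by lio_get_sset_count() works out to
 * (using the arrays defined in this file):
 *
 *	ARRAY_SIZE(oct_stats_strings) +
 *	ARRAY_SIZE(oct_iq_stats_strings)   * oct_dev->num_iqs +
 *	ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs
 *
 * and lio_get_ethtool_stats() must fill data[] in exactly that order.
 */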
215
216 /* LiquidIO driver private flags */
217 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
218 };
219
220 #define OCTNIC_NCMD_AUTONEG_ON  0x1
221 #define OCTNIC_NCMD_PHY_ON      0x2
222
223 static int lio_get_link_ksettings(struct net_device *netdev,
224                                   struct ethtool_link_ksettings *ecmd)
225 {
226         struct lio *lio = GET_LIO(netdev);
227         struct octeon_device *oct = lio->oct_dev;
228         struct oct_link_info *linfo;
229         u32 supported = 0, advertising = 0;
230
231         linfo = &lio->linfo;
232
233         if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
234             linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
235             linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
236             linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
237                 ecmd->base.port = PORT_FIBRE;
238
239                 if (linfo->link.s.speed == SPEED_10000) {
240                         supported = SUPPORTED_10000baseT_Full;
241                         advertising = ADVERTISED_10000baseT_Full;
242                 }
243
244                 supported |= SUPPORTED_FIBRE | SUPPORTED_Pause;
245                 advertising |= ADVERTISED_Pause;
246                 ethtool_convert_legacy_u32_to_link_mode(
247                         ecmd->link_modes.supported, supported);
248                 ethtool_convert_legacy_u32_to_link_mode(
249                         ecmd->link_modes.advertising, advertising);
250                 ecmd->base.autoneg = AUTONEG_DISABLE;
251
252         } else {
253                 dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
254                         linfo->link.s.if_mode);
255         }
256
257         if (linfo->link.s.link_up) {
258                 ecmd->base.speed = linfo->link.s.speed;
259                 ecmd->base.duplex = linfo->link.s.duplex;
260         } else {
261                 ecmd->base.speed = SPEED_UNKNOWN;
262                 ecmd->base.duplex = DUPLEX_UNKNOWN;
263         }
264
265         return 0;
266 }
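
/* For reference, the conversion helper used above simply expands a legacy
 * u32 mask into the ksettings link-mode bitmap; an illustrative (not
 * driver-specific) call for a 10G fibre port would be:
 *
 *	u32 supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
 *			SUPPORTED_Pause;
 *
 *	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
 *						supported);
 */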
267
268 static void
269 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
270 {
271         struct lio *lio;
272         struct octeon_device *oct;
273
274         lio = GET_LIO(netdev);
275         oct = lio->oct_dev;
276
277         memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
278         strcpy(drvinfo->driver, "liquidio");
279         strcpy(drvinfo->version, LIQUIDIO_VERSION);
280         strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
281                 ETHTOOL_FWVERS_LEN);
282         strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
283 }
284
285 static void
286 lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
287 {
288         struct octeon_device *oct;
289         struct lio *lio;
290
291         lio = GET_LIO(netdev);
292         oct = lio->oct_dev;
293
294         memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
295         strcpy(drvinfo->driver, "liquidio_vf");
296         strcpy(drvinfo->version, LIQUIDIO_VERSION);
297         strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
298                 ETHTOOL_FWVERS_LEN);
299         strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
300 }
301
302 static void
303 lio_ethtool_get_channels(struct net_device *dev,
304                          struct ethtool_channels *channel)
305 {
306         struct lio *lio = GET_LIO(dev);
307         struct octeon_device *oct = lio->oct_dev;
308         u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
309
310         if (OCTEON_CN6XXX(oct)) {
311                 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
312
313                 max_rx = CFG_GET_OQ_MAX_Q(conf6x);
314                 max_tx = CFG_GET_IQ_MAX_Q(conf6x);
315                 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
316                 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
317         } else if (OCTEON_CN23XX_PF(oct)) {
318
319                 max_rx = oct->sriov_info.num_pf_rings;
320                 max_tx = oct->sriov_info.num_pf_rings;
321                 rx_count = lio->linfo.num_rxpciq;
322                 tx_count = lio->linfo.num_txpciq;
323         } else if (OCTEON_CN23XX_VF(oct)) {
324                 max_tx = oct->sriov_info.rings_per_vf;
325                 max_rx = oct->sriov_info.rings_per_vf;
326                 rx_count = lio->linfo.num_rxpciq;
327                 tx_count = lio->linfo.num_txpciq;
328         }
329
330         channel->max_rx = max_rx;
331         channel->max_tx = max_tx;
332         channel->rx_count = rx_count;
333         channel->tx_count = tx_count;
334 }
335
336 static int lio_get_eeprom_len(struct net_device *netdev)
337 {
338         u8 buf[192];
339         struct lio *lio = GET_LIO(netdev);
340         struct octeon_device *oct_dev = lio->oct_dev;
341         struct octeon_board_info *board_info;
342         int len;
343
344         board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
345         len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
346                       board_info->name, board_info->serial_number,
347                       board_info->major, board_info->minor);
348
349         return len;
350 }
351
352 static int
353 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
354                u8 *bytes)
355 {
356         struct lio *lio = GET_LIO(netdev);
357         struct octeon_device *oct_dev = lio->oct_dev;
358         struct octeon_board_info *board_info;
359
360         if (eeprom->offset)
361                 return -EINVAL;
362
363         eeprom->magic = oct_dev->pci_dev->vendor;
364         board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
365         sprintf((char *)bytes,
366                 "boardname:%s serialnum:%s maj:%lld min:%lld\n",
367                 board_info->name, board_info->serial_number,
368                 board_info->major, board_info->minor);
369
370         return 0;
371 }
372
373 static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
374 {
375         struct lio *lio = GET_LIO(netdev);
376         struct octeon_device *oct = lio->oct_dev;
377         struct octnic_ctrl_pkt nctrl;
378         int ret = 0;
379
380         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
381
382         nctrl.ncmd.u64 = 0;
383         nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
384         nctrl.ncmd.s.param1 = addr;
385         nctrl.ncmd.s.param2 = val;
386         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
387         nctrl.wait_time = 100;
388         nctrl.netpndev = (u64)netdev;
389         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
390
391         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
392         if (ret < 0) {
393                 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
394                 return -EINVAL;
395         }
396
397         return 0;
398 }
399
400 static int octnet_id_active(struct net_device *netdev, int val)
401 {
402         struct lio *lio = GET_LIO(netdev);
403         struct octeon_device *oct = lio->oct_dev;
404         struct octnic_ctrl_pkt nctrl;
405         int ret = 0;
406
407         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
408
409         nctrl.ncmd.u64 = 0;
410         nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
411         nctrl.ncmd.s.param1 = val;
412         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
413         nctrl.wait_time = 100;
414         nctrl.netpndev = (u64)netdev;
415         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
416
417         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
418         if (ret < 0) {
419                 dev_err(&oct->pci_dev->dev, "Failed to configure LED identification\n");
420                 return -EINVAL;
421         }
422
423         return 0;
424 }
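
/* octnet_gpio_access() and octnet_id_active() follow the same control-command
 * pattern used for most ethtool operations in this driver: fill a
 * struct octnic_ctrl_pkt, target the first tx queue, and post it with
 * octnet_send_nic_ctrl_pkt().  In outline (fields as used above, command
 * value is a placeholder):
 *
 *	memset(&nctrl, 0, sizeof(nctrl));
 *	nctrl.ncmd.s.cmd = OCTNET_CMD_xxx;
 *	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 *	nctrl.wait_time = 100;
 *	nctrl.netpndev = (u64)netdev;
 *	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 *	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 */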
425
426 /* Callback invoked when the MDIO command response arrives
427  */
428 static void octnet_mdio_resp_callback(struct octeon_device *oct,
429                                       u32 status,
430                                       void *buf)
431 {
432         struct oct_mdio_cmd_context *mdio_cmd_ctx;
433         struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
434
435         mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
436
437         oct = lio_get_device(mdio_cmd_ctx->octeon_id);
438         if (status) {
439                         dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
440                         CVM_CAST64(status));
441                 WRITE_ONCE(mdio_cmd_ctx->cond, -1);
442         } else {
443                 WRITE_ONCE(mdio_cmd_ctx->cond, 1);
444         }
445         wake_up_interruptible(&mdio_cmd_ctx->wc);
446 }
447
448 /* This routine provides PHY access routines for
449  * MDIO clause 45.
450  */
451 static int
452 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
453 {
454         struct octeon_device *oct_dev = lio->oct_dev;
455         struct octeon_soft_command *sc;
456         struct oct_mdio_cmd_resp *mdio_cmd_rsp;
457         struct oct_mdio_cmd_context *mdio_cmd_ctx;
458         struct oct_mdio_cmd *mdio_cmd;
459         int retval = 0;
460
461         sc = (struct octeon_soft_command *)
462                 octeon_alloc_soft_command(oct_dev,
463                                           sizeof(struct oct_mdio_cmd),
464                                           sizeof(struct oct_mdio_cmd_resp),
465                                           sizeof(struct oct_mdio_cmd_context));
466
467         if (!sc)
468                 return -ENOMEM;
469
470         mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
471         mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
472         mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
473
474         WRITE_ONCE(mdio_cmd_ctx->cond, 0);
475         mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
476         mdio_cmd->op = op;
477         mdio_cmd->mdio_addr = loc;
478         if (op)
479                 mdio_cmd->value1 = *value;
480         octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
481
482         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
483
484         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
485                                     0, 0, 0);
486
487         sc->wait_time = 1000;
488         sc->callback = octnet_mdio_resp_callback;
489         sc->callback_arg = sc;
490
491         init_waitqueue_head(&mdio_cmd_ctx->wc);
492
493         retval = octeon_send_soft_command(oct_dev, sc);
494
495         if (retval == IQ_SEND_FAILED) {
496                 dev_err(&oct_dev->pci_dev->dev,
497                         "octnet_mdio45_access instruction failed status: %x\n",
498                         retval);
499                 retval = -EBUSY;
500         } else {
501                 /* Sleep on a wait queue till the cond flag indicates that the
502                  * response arrived
503                  */
504                 sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
505                 retval = mdio_cmd_rsp->status;
506                 if (retval) {
507                         dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
508                         retval = -EBUSY;
509                 } else {
510                         octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
511                                             sizeof(struct oct_mdio_cmd) / 8);
512
513                         if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
514                                 if (!op)
515                                         *value = mdio_cmd_rsp->resp.value1;
516                         } else {
517                                 retval = -EINVAL;
518                         }
519                 }
520         }
521
522         octeon_free_soft_command(oct_dev, sc);
523
524         return retval;
525 }
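
/* A minimal illustration of how callers use octnet_mdio45_access() for a
 * clause-45 read-modify-write (op 0 = read, op 1 = write), modeled on the
 * LED handling in lio_set_phys_id() below; the register address is only an
 * example:
 *
 *	int val;
 *
 *	if (!octnet_mdio45_access(lio, 0, LIO68XX_LED_BEACON_ADDR, &val)) {
 *		val = LIO68XX_LED_BEACON_CFGON;
 *		octnet_mdio45_access(lio, 1, LIO68XX_LED_BEACON_ADDR, &val);
 *	}
 */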
526
527 static int lio_set_phys_id(struct net_device *netdev,
528                            enum ethtool_phys_id_state state)
529 {
530         struct lio *lio = GET_LIO(netdev);
531         struct octeon_device *oct = lio->oct_dev;
532         int value, ret;
533
534         switch (state) {
535         case ETHTOOL_ID_ACTIVE:
536                 if (oct->chip_id == OCTEON_CN66XX) {
537                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
538                                            VITESSE_PHY_GPIO_DRIVEON);
539                         return 2;
540
541                 } else if (oct->chip_id == OCTEON_CN68XX) {
542                         /* Save the current LED settings */
543                         ret = octnet_mdio45_access(lio, 0,
544                                                    LIO68XX_LED_BEACON_ADDR,
545                                                    &lio->phy_beacon_val);
546                         if (ret)
547                                 return ret;
548
549                         ret = octnet_mdio45_access(lio, 0,
550                                                    LIO68XX_LED_CTRL_ADDR,
551                                                    &lio->led_ctrl_val);
552                         if (ret)
553                                 return ret;
554
555                         /* Configure Beacon values */
556                         value = LIO68XX_LED_BEACON_CFGON;
557                         ret = octnet_mdio45_access(lio, 1,
558                                                    LIO68XX_LED_BEACON_ADDR,
559                                                    &value);
560                         if (ret)
561                                 return ret;
562
563                         value = LIO68XX_LED_CTRL_CFGON;
564                         ret = octnet_mdio45_access(lio, 1,
565                                                    LIO68XX_LED_CTRL_ADDR,
566                                                    &value);
567                         if (ret)
568                                 return ret;
569                 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
570                         octnet_id_active(netdev, LED_IDENTIFICATION_ON);
571
572                         /* returns 0 since updates are asynchronous */
573                         return 0;
574                 } else {
575                         return -EINVAL;
576                 }
577                 break;
578
579         case ETHTOOL_ID_ON:
580                 if (oct->chip_id == OCTEON_CN66XX) {
581                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
582                                            VITESSE_PHY_GPIO_HIGH);
583
584                 } else if (oct->chip_id == OCTEON_CN68XX) {
585                         return -EINVAL;
586                 } else {
587                         return -EINVAL;
588                 }
589                 break;
590
591         case ETHTOOL_ID_OFF:
592                 if (oct->chip_id == OCTEON_CN66XX)
593                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
594                                            VITESSE_PHY_GPIO_LOW);
595                 else if (oct->chip_id == OCTEON_CN68XX)
596                         return -EINVAL;
597                 else
598                         return -EINVAL;
599
600                 break;
601
602         case ETHTOOL_ID_INACTIVE:
603                 if (oct->chip_id == OCTEON_CN66XX) {
604                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
605                                            VITESSE_PHY_GPIO_DRIVEOFF);
606                 } else if (oct->chip_id == OCTEON_CN68XX) {
607                         /* Restore LED settings */
608                         ret = octnet_mdio45_access(lio, 1,
609                                                    LIO68XX_LED_CTRL_ADDR,
610                                                    &lio->led_ctrl_val);
611                         if (ret)
612                                 return ret;
613
614                         ret = octnet_mdio45_access(lio, 1,
615                                                    LIO68XX_LED_BEACON_ADDR,
616                                                    &lio->phy_beacon_val);
617                         if (ret)
618                                 return ret;
619                 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
620                         octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
621
622                         return 0;
623                 } else {
624                         return -EINVAL;
625                 }
626                 break;
627
628         default:
629                 return -EINVAL;
630         }
631
632         return 0;
633 }
634
635 static void
636 lio_ethtool_get_ringparam(struct net_device *netdev,
637                           struct ethtool_ringparam *ering)
638 {
639         struct lio *lio = GET_LIO(netdev);
640         struct octeon_device *oct = lio->oct_dev;
641         u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
642             rx_pending = 0;
643
644         if (OCTEON_CN6XXX(oct)) {
645                 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
646
647                 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
648                 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
649                 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
650                 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
651         } else if (OCTEON_CN23XX_PF(oct)) {
652                 struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
653
654                 tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
655                 rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
656                 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx);
657                 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx);
658         }
659
660         if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
661                 ering->rx_pending = 0;
662                 ering->rx_max_pending = 0;
663                 ering->rx_mini_pending = 0;
664                 ering->rx_jumbo_pending = rx_pending;
665                 ering->rx_mini_max_pending = 0;
666                 ering->rx_jumbo_max_pending = rx_max_pending;
667         } else {
668                 ering->rx_pending = rx_pending;
669                 ering->rx_max_pending = rx_max_pending;
670                 ering->rx_mini_pending = 0;
671                 ering->rx_jumbo_pending = 0;
672                 ering->rx_mini_max_pending = 0;
673                 ering->rx_jumbo_max_pending = 0;
674         }
675
676         ering->tx_pending = tx_pending;
677         ering->tx_max_pending = tx_max_pending;
678 }
679
680 static u32 lio_get_msglevel(struct net_device *netdev)
681 {
682         struct lio *lio = GET_LIO(netdev);
683
684         return lio->msg_enable;
685 }
686
687 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
688 {
689         struct lio *lio = GET_LIO(netdev);
690
691         if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
692                 if (msglvl & NETIF_MSG_HW)
693                         liquidio_set_feature(netdev,
694                                              OCTNET_CMD_VERBOSE_ENABLE, 0);
695                 else
696                         liquidio_set_feature(netdev,
697                                              OCTNET_CMD_VERBOSE_DISABLE, 0);
698         }
699
700         lio->msg_enable = msglvl;
701 }
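
/* Note on the test above: the XOR isolates the message-level bits that are
 * changing, so the firmware verbose command is only sent when NETIF_MSG_HW
 * actually toggles.  For example:
 *
 *	old = 0,            new = NETIF_MSG_HW                  -> enable sent
 *	old = NETIF_MSG_HW, new = NETIF_MSG_HW | NETIF_MSG_LINK -> nothing sent
 */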
702
703 static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
704 {
705         struct lio *lio = GET_LIO(netdev);
706
707         lio->msg_enable = msglvl;
708 }
709
710 static void
711 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
712 {
713         /* Notes: Not supporting any auto negotiation in these
714          * drivers. Just report pause frame support.
715          */
716         struct lio *lio = GET_LIO(netdev);
717         struct octeon_device *oct = lio->oct_dev;
718
719         pause->autoneg = 0;
720
721         pause->tx_pause = oct->tx_pause;
722         pause->rx_pause = oct->rx_pause;
723 }
724
725 static int
726 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
727 {
728         /* Notes: Not supporting any auto negotiation in these
729          * drivers.
730          */
731         struct lio *lio = GET_LIO(netdev);
732         struct octeon_device *oct = lio->oct_dev;
733         struct octnic_ctrl_pkt nctrl;
734         struct oct_link_info *linfo = &lio->linfo;
735
736         int ret = 0;
737
738         if (oct->chip_id != OCTEON_CN23XX_PF_VID)
739                 return -EINVAL;
740
741         if (linfo->link.s.duplex == 0) {
742                 /*no flow control for half duplex*/
743                 if (pause->rx_pause || pause->tx_pause)
744                         return -EINVAL;
745         }
746
747         /*do not support autoneg of link flow control*/
748         if (pause->autoneg == AUTONEG_ENABLE)
749                 return -EINVAL;
750
751         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
752
753         nctrl.ncmd.u64 = 0;
754         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
755         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
756         nctrl.wait_time = 100;
757         nctrl.netpndev = (u64)netdev;
758         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
759
760         if (pause->rx_pause) {
761                 /*enable rx pause*/
762                 nctrl.ncmd.s.param1 = 1;
763         } else {
764                 /*disable rx pause*/
765                 nctrl.ncmd.s.param1 = 0;
766         }
767
768         if (pause->tx_pause) {
769                 /*enable tx pause*/
770                 nctrl.ncmd.s.param2 = 1;
771         } else {
772                 /*disable tx pause*/
773                 nctrl.ncmd.s.param2 = 0;
774         }
775
776         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
777         if (ret < 0) {
778                 dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
779                 return -EINVAL;
780         }
781
782         oct->rx_pause = pause->rx_pause;
783         oct->tx_pause = pause->tx_pause;
784
785         return 0;
786 }
787
788 static void
789 lio_get_ethtool_stats(struct net_device *netdev,
790                       struct ethtool_stats *stats  __attribute__((unused)),
791                       u64 *data)
792 {
793         struct lio *lio = GET_LIO(netdev);
794         struct octeon_device *oct_dev = lio->oct_dev;
795         struct net_device_stats *netstats = &netdev->stats;
796         int i = 0, j;
797
798         netdev->netdev_ops->ndo_get_stats(netdev);
799         octnet_get_link_stats(netdev);
800
801         /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
802         data[i++] = CVM_CAST64(netstats->rx_packets);
803         /*sum of oct->instr_queue[iq_no]->stats.tx_done */
804         data[i++] = CVM_CAST64(netstats->tx_packets);
805         /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
806         data[i++] = CVM_CAST64(netstats->rx_bytes);
807         /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
808         data[i++] = CVM_CAST64(netstats->tx_bytes);
809         data[i++] = CVM_CAST64(netstats->rx_errors);
810         data[i++] = CVM_CAST64(netstats->tx_errors);
811         /*sum of oct->droq[oq_no]->stats->rx_dropped +
812          *oct->droq[oq_no]->stats->dropped_nodispatch +
813          *oct->droq[oq_no]->stats->dropped_toomany +
814          *oct->droq[oq_no]->stats->dropped_nomem
815          */
816         data[i++] = CVM_CAST64(netstats->rx_dropped);
817         /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
818         data[i++] = CVM_CAST64(netstats->tx_dropped);
819
820         /* firmware tx stats */
821         /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
822          *fromhost.fw_total_sent
823          */
824         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
825         /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
826         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
827         /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
828         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
829         /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
830         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
831         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
832          *fw_err_drop
833          */
834         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
835
836         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
837         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
838         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
839          *fw_tso_fwd
840          */
841         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
842         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
843          *fw_err_tso
844          */
845         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
846         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
847          *fw_tx_vxlan
848          */
849         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
850
851         /* mac tx statistics */
852         /*CVMX_BGXX_CMRX_TX_STAT5 */
853         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
854         /*CVMX_BGXX_CMRX_TX_STAT4 */
855         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
856         /*CVMX_BGXX_CMRX_TX_STAT15 */
857         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
858         /*CVMX_BGXX_CMRX_TX_STAT14 */
859         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
860         /*CVMX_BGXX_CMRX_TX_STAT17 */
861         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
862         /*CVMX_BGXX_CMRX_TX_STAT0 */
863         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
864         /*CVMX_BGXX_CMRX_TX_STAT3 */
865         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
866         /*CVMX_BGXX_CMRX_TX_STAT2 */
867         data[i++] =
868                 CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
869         /*CVMX_BGXX_CMRX_TX_STAT0 */
870         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
871         /*CVMX_BGXX_CMRX_TX_STAT1 */
872         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
873         /*CVMX_BGXX_CMRX_TX_STAT16 */
874         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
875         /*CVMX_BGXX_CMRX_TX_STAT6 */
876         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
877
878         /* RX firmware stats */
879         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
880          *fw_total_rcvd
881          */
882         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
883         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
884          *fw_total_fwd
885          */
886         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
887         /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
888         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
889         /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
890         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
891         /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
892         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
893         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
894          *fw_err_pko
895          */
896         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
897         /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
898         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
899         /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
900          *fromwire.fw_err_drop
901          */
902         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
903
904         /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
905          *fromwire.fw_rx_vxlan
906          */
907         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
908         /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
909          *fromwire.fw_rx_vxlan_err
910          */
911         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
912
913         /* LRO */
914         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
915          *fw_lro_pkts
916          */
917         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
918         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
919          *fw_lro_octs
920          */
921         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
922         /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
923         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
924         /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
925         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
926         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
927          *fw_lro_aborts_port
928          */
929         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
930         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
931          *fw_lro_aborts_seq
932          */
933         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
934         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
935          *fw_lro_aborts_tsval
936          */
937         data[i++] =
938                 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
939         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
940          *fw_lro_aborts_timer
941          */
942         data[i++] =
943                 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
944         /* intrmod: packet forward rate */
945         /*per_core_stats[j].link_stats[i].fromwire.fwd_rate */
946         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
947
948         /* mac: link-level stats */
949         /*CVMX_BGXX_CMRX_RX_STAT0 */
950         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
951         /*CVMX_BGXX_CMRX_RX_STAT1 */
952         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
953         /*CVMX_PKI_STATX_STAT5 */
954         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
955         /*CVMX_PKI_STATX_STAT5 */
956         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
957         /*wqe->word2.err_code or wqe->word2.err_level */
958         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
959         /*CVMX_BGXX_CMRX_RX_STAT2 */
960         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
961         /*CVMX_BGXX_CMRX_RX_STAT6 */
962         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
963         /*CVMX_BGXX_CMRX_RX_STAT4 */
964         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
965         /*wqe->word2.err_code or wqe->word2.err_level */
966         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
967         /*lio->link_changes*/
968         data[i++] = CVM_CAST64(lio->link_changes);
969
970         for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
971                 if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
972                         continue;
973                 /*packets to network port*/
974                 /*# of packets tx to network */
975                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
976                 /*# of bytes tx to network */
977                 data[i++] =
978                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
979                 /*# of packets dropped */
980                 data[i++] =
981                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
982                 /*# of tx fails due to queue full */
983                 data[i++] =
984                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
985                 /*XXX gather entries sent */
986                 data[i++] =
987                         CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
988
989                 /*instruction to firmware: data and control */
990                 /*# of instructions to the queue */
991                 data[i++] =
992                         CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
993                 /*# of instructions processed */
994                 data[i++] = CVM_CAST64(
995                                 oct_dev->instr_queue[j]->stats.instr_processed);
996                 /*# of instructions could not be processed */
997                 data[i++] = CVM_CAST64(
998                                 oct_dev->instr_queue[j]->stats.instr_dropped);
999                 /*bytes sent through the queue */
1000                 data[i++] =
1001                         CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
1002
1003                 /*tso request*/
1004                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1005                 /*vxlan request*/
1006                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1007                 /*txq restart*/
1008                 data[i++] =
1009                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
1010         }
1011
1012         /* RX */
1013         for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
1014                 if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
1015                         continue;
1016
1017                 /*packets sent to the TCP/IP network stack */
1018                 /*# of packets to network stack */
1019                 data[i++] =
1020                         CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
1021                 /*# of bytes to network stack */
1022                 data[i++] =
1023                         CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
1024                 /*# of packets dropped */
1025                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1026                                        oct_dev->droq[j]->stats.dropped_toomany +
1027                                        oct_dev->droq[j]->stats.rx_dropped);
1028                 data[i++] =
1029                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1030                 data[i++] =
1031                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1032                 data[i++] =
1033                         CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1034
1035                 /*control and data path*/
1036                 data[i++] =
1037                         CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1038                 data[i++] =
1039                         CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1040                 data[i++] =
1041                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1042
1043                 data[i++] =
1044                         CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1045                 data[i++] =
1046                         CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1047         }
1048 }
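
/* The data[] layout produced above must track lio_get_strings() exactly:
 * both walk the queues with the same io_qmask test, so skipping a queue in
 * one place but not the other would shift every later counter.  A purely
 * illustrative sanity check (not present in this driver) would be:
 *
 *	WARN_ON_ONCE(i != lio_get_sset_count(netdev, ETH_SS_STATS));
 */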
1049
1050 static void lio_vf_get_ethtool_stats(struct net_device *netdev,
1051                                      struct ethtool_stats *stats
1052                                      __attribute__((unused)),
1053                                      u64 *data)
1054 {
1055         struct net_device_stats *netstats = &netdev->stats;
1056         struct lio *lio = GET_LIO(netdev);
1057         struct octeon_device *oct_dev = lio->oct_dev;
1058         int i = 0, j, vj;
1059
1060         netdev->netdev_ops->ndo_get_stats(netdev);
1061         /* sum of oct->droq[oq_no]->stats->rx_pkts_received */
1062         data[i++] = CVM_CAST64(netstats->rx_packets);
1063         /* sum of oct->instr_queue[iq_no]->stats.tx_done */
1064         data[i++] = CVM_CAST64(netstats->tx_packets);
1065         /* sum of oct->droq[oq_no]->stats->rx_bytes_received */
1066         data[i++] = CVM_CAST64(netstats->rx_bytes);
1067         /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1068         data[i++] = CVM_CAST64(netstats->tx_bytes);
1069         data[i++] = CVM_CAST64(netstats->rx_errors);
1070         data[i++] = CVM_CAST64(netstats->tx_errors);
1071          /* sum of oct->droq[oq_no]->stats->rx_dropped +
1072           * oct->droq[oq_no]->stats->dropped_nodispatch +
1073           * oct->droq[oq_no]->stats->dropped_toomany +
1074           * oct->droq[oq_no]->stats->dropped_nomem
1075           */
1076         data[i++] = CVM_CAST64(netstats->rx_dropped);
1077         /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1078         data[i++] = CVM_CAST64(netstats->tx_dropped);
1079         /* lio->link_changes */
1080         data[i++] = CVM_CAST64(lio->link_changes);
1081
1082         for (vj = 0; vj < lio->linfo.num_txpciq; vj++) {
1083                 j = lio->linfo.txpciq[vj].s.q_no;
1084
1085                 /* packets to network port */
1086                 /* # of packets tx to network */
1087                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1088                  /* # of bytes tx to network */
1089                 data[i++] = CVM_CAST64(
1090                                 oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1091                 /* # of packets dropped */
1092                 data[i++] = CVM_CAST64(
1093                                 oct_dev->instr_queue[j]->stats.tx_dropped);
1094                 /* # of tx fails due to queue full */
1095                 data[i++] = CVM_CAST64(
1096                                 oct_dev->instr_queue[j]->stats.tx_iq_busy);
1097                 /* XXX gather entries sent */
1098                 data[i++] = CVM_CAST64(
1099                                 oct_dev->instr_queue[j]->stats.sgentry_sent);
1100
1101                 /* instruction to firmware: data and control */
1102                 /* # of instructions to the queue */
1103                 data[i++] = CVM_CAST64(
1104                                 oct_dev->instr_queue[j]->stats.instr_posted);
1105                 /* # of instructions processed */
1106                 data[i++] =
1107                     CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
1108                 /* # of instructions could not be processed */
1109                 data[i++] =
1110                     CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
1111                 /* bytes sent through the queue */
1112                 data[i++] = CVM_CAST64(
1113                                 oct_dev->instr_queue[j]->stats.bytes_sent);
1114                 /* tso request */
1115                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1116                 /* vxlan request */
1117                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1118                 /* txq restart */
1119                 data[i++] = CVM_CAST64(
1120                                 oct_dev->instr_queue[j]->stats.tx_restart);
1121         }
1122
1123         /* RX */
1124         for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) {
1125                 j = lio->linfo.rxpciq[vj].s.q_no;
1126
1127                 /* packets sent to the TCP/IP network stack */
1128                 /* # of packets to network stack */
1129                 data[i++] = CVM_CAST64(
1130                                 oct_dev->droq[j]->stats.rx_pkts_received);
1131                 /* # of bytes to network stack */
1132                 data[i++] = CVM_CAST64(
1133                                 oct_dev->droq[j]->stats.rx_bytes_received);
1134                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1135                                        oct_dev->droq[j]->stats.dropped_toomany +
1136                                        oct_dev->droq[j]->stats.rx_dropped);
1137                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1138                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1139                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1140
1141                 /* control and data path */
1142                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1143                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1144                 data[i++] =
1145                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1146
1147                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1148                 data[i++] =
1149                     CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1150         }
1151 }
1152
1153 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
1154 {
1155         struct octeon_device *oct_dev = lio->oct_dev;
1156         int i;
1157
1158         switch (oct_dev->chip_id) {
1159         case OCTEON_CN23XX_PF_VID:
1160         case OCTEON_CN23XX_VF_VID:
1161                 for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
1162                         sprintf(data, "%s", oct_priv_flags_strings[i]);
1163                         data += ETH_GSTRING_LEN;
1164                 }
1165                 break;
1166         case OCTEON_CN68XX:
1167         case OCTEON_CN66XX:
1168                 break;
1169         default:
1170                 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1171                 break;
1172         }
1173 }
1174
1175 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1176 {
1177         struct lio *lio = GET_LIO(netdev);
1178         struct octeon_device *oct_dev = lio->oct_dev;
1179         int num_iq_stats, num_oq_stats, i, j;
1180         int num_stats;
1181
1182         switch (stringset) {
1183         case ETH_SS_STATS:
1184                 num_stats = ARRAY_SIZE(oct_stats_strings);
1185                 for (j = 0; j < num_stats; j++) {
1186                         sprintf(data, "%s", oct_stats_strings[j]);
1187                         data += ETH_GSTRING_LEN;
1188                 }
1189
1190                 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1191                 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1192                         if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1193                                 continue;
1194                         for (j = 0; j < num_iq_stats; j++) {
1195                                 sprintf(data, "tx-%d-%s", i,
1196                                         oct_iq_stats_strings[j]);
1197                                 data += ETH_GSTRING_LEN;
1198                         }
1199                 }
1200
1201                 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1202                 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1203                         if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1204                                 continue;
1205                         for (j = 0; j < num_oq_stats; j++) {
1206                                 sprintf(data, "rx-%d-%s", i,
1207                                         oct_droq_stats_strings[j]);
1208                                 data += ETH_GSTRING_LEN;
1209                         }
1210                 }
1211                 break;
1212
1213         case ETH_SS_PRIV_FLAGS:
1214                 lio_get_priv_flags_strings(lio, data);
1215                 break;
1216         default:
1217                 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1218                 break;
1219         }
1220 }
1221
1222 static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
1223                                u8 *data)
1224 {
1225         int num_iq_stats, num_oq_stats, i, j;
1226         struct lio *lio = GET_LIO(netdev);
1227         struct octeon_device *oct_dev = lio->oct_dev;
1228         int num_stats;
1229
1230         switch (stringset) {
1231         case ETH_SS_STATS:
1232                 num_stats = ARRAY_SIZE(oct_vf_stats_strings);
1233                 for (j = 0; j < num_stats; j++) {
1234                         sprintf(data, "%s", oct_vf_stats_strings[j]);
1235                         data += ETH_GSTRING_LEN;
1236                 }
1237
1238                 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1239                 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1240                         if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1241                                 continue;
1242                         for (j = 0; j < num_iq_stats; j++) {
1243                                 sprintf(data, "tx-%d-%s", i,
1244                                         oct_iq_stats_strings[j]);
1245                                 data += ETH_GSTRING_LEN;
1246                         }
1247                 }
1248
1249                 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1250                 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1251                         if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1252                                 continue;
1253                         for (j = 0; j < num_oq_stats; j++) {
1254                                 sprintf(data, "rx-%d-%s", i,
1255                                         oct_droq_stats_strings[j]);
1256                                 data += ETH_GSTRING_LEN;
1257                         }
1258                 }
1259                 break;
1260
1261         case ETH_SS_PRIV_FLAGS:
1262                 lio_get_priv_flags_strings(lio, data);
1263                 break;
1264         default:
1265                 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1266                 break;
1267         }
1268 }
1269
1270 static int lio_get_priv_flags_ss_count(struct lio *lio)
1271 {
1272         struct octeon_device *oct_dev = lio->oct_dev;
1273
1274         switch (oct_dev->chip_id) {
1275         case OCTEON_CN23XX_PF_VID:
1276         case OCTEON_CN23XX_VF_VID:
1277                 return ARRAY_SIZE(oct_priv_flags_strings);
1278         case OCTEON_CN68XX:
1279         case OCTEON_CN66XX:
1280                 return -EOPNOTSUPP;
1281         default:
1282                 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1283                 return -EOPNOTSUPP;
1284         }
1285 }
1286
1287 static int lio_get_sset_count(struct net_device *netdev, int sset)
1288 {
1289         struct lio *lio = GET_LIO(netdev);
1290         struct octeon_device *oct_dev = lio->oct_dev;
1291
1292         switch (sset) {
1293         case ETH_SS_STATS:
1294                 return (ARRAY_SIZE(oct_stats_strings) +
1295                         ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1296                         ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1297         case ETH_SS_PRIV_FLAGS:
1298                 return lio_get_priv_flags_ss_count(lio);
1299         default:
1300                 return -EOPNOTSUPP;
1301         }
1302 }
1303
1304 static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
1305 {
1306         struct lio *lio = GET_LIO(netdev);
1307         struct octeon_device *oct_dev = lio->oct_dev;
1308
1309         switch (sset) {
1310         case ETH_SS_STATS:
1311                 return (ARRAY_SIZE(oct_vf_stats_strings) +
1312                         ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1313                         ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1314         case ETH_SS_PRIV_FLAGS:
1315                 return lio_get_priv_flags_ss_count(lio);
1316         default:
1317                 return -EOPNOTSUPP;
1318         }
1319 }
1320
1321 /* Callback function for intrmod */
1322 static void octnet_intrmod_callback(struct octeon_device *oct_dev,
1323                                     u32 status,
1324                                     void *ptr)
1325 {
1326         struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1327         struct oct_intrmod_context *ctx;
1328
1329         ctx  = (struct oct_intrmod_context *)sc->ctxptr;
1330
1331         ctx->status = status;
1332
1333         WRITE_ONCE(ctx->cond, 1);
1334
1335         /* This barrier is required to be sure that the response has been
1336          * written fully before waking up the handler
1337          */
1338         wmb();
1339
1340         wake_up_interruptible(&ctx->wc);
1341 }
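
/* As the comment above notes, the wmb() orders the writes to ctx->status and
 * ctx->cond ahead of the wakeup, so the sleeper in octnet_get_intrmod_cfg()
 * only sees cond == 1 once the response is fully written.  In outline:
 *
 *	callback: ctx->status = status; WRITE_ONCE(ctx->cond, 1); wmb();
 *		  wake_up_interruptible(&ctx->wc);
 *	caller:   sleep_cond(&ctx->wc, &ctx->cond); then read resp/ctx->status
 */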
1342
1343 /* Get interrupt moderation parameters */
1344 static int octnet_get_intrmod_cfg(struct lio *lio,
1345                                   struct oct_intrmod_cfg *intr_cfg)
1346 {
1347         struct octeon_soft_command *sc;
1348         struct oct_intrmod_context *ctx;
1349         struct oct_intrmod_resp *resp;
1350         int retval;
1351         struct octeon_device *oct_dev = lio->oct_dev;
1352
1353         /* Alloc soft command */
1354         sc = (struct octeon_soft_command *)
1355                 octeon_alloc_soft_command(oct_dev,
1356                                           0,
1357                                           sizeof(struct oct_intrmod_resp),
1358                                           sizeof(struct oct_intrmod_context));
1359
1360         if (!sc)
1361                 return -ENOMEM;
1362
1363         resp = (struct oct_intrmod_resp *)sc->virtrptr;
1364         memset(resp, 0, sizeof(struct oct_intrmod_resp));
1365
1366         ctx = (struct oct_intrmod_context *)sc->ctxptr;
1367         memset(ctx, 0, sizeof(struct oct_intrmod_context));
1368         WRITE_ONCE(ctx->cond, 0);
1369         ctx->octeon_id = lio_get_device_id(oct_dev);
1370         init_waitqueue_head(&ctx->wc);
1371
1372         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1373
1374         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1375                                     OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
1376
1377         sc->callback = octnet_intrmod_callback;
1378         sc->callback_arg = sc;
1379         sc->wait_time = 1000;
1380
1381         retval = octeon_send_soft_command(oct_dev, sc);
1382         if (retval == IQ_SEND_FAILED) {
1383                 octeon_free_soft_command(oct_dev, sc);
1384                 return -EINVAL;
1385         }
1386
1387         /* Sleep on a wait queue until the cond flag indicates that the
1388          * response has arrived or the request has timed out.
1389          */
1390         if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
1391                 dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
1392                 goto intrmod_info_wait_intr;
1393         }
1394
1395         retval = ctx->status || resp->status;
1396         if (retval) {
1397                 dev_err(&oct_dev->pci_dev->dev,
1398                         "Get interrupt moderation parameters failed\n");
1399                 goto intrmod_info_wait_fail;
1400         }
1401
1402         octeon_swap_8B_data((u64 *)&resp->intrmod,
1403                             (sizeof(struct oct_intrmod_cfg)) / 8);
1404         memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
1405         octeon_free_soft_command(oct_dev, sc);
1406
1407         return 0;
1408
1409 intrmod_info_wait_fail:
1410
1411         octeon_free_soft_command(oct_dev, sc);
1412
1413 intrmod_info_wait_intr:
1414
1415         return -ENODEV;
1416 }
1417
1418 /*  Configure interrupt moderation parameters */
1419 static int octnet_set_intrmod_cfg(struct lio *lio,
1420                                   struct oct_intrmod_cfg *intr_cfg)
1421 {
1422         struct octeon_soft_command *sc;
1423         struct oct_intrmod_context *ctx;
1424         struct oct_intrmod_cfg *cfg;
1425         int retval;
1426         struct octeon_device *oct_dev = lio->oct_dev;
1427
1428         /* Alloc soft command */
1429         sc = (struct octeon_soft_command *)
1430                 octeon_alloc_soft_command(oct_dev,
1431                                           sizeof(struct oct_intrmod_cfg),
1432                                           0,
1433                                           sizeof(struct oct_intrmod_context));
1434
1435         if (!sc)
1436                 return -ENOMEM;
1437
1438         ctx = (struct oct_intrmod_context *)sc->ctxptr;
1439
1440         WRITE_ONCE(ctx->cond, 0);
1441         ctx->octeon_id = lio_get_device_id(oct_dev);
1442         init_waitqueue_head(&ctx->wc);
1443
1444         cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
1445
1446         memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
1447         octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
1448
1449         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1450
1451         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1452                                     OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
1453
1454         sc->callback = octnet_intrmod_callback;
1455         sc->callback_arg = sc;
1456         sc->wait_time = 1000;
1457
1458         retval = octeon_send_soft_command(oct_dev, sc);
1459         if (retval == IQ_SEND_FAILED) {
1460                 octeon_free_soft_command(oct_dev, sc);
1461                 return -EINVAL;
1462         }
1463
1464         /* Sleep on a wait queue until the cond flag indicates that the
1465          * response has arrived or the request has timed out.
1466          */
1467         if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
1468                 retval = ctx->status;
1469                 if (retval)
1470                         dev_err(&oct_dev->pci_dev->dev,
1471                                 "intrmod config failed. Status: %llx\n",
1472                                 CVM_CAST64(retval));
1473                 else
1474                         dev_info(&oct_dev->pci_dev->dev,
1475                                  "Rx-Adaptive Interrupt moderation %s\n",
1476                                  (intr_cfg->rx_enable) ?
1477                                  "enabled" : "disabled");
1478
1479                 octeon_free_soft_command(oct_dev, sc);
1480
1481                 return ((retval) ? -ENODEV : 0);
1482         }
1483
1484         dev_err(&oct_dev->pci_dev->dev, "intrmod config wait interrupted\n");
1485
1486         return -EINTR;
1487 }
1488
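/* Completion callback for OPCODE_NIC_PORT_STATS: byte-swaps the response and
 * copies the firmware's RX/TX counters into oct_dev->link_stats, then signals
 * the waiter through ctrl->complete.
 */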
1489 static void
1490 octnet_nic_stats_callback(struct octeon_device *oct_dev,
1491                           u32 status, void *ptr)
1492 {
1493         struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1494         struct oct_nic_stats_resp *resp =
1495             (struct oct_nic_stats_resp *)sc->virtrptr;
1496         struct oct_nic_stats_ctrl *ctrl =
1497             (struct oct_nic_stats_ctrl *)sc->ctxptr;
1498         struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1499         struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1500
1501         struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1502         struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1503
1504         if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
1505                 octeon_swap_8B_data((u64 *)&resp->stats,
1506                                     (sizeof(struct oct_link_stats)) >> 3);
1507
1508                 /* RX link-level stats */
1509                 rstats->total_rcvd = rsp_rstats->total_rcvd;
1510                 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1511                 rstats->total_bcst = rsp_rstats->total_bcst;
1512                 rstats->total_mcst = rsp_rstats->total_mcst;
1513                 rstats->runts      = rsp_rstats->runts;
1514                 rstats->ctl_rcvd   = rsp_rstats->ctl_rcvd;
1515                 /* Accounts for over/under-run of buffers */
1516                 rstats->fifo_err  = rsp_rstats->fifo_err;
1517                 rstats->dmac_drop = rsp_rstats->dmac_drop;
1518                 rstats->fcs_err   = rsp_rstats->fcs_err;
1519                 rstats->jabber_err = rsp_rstats->jabber_err;
1520                 rstats->l2_err    = rsp_rstats->l2_err;
1521                 rstats->frame_err = rsp_rstats->frame_err;
1522
1523                 /* RX firmware stats */
1524                 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1525                 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1526                 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1527                 rstats->fw_err_link = rsp_rstats->fw_err_link;
1528                 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1529                 rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1530                 rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1531
1532                 /* Number of packets that are LROed      */
1533                 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1534                 /* Number of octets that are LROed       */
1535                 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1536                 /* Number of LRO packets formed          */
1537                 rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1538                 /* Number of times LRO of a packet was aborted */
1539                 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1540                 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1541                 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1542                 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1543                 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1544                 /* intrmod: packet forward rate */
1545                 rstats->fwd_rate = rsp_rstats->fwd_rate;
1546
1547                 /* TX link-level stats */
1548                 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1549                 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1550                 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1551                 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1552                 tstats->ctl_sent = rsp_tstats->ctl_sent;
1553                 /* Packets sent after one collision */
1554                 tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1555                 /* Packets sent after multiple collisions */
1556                 tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1557                 /* Packets not sent due to max collisions */
1558                 tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1559                 /* Packets not sent due to max deferrals */
1560                 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1561                 /* Accounts for over/under-run of buffers */
1562                 tstats->fifo_err = rsp_tstats->fifo_err;
1563                 tstats->runts = rsp_tstats->runts;
1564                 /* Total number of collisions detected */
1565                 tstats->total_collisions = rsp_tstats->total_collisions;
1566
1567                 /* TX firmware stats */
1568                 tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1569                 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1570                 tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1571                 tstats->fw_err_link = rsp_tstats->fw_err_link;
1572                 tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1573                 tstats->fw_tso = rsp_tstats->fw_tso;
1574                 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1575                 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1576                 tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1577
1578                 resp->status = 1;
1579         } else {
1580                 resp->status = -1;
1581         }
1582         complete(&ctrl->complete);
1583 }
1584
1585 /*  Request link statistics from the firmware and wait for the response */
1586 static int octnet_get_link_stats(struct net_device *netdev)
1587 {
1588         struct lio *lio = GET_LIO(netdev);
1589         struct octeon_device *oct_dev = lio->oct_dev;
1590
1591         struct octeon_soft_command *sc;
1592         struct oct_nic_stats_ctrl *ctrl;
1593         struct oct_nic_stats_resp *resp;
1594
1595         int retval;
1596
1597         /* Alloc soft command */
1598         sc = (struct octeon_soft_command *)
1599                 octeon_alloc_soft_command(oct_dev,
1600                                           0,
1601                                           sizeof(struct oct_nic_stats_resp),
1602                                           sizeof(struct octnic_ctrl_pkt));
1603
1604         if (!sc)
1605                 return -ENOMEM;
1606
1607         resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1608         memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1609
1610         ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1611         memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1612         ctrl->netdev = netdev;
1613         init_completion(&ctrl->complete);
1614
1615         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1616
1617         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1618                                     OPCODE_NIC_PORT_STATS, 0, 0, 0);
1619
1620         sc->callback = octnet_nic_stats_callback;
1621         sc->callback_arg = sc;
1622         sc->wait_time = 500;    /* in milliseconds */
1623
1624         retval = octeon_send_soft_command(oct_dev, sc);
1625         if (retval == IQ_SEND_FAILED) {
1626                 octeon_free_soft_command(oct_dev, sc);
1627                 return -EINVAL;
1628         }
1629
1630         wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1631
1632         if (resp->status != 1) {
1633                 octeon_free_soft_command(oct_dev, sc);
1634
1635                 return -EINVAL;
1636         }
1637
1638         octeon_free_soft_command(oct_dev, sc);
1639
1640         return 0;
1641 }
1642
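/* ethtool get_coalesce handler: fetch the interrupt moderation configuration
 * from the firmware and translate it into ethtool_coalesce fields.  Reached
 * from userspace via, e.g., "ethtool -c eth0" (interface name illustrative).
 */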
1643 static int lio_get_intr_coalesce(struct net_device *netdev,
1644                                  struct ethtool_coalesce *intr_coal)
1645 {
1646         struct lio *lio = GET_LIO(netdev);
1647         struct octeon_device *oct = lio->oct_dev;
1648         struct octeon_instr_queue *iq;
1649         struct oct_intrmod_cfg intrmod_cfg;
1650
1651         if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
1652                 return -ENODEV;
1653
1654         switch (oct->chip_id) {
1655         case OCTEON_CN23XX_PF_VID:
1656         case OCTEON_CN23XX_VF_VID: {
1657                 if (!intrmod_cfg.rx_enable) {
1658                         intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
1659                         intr_coal->rx_max_coalesced_frames =
1660                                 oct->rx_max_coalesced_frames;
1661                 }
1662                 if (!intrmod_cfg.tx_enable)
1663                         intr_coal->tx_max_coalesced_frames =
1664                                 oct->tx_max_coalesced_frames;
1665                 break;
1666         }
1667         case OCTEON_CN68XX:
1668         case OCTEON_CN66XX: {
1669                 struct octeon_cn6xxx *cn6xxx =
1670                         (struct octeon_cn6xxx *)oct->chip;
1671
1672                 if (!intrmod_cfg.rx_enable) {
1673                         intr_coal->rx_coalesce_usecs =
1674                                 CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
1675                         intr_coal->rx_max_coalesced_frames =
1676                                 CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
1677                 }
1678                 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
1679                 intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
1680                 break;
1681         }
1682         default:
1683                 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1684                 return -EINVAL;
1685         }
1686         if (intrmod_cfg.rx_enable) {
1687                 intr_coal->use_adaptive_rx_coalesce =
1688                         intrmod_cfg.rx_enable;
1689                 intr_coal->rate_sample_interval =
1690                         intrmod_cfg.check_intrvl;
1691                 intr_coal->pkt_rate_high =
1692                         intrmod_cfg.maxpkt_ratethr;
1693                 intr_coal->pkt_rate_low =
1694                         intrmod_cfg.minpkt_ratethr;
1695                 intr_coal->rx_max_coalesced_frames_high =
1696                         intrmod_cfg.rx_maxcnt_trigger;
1697                 intr_coal->rx_coalesce_usecs_high =
1698                         intrmod_cfg.rx_maxtmr_trigger;
1699                 intr_coal->rx_coalesce_usecs_low =
1700                         intrmod_cfg.rx_mintmr_trigger;
1701                 intr_coal->rx_max_coalesced_frames_low =
1702                         intrmod_cfg.rx_mincnt_trigger;
1703         }
1704         if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
1705             (intrmod_cfg.tx_enable)) {
1706                 intr_coal->use_adaptive_tx_coalesce =
1707                         intrmod_cfg.tx_enable;
1708                 intr_coal->tx_max_coalesced_frames_high =
1709                         intrmod_cfg.tx_maxcnt_trigger;
1710                 intr_coal->tx_max_coalesced_frames_low =
1711                         intrmod_cfg.tx_mincnt_trigger;
1712         }
1713         return 0;
1714 }
1715
1716 /* Enable/disable adaptive (auto) interrupt moderation */
1717 static int oct_cfg_adaptive_intr(struct lio *lio,
1718                                  struct oct_intrmod_cfg *intrmod_cfg,
1719                                  struct ethtool_coalesce *intr_coal)
1720 {
1721         int ret = 0;
1722
1723         if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
1724                 intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
1725                 intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
1726                 intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
1727         }
1728         if (intrmod_cfg->rx_enable) {
1729                 intrmod_cfg->rx_maxcnt_trigger =
1730                         intr_coal->rx_max_coalesced_frames_high;
1731                 intrmod_cfg->rx_maxtmr_trigger =
1732                         intr_coal->rx_coalesce_usecs_high;
1733                 intrmod_cfg->rx_mintmr_trigger =
1734                         intr_coal->rx_coalesce_usecs_low;
1735                 intrmod_cfg->rx_mincnt_trigger =
1736                         intr_coal->rx_max_coalesced_frames_low;
1737         }
1738         if (intrmod_cfg->tx_enable) {
1739                 intrmod_cfg->tx_maxcnt_trigger =
1740                         intr_coal->tx_max_coalesced_frames_high;
1741                 intrmod_cfg->tx_mincnt_trigger =
1742                         intr_coal->tx_max_coalesced_frames_low;
1743         }
1744
1745         ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
1746
1747         return ret;
1748 }
1749
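/* Program the packet-count threshold (ethtool "rx-frames") that triggers an
 * interrupt on the output (RX) queues.
 */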
1750 static int
1751 oct_cfg_rx_intrcnt(struct lio *lio,
1752                    struct oct_intrmod_cfg *intrmod,
1753                    struct ethtool_coalesce *intr_coal)
1754 {
1755         struct octeon_device *oct = lio->oct_dev;
1756         u32 rx_max_coalesced_frames;
1757
1758         /* Configure packet-count-based interrupt thresholds */
1759         switch (oct->chip_id) {
1760         case OCTEON_CN68XX:
1761         case OCTEON_CN66XX: {
1762                 struct octeon_cn6xxx *cn6xxx =
1763                         (struct octeon_cn6xxx *)oct->chip;
1764
1765                 if (!intr_coal->rx_max_coalesced_frames)
1766                         rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
1767                 else
1768                         rx_max_coalesced_frames =
1769                                 intr_coal->rx_max_coalesced_frames;
1770                 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
1771                                  rx_max_coalesced_frames);
1772                 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
1773                 break;
1774         }
1775         case OCTEON_CN23XX_PF_VID: {
1776                 int q_no;
1777
1778                 if (!intr_coal->rx_max_coalesced_frames)
1779                         rx_max_coalesced_frames = intrmod->rx_frames;
1780                 else
1781                         rx_max_coalesced_frames =
1782                             intr_coal->rx_max_coalesced_frames;
1783                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1784                         q_no += oct->sriov_info.pf_srn;
1785                         octeon_write_csr64(
1786                             oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
1787                             (octeon_read_csr64(
1788                                  oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
1789                              (0x3fffff00000000UL)) |
1790                                 (rx_max_coalesced_frames - 1));
1791                         /*consider setting resend bit*/
1792                 }
1793                 intrmod->rx_frames = rx_max_coalesced_frames;
1794                 oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
1795                 break;
1796         }
1797         case OCTEON_CN23XX_VF_VID: {
1798                 int q_no;
1799
1800                 if (!intr_coal->rx_max_coalesced_frames)
1801                         rx_max_coalesced_frames = intrmod->rx_frames;
1802                 else
1803                         rx_max_coalesced_frames =
1804                             intr_coal->rx_max_coalesced_frames;
1805                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1806                         octeon_write_csr64(
1807                             oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
1808                             (octeon_read_csr64(
1809                                  oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
1810                              (0x3fffff00000000UL)) |
1811                                 (rx_max_coalesced_frames - 1));
1812                         /*consider writing to resend bit here*/
1813                 }
1814                 intrmod->rx_frames = rx_max_coalesced_frames;
1815                 oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
1816                 break;
1817         }
1818         default:
1819                 return -EINVAL;
1820         }
1821         return 0;
1822 }
1823
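/* Program the time threshold (ethtool "rx-usecs") after which an interrupt
 * fires on the output (RX) queues.
 */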
1824 static int oct_cfg_rx_intrtime(struct lio *lio,
1825                                struct oct_intrmod_cfg *intrmod,
1826                                struct ethtool_coalesce *intr_coal)
1827 {
1828         struct octeon_device *oct = lio->oct_dev;
1829         u32 time_threshold, rx_coalesce_usecs;
1830
1831         /* Configure time-based interrupt thresholds */
1832         switch (oct->chip_id) {
1833         case OCTEON_CN68XX:
1834         case OCTEON_CN66XX: {
1835                 struct octeon_cn6xxx *cn6xxx =
1836                         (struct octeon_cn6xxx *)oct->chip;
1837                 if (!intr_coal->rx_coalesce_usecs)
1838                         rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
1839                 else
1840                         rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1841
1842                 time_threshold = lio_cn6xxx_get_oq_ticks(oct,
1843                                                          rx_coalesce_usecs);
1844                 octeon_write_csr(oct,
1845                                  CN6XXX_SLI_OQ_INT_LEVEL_TIME,
1846                                  time_threshold);
1847
1848                 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
1849                 break;
1850         }
1851         case OCTEON_CN23XX_PF_VID: {
1852                 u64 time_threshold;
1853                 int q_no;
1854
1855                 if (!intr_coal->rx_coalesce_usecs)
1856                         rx_coalesce_usecs = intrmod->rx_usecs;
1857                 else
1858                         rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1859                 time_threshold =
1860                     cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
1861                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1862                         q_no += oct->sriov_info.pf_srn;
1863                         octeon_write_csr64(oct,
1864                                            CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
1865                                            (intrmod->rx_frames |
1866                                             ((u64)time_threshold << 32)));
1867                         /*consider writing to resend bit here*/
1868                 }
1869                 intrmod->rx_usecs = rx_coalesce_usecs;
1870                 oct->rx_coalesce_usecs = rx_coalesce_usecs;
1871                 break;
1872         }
1873         case OCTEON_CN23XX_VF_VID: {
1874                 u64 time_threshold;
1875                 int q_no;
1876
1877                 if (!intr_coal->rx_coalesce_usecs)
1878                         rx_coalesce_usecs = intrmod->rx_usecs;
1879                 else
1880                         rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1881
1882                 time_threshold =
1883                     cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
1884                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1885                         octeon_write_csr64(
1886                                 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
1887                                 (intrmod->rx_frames |
1888                                  ((u64)time_threshold << 32)));
1889                         /*consider setting resend bit*/
1890                 }
1891                 intrmod->rx_usecs = rx_coalesce_usecs;
1892                 oct->rx_coalesce_usecs = rx_coalesce_usecs;
1893                 break;
1894         }
1895         default:
1896                 return -EINVAL;
1897         }
1898
1899         return 0;
1900 }
1901
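/* Program the instruction-count watermark (ethtool "tx-frames") that triggers
 * a TX completion interrupt on CN23XX; a no-op on CN6XXX, where the IQ fill
 * threshold is set in lio_set_intr_coalesce() instead.
 */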
1902 static int
1903 oct_cfg_tx_intrcnt(struct lio *lio,
1904                    struct oct_intrmod_cfg *intrmod,
1905                    struct ethtool_coalesce *intr_coal)
1906 {
1907         struct octeon_device *oct = lio->oct_dev;
1908         u32 iq_intr_pkt;
1909         void __iomem *inst_cnt_reg;
1910         u64 val;
1911
1912         /* Configure packet-count-based interrupt thresholds */
1913         switch (oct->chip_id) {
1914         case OCTEON_CN68XX:
1915         case OCTEON_CN66XX:
1916                 break;
1917         case OCTEON_CN23XX_VF_VID:
1918         case OCTEON_CN23XX_PF_VID: {
1919                 int q_no;
1920
1921                 if (!intr_coal->tx_max_coalesced_frames)
1922                         iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
1923                                       CN23XX_PKT_IN_DONE_WMARK_MASK;
1924                 else
1925                         iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
1926                                       CN23XX_PKT_IN_DONE_WMARK_MASK;
1927                 for (q_no = 0; q_no < oct->num_iqs; q_no++) {
1928                         inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
1929                         val = readq(inst_cnt_reg);
1930                         /* clear wmark and count; don't write the count back */
1931                         val = (val & 0xFFFF000000000000ULL) |
1932                               ((u64)(iq_intr_pkt - 1)
1933                                << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
1934                         writeq(val, inst_cnt_reg);
1935                         /*consider setting resend bit*/
1936                 }
1937                 intrmod->tx_frames = iq_intr_pkt;
1938                 oct->tx_max_coalesced_frames = iq_intr_pkt;
1939                 break;
1940         }
1941         default:
1942                 return -EINVAL;
1943         }
1944         return 0;
1945 }
1946
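/* ethtool set_coalesce handler: validate the request for the chip type,
 * enable/disable adaptive moderation, and program fixed RX/TX thresholds when
 * adaptive mode is off.  Illustrative userspace invocation (interface name is
 * an example):
 *   ethtool -C eth0 adaptive-rx off rx-usecs 64 rx-frames 64 tx-frames 64
 */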
1947 static int lio_set_intr_coalesce(struct net_device *netdev,
1948                                  struct ethtool_coalesce *intr_coal)
1949 {
1950         struct lio *lio = GET_LIO(netdev);
1951         int ret;
1952         struct octeon_device *oct = lio->oct_dev;
1953         struct oct_intrmod_cfg intrmod = {0};
1954         u32 j, q_no;
1955         int db_max, db_min;
1956
1957         switch (oct->chip_id) {
1958         case OCTEON_CN68XX:
1959         case OCTEON_CN66XX:
1960                 db_min = CN6XXX_DB_MIN;
1961                 db_max = CN6XXX_DB_MAX;
1962                 if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
1963                     (intr_coal->tx_max_coalesced_frames <= db_max)) {
1964                         for (j = 0; j < lio->linfo.num_txpciq; j++) {
1965                                 q_no = lio->linfo.txpciq[j].s.q_no;
1966                                 oct->instr_queue[q_no]->fill_threshold =
1967                                         intr_coal->tx_max_coalesced_frames;
1968                         }
1969                 } else {
1970                         dev_err(&oct->pci_dev->dev,
1971                                 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
1972                                 intr_coal->tx_max_coalesced_frames,
1973                                 db_min, db_max);
1974                         return -EINVAL;
1975                 }
1976                 break;
1977         case OCTEON_CN23XX_PF_VID:
1978         case OCTEON_CN23XX_VF_VID:
1979                 break;
1980         default:
1981                 return -EINVAL;
1982         }
1983
1984         intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
1985         intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
1986         intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
1987         intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
1988         intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
1989
1990         ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
1991
1992         if (!intr_coal->use_adaptive_rx_coalesce) {
1993                 ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
1994                 if (ret)
1995                         goto ret_intrmod;
1996
1997                 ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
1998                 if (ret)
1999                         goto ret_intrmod;
2000         } else {
2001                 oct->rx_coalesce_usecs =
2002                         CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2003                 oct->rx_max_coalesced_frames =
2004                         CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2005         }
2006
2007         if (!intr_coal->use_adaptive_tx_coalesce) {
2008                 ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
2009                 if (ret)
2010                         goto ret_intrmod;
2011         } else {
2012                 oct->tx_max_coalesced_frames =
2013                         CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2014         }
2015
2016         return 0;
2017 ret_intrmod:
2018         return ret;
2019 }
2020
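/* ethtool get_ts_info handler: report timestamping capabilities and the PHC
 * clock index ("ethtool -T <ifname>" from userspace).
 */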
2021 static int lio_get_ts_info(struct net_device *netdev,
2022                            struct ethtool_ts_info *info)
2023 {
2024         struct lio *lio = GET_LIO(netdev);
2025
2026         info->so_timestamping =
2027 #ifdef PTP_HARDWARE_TIMESTAMPING
2028                 SOF_TIMESTAMPING_TX_HARDWARE |
2029                 SOF_TIMESTAMPING_RX_HARDWARE |
2030                 SOF_TIMESTAMPING_RAW_HARDWARE |
2031                 SOF_TIMESTAMPING_TX_SOFTWARE |
2032 #endif
2033                 SOF_TIMESTAMPING_RX_SOFTWARE |
2034                 SOF_TIMESTAMPING_SOFTWARE;
2035
2036         if (lio->ptp_clock)
2037                 info->phc_index = ptp_clock_index(lio->ptp_clock);
2038         else
2039                 info->phc_index = -1;
2040
2041 #ifdef PTP_HARDWARE_TIMESTAMPING
2042         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
2043
2044         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2045                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2046                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2047                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
2048 #endif
2049
2050         return 0;
2051 }
2052
2053 /* Return register dump len. */
2054 static int lio_get_regs_len(struct net_device *dev)
2055 {
2056         struct lio *lio = GET_LIO(dev);
2057         struct octeon_device *oct = lio->oct_dev;
2058
2059         switch (oct->chip_id) {
2060         case OCTEON_CN23XX_PF_VID:
2061                 return OCT_ETHTOOL_REGDUMP_LEN_23XX;
2062         case OCTEON_CN23XX_VF_VID:
2063                 return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
2064         default:
2065                 return OCT_ETHTOOL_REGDUMP_LEN;
2066         }
2067 }
2068
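/* Dump selected CN23XX PF CSRs as formatted text into the register buffer */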
2069 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
2070 {
2071         u32 reg;
2072         u8 pf_num = oct->pf_num;
2073         int len = 0;
2074         int i;
2075
2076         /* PCI  Window Registers */
2077
2078         len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2079
2080         /*0x29030 or 0x29040*/
2081         reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
2082         len += sprintf(s + len,
2083                        "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
2084                        reg, oct->pcie_port, oct->pf_num,
2085                        (u64)octeon_read_csr64(oct, reg));
2086
2087         /*0x27080 or 0x27090*/
2088         reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
2089         len +=
2090             sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
2091                     reg, oct->pcie_port, oct->pf_num,
2092                     (u64)octeon_read_csr64(oct, reg));
2093
2094         /*0x27000 or 0x27010*/
2095         reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
2096         len +=
2097             sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
2098                     reg, oct->pcie_port, oct->pf_num,
2099                     (u64)octeon_read_csr64(oct, reg));
2100
2101         /*0x29120*/
2102         reg = 0x29120;
2103         len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
2104                        (u64)octeon_read_csr64(oct, reg));
2105
2106         /*0x27300*/
2107         reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2108               (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2109         len += sprintf(
2110             s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
2111             oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
2112
2113         /*0x27200*/
2114         reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2115               (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2116         len += sprintf(s + len,
2117                        "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
2118                        reg, oct->pcie_port, oct->pf_num,
2119                        (u64)octeon_read_csr64(oct, reg));
2120
2121         /*0x29130*/
2122         reg = CN23XX_SLI_PKT_CNT_INT;
2123         len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
2124                        (u64)octeon_read_csr64(oct, reg));
2125
2126         /*0x29140*/
2127         reg = CN23XX_SLI_PKT_TIME_INT;
2128         len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
2129                        (u64)octeon_read_csr64(oct, reg));
2130
2131         /*0x29160*/
2132         reg = 0x29160;
2133         len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
2134                        (u64)octeon_read_csr64(oct, reg));
2135
2136         /*0x29180*/
2137         reg = CN23XX_SLI_OQ_WMARK;
2138         len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
2139                        reg, (u64)octeon_read_csr64(oct, reg));
2140
2141         /*0x291E0*/
2142         reg = CN23XX_SLI_PKT_IOQ_RING_RST;
2143         len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
2144                        (u64)octeon_read_csr64(oct, reg));
2145
2146         /*0x29210*/
2147         reg = CN23XX_SLI_GBL_CONTROL;
2148         len += sprintf(s + len,
2149                        "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
2150                        (u64)octeon_read_csr64(oct, reg));
2151
2152         /*0x29220*/
2153         reg = 0x29220;
2154         len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
2155                        reg, (u64)octeon_read_csr64(oct, reg));
2156
2157         /*PF only*/
2158         if (pf_num == 0) {
2159                 /*0x29260*/
2160                 reg = CN23XX_SLI_OUT_BP_EN_W1S;
2161                 len += sprintf(s + len,
2162                                "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S):  %016llx\n",
2163                                reg, (u64)octeon_read_csr64(oct, reg));
2164         } else if (pf_num == 1) {
2165                 /*0x29270*/
2166                 reg = CN23XX_SLI_OUT_BP_EN2_W1S;
2167                 len += sprintf(s + len,
2168                                "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
2169                                reg, (u64)octeon_read_csr64(oct, reg));
2170         }
2171
2172         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2173                 reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
2174                 len +=
2175                     sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2176                             reg, i, (u64)octeon_read_csr64(oct, reg));
2177         }
2178
2179         /*0x10040*/
2180         for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2181                 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2182                 len += sprintf(s + len,
2183                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2184                                reg, i, (u64)octeon_read_csr64(oct, reg));
2185         }
2186
2187         /*0x10080*/
2188         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2189                 reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
2190                 len += sprintf(s + len,
2191                                "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2192                                reg, i, (u64)octeon_read_csr64(oct, reg));
2193         }
2194
2195         /*0x10090*/
2196         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2197                 reg = CN23XX_SLI_OQ_SIZE(i);
2198                 len += sprintf(
2199                     s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2200                     reg, i, (u64)octeon_read_csr64(oct, reg));
2201         }
2202
2203         /*0x10050*/
2204         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2205                 reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
2206                 len += sprintf(
2207                         s + len,
2208                         "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2209                         reg, i, (u64)octeon_read_csr64(oct, reg));
2210         }
2211
2212         /*0x10070*/
2213         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2214                 reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
2215                 len += sprintf(s + len,
2216                                "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2217                                reg, i, (u64)octeon_read_csr64(oct, reg));
2218         }
2219
2220         /*0x100a0*/
2221         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2222                 reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
2223                 len += sprintf(s + len,
2224                                "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2225                                reg, i, (u64)octeon_read_csr64(oct, reg));
2226         }
2227
2228         /*0x100b0*/
2229         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2230                 reg = CN23XX_SLI_OQ_PKTS_SENT(i);
2231                 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2232                                reg, i, (u64)octeon_read_csr64(oct, reg));
2233         }
2234
2235         /*0x100c0*/
2236         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2237                 reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
2238                 len += sprintf(s + len,
2239                                "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2240                                reg, i, (u64)octeon_read_csr64(oct, reg));
2241
2242                 /*0x10000*/
2243                 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2244                         reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
2245                         len += sprintf(
2246                                 s + len,
2247                                 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2248                                 reg, i, (u64)octeon_read_csr64(oct, reg));
2249                 }
2250
2251                 /*0x10010*/
2252                 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2253                         reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
2254                         len += sprintf(
2255                             s + len,
2256                             "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
2257                             i, (u64)octeon_read_csr64(oct, reg));
2258                 }
2259
2260                 /*0x10020*/
2261                 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2262                         reg = CN23XX_SLI_IQ_DOORBELL(i);
2263                         len += sprintf(
2264                             s + len,
2265                             "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2266                             reg, i, (u64)octeon_read_csr64(oct, reg));
2267                 }
2268
2269                 /*0x10030*/
2270                 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2271                         reg = CN23XX_SLI_IQ_SIZE(i);
2272                         len += sprintf(
2273                             s + len,
2274                             "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2275                             reg, i, (u64)octeon_read_csr64(oct, reg));
2276                 }
2277
2278                 /*0x10040*/
2279                 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++)
2280                         reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2281                 len += sprintf(s + len,
2282                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2283                                reg, i, (u64)octeon_read_csr64(oct, reg));
2284         }
2285
2286         return len;
2287 }
2288
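/* Dump the per-ring CN23XX VF CSRs as formatted text into the register buffer */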
2289 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
2290 {
2291         int len = 0;
2292         u32 reg;
2293         int i;
2294
2295         /* PCI  Window Registers */
2296
2297         len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2298
2299         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2300                 reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
2301                 len += sprintf(s + len,
2302                                "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2303                                reg, i, (u64)octeon_read_csr64(oct, reg));
2304         }
2305
2306         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2307                 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2308                 len += sprintf(s + len,
2309                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2310                                reg, i, (u64)octeon_read_csr64(oct, reg));
2311         }
2312
2313         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2314                 reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
2315                 len += sprintf(s + len,
2316                                "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2317                                reg, i, (u64)octeon_read_csr64(oct, reg));
2318         }
2319
2320         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2321                 reg = CN23XX_VF_SLI_OQ_SIZE(i);
2322                 len += sprintf(s + len,
2323                                "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2324                                reg, i, (u64)octeon_read_csr64(oct, reg));
2325         }
2326
2327         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2328                 reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
2329                 len += sprintf(s + len,
2330                                "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2331                                reg, i, (u64)octeon_read_csr64(oct, reg));
2332         }
2333
2334         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2335                 reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
2336                 len += sprintf(s + len,
2337                                "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2338                                reg, i, (u64)octeon_read_csr64(oct, reg));
2339         }
2340
2341         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2342                 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
2343                 len += sprintf(s + len,
2344                                "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2345                                reg, i, (u64)octeon_read_csr64(oct, reg));
2346         }
2347
2348         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2349                 reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
2350                 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2351                                reg, i, (u64)octeon_read_csr64(oct, reg));
2352         }
2353
2354         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2355                 reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
2356                 len += sprintf(s + len,
2357                                "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2358                                reg, i, (u64)octeon_read_csr64(oct, reg));
2359         }
2360
2361         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2362                 reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
2363                 len += sprintf(s + len,
2364                                "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
2365                                reg, i, (u64)octeon_read_csr64(oct, reg));
2366         }
2367
2368         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2369                 reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
2370                 len += sprintf(s + len,
2371                                "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2372                                reg, i, (u64)octeon_read_csr64(oct, reg));
2373         }
2374
2375         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2376                 reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
2377                 len += sprintf(s + len,
2378                                "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2379                                reg, i, (u64)octeon_read_csr64(oct, reg));
2380         }
2381
2382         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2383                 reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
2384                 len += sprintf(s + len,
2385                                "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2386                                reg, i, (u64)octeon_read_csr64(oct, reg));
2387         }
2388
2389         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2390                 reg = CN23XX_VF_SLI_IQ_SIZE(i);
2391                 len += sprintf(s + len,
2392                                "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2393                                reg, i, (u64)octeon_read_csr64(oct, reg));
2394         }
2395
2396         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2397                 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2398                 len += sprintf(s + len,
2399                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2400                                reg, i, (u64)octeon_read_csr64(oct, reg));
2401         }
2402
2403         return len;
2404 }
2405
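/* Dump CN66XX/CN68XX window, interrupt, queue and DMA CSRs as formatted text */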
2406 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2407 {
2408         u32 reg;
2409         int i, len = 0;
2410
2411         /* PCI  Window Registers */
2412
2413         len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2414         reg = CN6XXX_WIN_WR_ADDR_LO;
2415         len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2416                        CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2417         reg = CN6XXX_WIN_WR_ADDR_HI;
2418         len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2419                        CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2420         reg = CN6XXX_WIN_RD_ADDR_LO;
2421         len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2422                        CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2423         reg = CN6XXX_WIN_RD_ADDR_HI;
2424         len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2425                        CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2426         reg = CN6XXX_WIN_WR_DATA_LO;
2427         len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2428                        CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2429         reg = CN6XXX_WIN_WR_DATA_HI;
2430         len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2431                        CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2432         len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2433                        CN6XXX_WIN_WR_MASK_REG,
2434                        octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2435
2436         /* PCI  Interrupt Register */
2437         len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2438                        CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
2439                                                 CN6XXX_SLI_INT_ENB64_PORT0));
2440         len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2441                        CN6XXX_SLI_INT_ENB64_PORT1,
2442                        octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2443         len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2444                        octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2445
2446         /* PCI  Output queue registers */
2447         for (i = 0; i < oct->num_oqs; i++) {
2448                 reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2449                 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2450                                reg, i, octeon_read_csr(oct, reg));
2451                 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2452                 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2453                                reg, i, octeon_read_csr(oct, reg));
2454         }
2455         reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2456         len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2457                        reg, octeon_read_csr(oct, reg));
2458         reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2459         len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2460                        reg, octeon_read_csr(oct, reg));
2461
2462         /* PCI  Input queue registers */
2463         for (i = 0; i <= 3; i++) {
2464                 u32 reg;
2465
2466                 reg = CN6XXX_SLI_IQ_DOORBELL(i);
2467                 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2468                                reg, i, octeon_read_csr(oct, reg));
2469                 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2470                 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2471                                reg, i, octeon_read_csr(oct, reg));
2472         }
2473
2474         /* PCI  DMA registers */
2475
2476         len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2477                        CN6XXX_DMA_CNT(0),
2478                        octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2479         reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2480         len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2481                        CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2482         reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2483         len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2484                        CN6XXX_DMA_TIME_INT_LEVEL(0),
2485                        octeon_read_csr(oct, reg));
2486
2487         len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2488                        CN6XXX_DMA_CNT(1),
2489                        octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2490         reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2491         len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2492                        CN6XXX_DMA_PKT_INT_LEVEL(1),
2493                        octeon_read_csr(oct, reg));
2494         reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
2495         len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2496                        CN6XXX_DMA_TIME_INT_LEVEL(1),
2497                        octeon_read_csr(oct, reg));
2498
2499         /* PCI  Index registers */
2500
2501         len += sprintf(s + len, "\n");
2502
2503         for (i = 0; i < 16; i++) {
2504                 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2505                 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2506                                CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2507         }
2508
2509         return len;
2510 }
2511
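/* Dump selected dwords of the CN6XXX PCI config space as formatted text */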
2512 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2513 {
2514         u32 val;
2515         int i, len = 0;
2516
2517         /* PCI CONFIG Registers */
2518
2519         len += sprintf(s + len,
2520                        "\n\t Octeon Config space Registers\n\n");
2521
2522         for (i = 0; i <= 13; i++) {
2523                 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2524                 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2525                                (i * 4), i, val);
2526         }
2527
2528         for (i = 30; i <= 34; i++) {
2529                 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2530                 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2531                                (i * 4), i, val);
2532         }
2533
2534         return len;
2535 }
2536
2537 /*  Return register dump to the user app.  */
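/* The dump is retrieved from userspace with, e.g., "ethtool -d <ifname>". */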
2538 static void lio_get_regs(struct net_device *dev,
2539                          struct ethtool_regs *regs, void *regbuf)
2540 {
2541         struct lio *lio = GET_LIO(dev);
2542         int len = 0;
2543         struct octeon_device *oct = lio->oct_dev;
2544
2545         regs->version = OCT_ETHTOOL_REGSVER;
2546
2547         switch (oct->chip_id) {
2548         case OCTEON_CN23XX_PF_VID:
2549                 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
2550                 len += cn23xx_read_csr_reg(regbuf + len, oct);
2551                 break;
2552         case OCTEON_CN23XX_VF_VID:
2553                 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
2554                 len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
2555                 break;
2556         case OCTEON_CN68XX:
2557         case OCTEON_CN66XX:
2558                 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
2559                 len += cn6xxx_read_csr_reg(regbuf + len, oct);
2560                 len += cn6xxx_read_config_reg(regbuf + len, oct);
2561                 break;
2562         default:
2563                 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
2564                         __func__, oct->chip_id);
2565         }
2566 }
2567
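/* Report the driver's private-flags bitmap ("ethtool --show-priv-flags") */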
2568 static u32 lio_get_priv_flags(struct net_device *netdev)
2569 {
2570         struct lio *lio = GET_LIO(netdev);
2571
2572         return lio->oct_dev->priv_flags;
2573 }
2574
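/* Update driver private flags; only OCT_PRIV_FLAG_TX_BYTES is currently
 * acted on ("ethtool --set-priv-flags").
 */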
2575 static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
2576 {
2577         struct lio *lio = GET_LIO(netdev);
2578         bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
2579
2580         lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
2581                           intr_by_tx_bytes);
2582         return 0;
2583 }
2584
2585 static const struct ethtool_ops lio_ethtool_ops = {
2586         .get_link_ksettings     = lio_get_link_ksettings,
2587         .get_link               = ethtool_op_get_link,
2588         .get_drvinfo            = lio_get_drvinfo,
2589         .get_ringparam          = lio_ethtool_get_ringparam,
2590         .get_channels           = lio_ethtool_get_channels,
2591         .set_phys_id            = lio_set_phys_id,
2592         .get_eeprom_len         = lio_get_eeprom_len,
2593         .get_eeprom             = lio_get_eeprom,
2594         .get_strings            = lio_get_strings,
2595         .get_ethtool_stats      = lio_get_ethtool_stats,
2596         .get_pauseparam         = lio_get_pauseparam,
2597         .set_pauseparam         = lio_set_pauseparam,
2598         .get_regs_len           = lio_get_regs_len,
2599         .get_regs               = lio_get_regs,
2600         .get_msglevel           = lio_get_msglevel,
2601         .set_msglevel           = lio_set_msglevel,
2602         .get_sset_count         = lio_get_sset_count,
2603         .get_coalesce           = lio_get_intr_coalesce,
2604         .set_coalesce           = lio_set_intr_coalesce,
2605         .get_priv_flags         = lio_get_priv_flags,
2606         .set_priv_flags         = lio_set_priv_flags,
2607         .get_ts_info            = lio_get_ts_info,
2608 };
2609
2610 static const struct ethtool_ops lio_vf_ethtool_ops = {
2611         .get_link_ksettings     = lio_get_link_ksettings,
2612         .get_link               = ethtool_op_get_link,
2613         .get_drvinfo            = lio_get_vf_drvinfo,
2614         .get_ringparam          = lio_ethtool_get_ringparam,
2615         .get_channels           = lio_ethtool_get_channels,
2616         .get_strings            = lio_vf_get_strings,
2617         .get_ethtool_stats      = lio_vf_get_ethtool_stats,
2618         .get_regs_len           = lio_get_regs_len,
2619         .get_regs               = lio_get_regs,
2620         .get_msglevel           = lio_get_msglevel,
2621         .set_msglevel           = lio_vf_set_msglevel,
2622         .get_sset_count         = lio_vf_get_sset_count,
2623         .get_coalesce           = lio_get_intr_coalesce,
2624         .set_coalesce           = lio_set_intr_coalesce,
2625         .get_priv_flags         = lio_get_priv_flags,
2626         .set_priv_flags         = lio_set_priv_flags,
2627         .get_ts_info            = lio_get_ts_info,
2628 };
2629
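/* Install the VF or PF ethtool_ops on the netdev according to device type */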
2630 void liquidio_set_ethtool_ops(struct net_device *netdev)
2631 {
2632         struct lio *lio = GET_LIO(netdev);
2633         struct octeon_device *oct = lio->oct_dev;
2634
2635         if (OCTEON_CN23XX_VF(oct))
2636                 netdev->ethtool_ops = &lio_vf_ethtool_ops;
2637         else
2638                 netdev->ethtool_ops = &lio_ethtool_ops;
2639 }