1 /**********************************************************************
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2015 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information
21 **********************************************************************/
22 #include <linux/version.h>
23 #include <linux/netdevice.h>
24 #include <linux/net_tstamp.h>
25 #include <linux/ethtool.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pci.h>
28 #include "octeon_config.h"
29 #include "liquidio_common.h"
30 #include "octeon_droq.h"
31 #include "octeon_iq.h"
32 #include "response_manager.h"
33 #include "octeon_device.h"
34 #include "octeon_nic.h"
35 #include "octeon_main.h"
36 #include "octeon_network.h"
37 #include "cn66xx_regs.h"
38 #include "cn66xx_device.h"
39 #include "cn68xx_regs.h"
40 #include "cn68xx_device.h"
41 #include "liquidio_image.h"
/* Per-command context for an in-flight MDIO soft command; used by the
 * completion callback to signal the waiting issuer.
 * NOTE(review): source chunk is truncated — struct members are partially
 * missing here.
 */
43 struct oct_mdio_cmd_context {
/* Firmware response wrapper carrying the MDIO command result. */
49 struct oct_mdio_cmd_resp {
51 struct oct_mdio_cmd resp;
55 #define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
57 /* Octeon's interface mode of operation */
59 INTERFACE_MODE_DISABLED,
72 INTERFACE_MODE_QSGMII,
/* NOTE(review): duplicates the kernel's ARRAY_SIZE() macro — consider
 * using ARRAY_SIZE() from <linux/kernel.h> instead.
 */
76 #define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
/* Size of the buffer handed back by lio_get_regs() and its version tag. */
77 #define OCT_ETHTOOL_REGDUMP_LEN 4096
78 #define OCT_ETHTOOL_REGSVER 1
/* Per-instruction-queue (TX) stat names reported via get_strings/get_ethtool_stats. */
80 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
/* Per-droq (RX) stat names; order must match lio_get_ethtool_stats(). */
93 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
96 "Dropped no dispatch",
/* Bits packed into nctrl.ncmd.s.more by lio_set_settings(). */
104 #define OCTNIC_NCMD_AUTONEG_ON 0x1
105 #define OCTNIC_NCMD_PHY_ON 0x2
/* ethtool .get_settings: report port type, supported/advertised modes,
 * speed and duplex from the cached link info.
 * NOTE(review): source chunk is truncated; some lines of this function
 * are missing.
 */
107 static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
109 struct lio *lio = GET_LIO(netdev);
110 struct octeon_device *oct = lio->oct_dev;
111 struct oct_link_info *linfo;
/* XAUI/RXAUI run at a fixed 10G full-duplex over fibre; autoneg off. */
115 if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
116 linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
117 ecmd->port = PORT_FIBRE;
119 (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
122 (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
123 ecmd->transceiver = XCVR_EXTERNAL;
124 ecmd->autoneg = AUTONEG_DISABLE;
127 dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
/* Report live speed/duplex only while link is up. */
130 if (linfo->link.s.status) {
131 ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
132 ecmd->duplex = linfo->link.s.duplex;
134 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
135 ecmd->duplex = DUPLEX_UNKNOWN;
/* ethtool .get_drvinfo: fill driver name/version, firmware version,
 * PCI bus info and regdump length.
 * NOTE(review): strncpy() does not guarantee NUL termination if the
 * source reaches the given length — the firmware-version and bus-info
 * copies below should be confirmed (strlcpy/strscpy would be safer).
 */
142 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
145 struct octeon_device *oct;
147 lio = GET_LIO(netdev);
150 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
151 strcpy(drvinfo->driver, "liquidio")
152 strcpy(drvinfo->version, LIQUIDIO_VERSION);
153 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
155 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
156 drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN;
/* ethtool .get_channels: report max and current RX/TX queue counts.
 * Values default to zero for chips other than CN6XXX.
 */
160 lio_ethtool_get_channels(struct net_device *dev,
161 struct ethtool_channels *channel)
163 struct lio *lio = GET_LIO(dev);
164 struct octeon_device *oct = lio->oct_dev;
165 u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
167 if (OCTEON_CN6XXX(oct)) {
168 struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
/* Limits come from chip config; counts from per-interface config. */
170 max_rx = CFG_GET_OQ_MAX_Q(conf6x);
171 max_tx = CFG_GET_IQ_MAX_Q(conf6x);
172 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
173 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
176 channel->max_rx = max_rx;
177 channel->max_tx = max_tx;
178 channel->rx_count = rx_count;
179 channel->tx_count = tx_count;
/* ethtool .get_eeprom_len: length of the textual board-info record
 * produced by lio_get_eeprom().
 * NOTE(review): 'buf' is declared on a line not visible in this chunk —
 * confirm it is large enough for the formatted string.
 */
182 static int lio_get_eeprom_len(struct net_device *netdev)
185 struct lio *lio = GET_LIO(netdev);
186 struct octeon_device *oct_dev = lio->oct_dev;
187 struct octeon_board_info *board_info;
190 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
191 len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
192 board_info->name, board_info->serial_number,
193 board_info->major, board_info->minor);
/* ethtool .get_eeprom: emit the board-info record as pseudo-EEPROM data.
 * Only offset 0 is supported.
 * NOTE(review): visible lines do not check eeprom->len against the
 * formatted output size before sprintf() into 'bytes' — confirm the
 * caller-provided buffer is always large enough (get_eeprom_len).
 */
199 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
202 struct lio *lio = GET_LIO(netdev);
203 struct octeon_device *oct_dev = lio->oct_dev;
204 struct octeon_board_info *board_info;
207 if (eeprom->offset != 0)
/* Magic is the PCI vendor ID, letting userspace identify the device. */
210 eeprom->magic = oct_dev->pci_dev->vendor;
211 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
213 sprintf((char *)bytes,
214 "boardname:%s serialnum:%s maj:%lld min:%lld\n",
215 board_info->name, board_info->serial_number,
216 board_info->major, board_info->minor);
/* Send a GPIO-access control command (addr/val) to the NIC firmware.
 * Used by lio_set_phys_id() to drive the PHY LED GPIO on CN66XX.
 * Returns nonzero from octnet_send_nic_ctrl_pkt() on failure.
 */
221 static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
223 struct lio *lio = GET_LIO(netdev);
224 struct octeon_device *oct = lio->oct_dev;
225 struct octnic_ctrl_pkt nctrl;
226 struct octnic_ctrl_params nparams;
229 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
232 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
233 nctrl.ncmd.s.param1 = lio->linfo.ifidx;
234 nctrl.ncmd.s.param2 = addr;
235 nctrl.ncmd.s.param3 = val;
/* wait_time is in the units expected by octnet_send_nic_ctrl_pkt(). */
236 nctrl.wait_time = 100;
237 nctrl.netpndev = (u64)netdev;
238 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
240 nparams.resp_order = OCTEON_RESP_ORDERED;
242 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
244 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
/* Completion callback for the MDIO soft command issued by
 * octnet_mdio45_access(): record success (1) or failure (-1) in the
 * command context's cond flag and wake the sleeping issuer.
 */
251 /* Callback for when mdio command response arrives
253 static void octnet_mdio_resp_callback(struct octeon_device *oct,
257 struct oct_mdio_cmd_resp *mdio_cmd_rsp;
258 struct oct_mdio_cmd_context *mdio_cmd_ctx;
259 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
261 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
262 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
264 oct = lio_get_device(mdio_cmd_ctx->octeon_id);
/* Fix: error message previously read "MIDO"; the bus is MDIO. */
266 dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
268 ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
270 ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
/* Waiter in octnet_mdio45_access() sleeps on this queue. */
272 wake_up_interruptible(&mdio_cmd_ctx->wc);
275 /* This routine provides PHY access routines for
 * Builds an MDIO soft command (op/loc/value), sends it to firmware,
 * sleeps until the completion callback flips the cond flag, then (for
 * reads) returns the value via *value.  The soft command is freed on
 * exit.  NOTE(review): chunk is truncated; error paths between the
 * visible lines are not fully shown.
 */
279 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
281 struct octeon_device *oct_dev = lio->oct_dev;
282 struct octeon_soft_command *sc;
283 struct oct_mdio_cmd_resp *mdio_cmd_rsp;
284 struct oct_mdio_cmd_context *mdio_cmd_ctx;
285 struct oct_mdio_cmd *mdio_cmd;
/* One allocation carries command data, response and context buffers. */
288 sc = (struct octeon_soft_command *)
289 octeon_alloc_soft_command(oct_dev,
290 sizeof(struct oct_mdio_cmd),
291 sizeof(struct oct_mdio_cmd_resp),
292 sizeof(struct oct_mdio_cmd_context));
297 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
298 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
299 mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
/* cond: 0 = pending, 1 = success, -1 = failure (set by callback). */
301 ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
302 mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
304 mdio_cmd->mdio_addr = loc;
306 mdio_cmd->value1 = *value;
307 mdio_cmd->value2 = lio->linfo.ifidx;
/* Firmware expects big-endian 64-bit words. */
308 octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
310 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
313 sc->wait_time = 1000;
314 sc->callback = octnet_mdio_resp_callback;
315 sc->callback_arg = sc;
317 init_waitqueue_head(&mdio_cmd_ctx->wc);
319 retval = octeon_send_soft_command(oct_dev, sc);
322 dev_err(&oct_dev->pci_dev->dev,
323 "octnet_mdio45_access instruction failed status: %x\n",
327 /* Sleep on a wait queue till the cond flag indicates that the
330 sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
331 retval = mdio_cmd_rsp->status;
333 dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
/* Swap the response back to host byte order before reading value1. */
336 octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
337 sizeof(struct oct_mdio_cmd) / 8);
339 if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
341 *value = mdio_cmd_rsp->resp.value1;
348 octeon_free_soft_command(oct_dev, sc);
/* ethtool .set_phys_id: blink the port LED for physical identification.
 * CN66XX drives the PHY LED via GPIO; CN68XX saves the PHY's LED
 * beacon/control registers over MDIO, programs beacon mode, and
 * restores the saved values when identification ends.
 * NOTE(review): chunk is truncated; switch-case boundaries and some
 * intermediate statements are missing from view.
 */
353 static int lio_set_phys_id(struct net_device *netdev,
354 enum ethtool_phys_id_state state)
356 struct lio *lio = GET_LIO(netdev);
357 struct octeon_device *oct = lio->oct_dev;
361 case ETHTOOL_ID_ACTIVE:
362 if (oct->chip_id == OCTEON_CN66XX) {
363 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
364 VITESSE_PHY_GPIO_DRIVEON);
367 } else if (oct->chip_id == OCTEON_CN68XX) {
368 /* Save the current LED settings */
369 ret = octnet_mdio45_access(lio, 0,
370 LIO68XX_LED_BEACON_ADDR,
371 &lio->phy_beacon_val);
375 ret = octnet_mdio45_access(lio, 0,
376 LIO68XX_LED_CTRL_ADDR,
381 /* Configure Beacon values */
382 value = LIO68XX_LED_BEACON_CFGON;
384 octnet_mdio45_access(lio, 1,
385 LIO68XX_LED_BEACON_ADDR,
390 value = LIO68XX_LED_CTRL_CFGON;
392 octnet_mdio45_access(lio, 1,
393 LIO68XX_LED_CTRL_ADDR,
/* ETHTOOL_ID_ON: drive the LED high (CN66XX only acts here). */
403 if (oct->chip_id == OCTEON_CN66XX) {
404 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
405 VITESSE_PHY_GPIO_HIGH);
407 } else if (oct->chip_id == OCTEON_CN68XX) {
/* ETHTOOL_ID_OFF: drive the LED low. */
415 if (oct->chip_id == OCTEON_CN66XX)
416 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
417 VITESSE_PHY_GPIO_LOW);
418 else if (oct->chip_id == OCTEON_CN68XX)
425 case ETHTOOL_ID_INACTIVE:
426 if (oct->chip_id == OCTEON_CN66XX) {
427 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
428 VITESSE_PHY_GPIO_DRIVEOFF);
429 } else if (oct->chip_id == OCTEON_CN68XX) {
430 /* Restore LED settings */
431 ret = octnet_mdio45_access(lio, 1,
432 LIO68XX_LED_CTRL_ADDR,
437 ret = octnet_mdio45_access(lio, 1,
438 LIO68XX_LED_BEACON_ADDR,
439 &lio->phy_beacon_val);
/* ethtool .get_ringparam: report RX/TX descriptor ring limits and
 * current sizes.  For jumbo MTUs the RX ring is reported through the
 * jumbo fields instead of the normal ones.
 */
456 lio_ethtool_get_ringparam(struct net_device *netdev,
457 struct ethtool_ringparam *ering)
459 struct lio *lio = GET_LIO(netdev);
460 struct octeon_device *oct = lio->oct_dev;
461 u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
464 if (OCTEON_CN6XXX(oct)) {
465 struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
467 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
468 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
469 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
470 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
/* Jumbo MTU: expose the ring via the jumbo fields only. */
473 if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
474 ering->rx_pending = 0;
475 ering->rx_max_pending = 0;
476 ering->rx_mini_pending = 0;
477 ering->rx_jumbo_pending = rx_pending;
478 ering->rx_mini_max_pending = 0;
479 ering->rx_jumbo_max_pending = rx_max_pending;
481 ering->rx_pending = rx_pending;
482 ering->rx_max_pending = rx_max_pending;
483 ering->rx_mini_pending = 0;
484 ering->rx_jumbo_pending = 0;
485 ering->rx_mini_max_pending = 0;
486 ering->rx_jumbo_max_pending = 0;
489 ering->tx_pending = tx_pending;
490 ering->tx_max_pending = tx_max_pending;
493 static u32 lio_get_msglevel(struct net_device *netdev)
495 struct lio *lio = GET_LIO(netdev);
497 return lio->msg_enable;
/* ethtool .set_msglevel: store the new message mask; when the
 * NETIF_MSG_HW bit changes, tell the firmware to enable/disable
 * verbose output accordingly.
 */
500 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
502 struct lio *lio = GET_LIO(netdev);
/* XOR isolates bits that changed; act only if NETIF_MSG_HW toggled. */
504 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
505 if (msglvl & NETIF_MSG_HW)
506 liquidio_set_feature(netdev,
507 OCTNET_CMD_VERBOSE_ENABLE);
509 liquidio_set_feature(netdev,
510 OCTNET_CMD_VERBOSE_DISABLE);
513 lio->msg_enable = msglvl;
/* ethtool .get_pauseparam: report static pause-frame support; no
 * autonegotiation is supported by this driver.
 */
517 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
519 /* Notes: Not supporting any auto negotiation in these
520 * drivers. Just report pause frame support.
523 pause->rx_pause = 1; /* TODO: Need to support RX pause frame!!. */
/* ethtool .get_ethtool_stats: copy per-IQ (TX) then per-OQ (RX)
 * counters into 'data', in the same order as the string tables.
 * Queues absent from io_qmask are skipped.
 */
527 lio_get_ethtool_stats(struct net_device *netdev,
528 struct ethtool_stats *stats, u64 *data)
530 struct lio *lio = GET_LIO(netdev);
531 struct octeon_device *oct_dev = lio->oct_dev;
/* Instruction (TX) queue statistics. */
534 for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) {
535 if (!(oct_dev->io_qmask.iq & (1UL << j)))
538 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
541 oct_dev->instr_queue[j]->stats.instr_processed);
544 oct_dev->instr_queue[j]->stats.instr_dropped);
546 CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
548 CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
/* Live hardware instruction count, read from the CSR. */
550 readl(oct_dev->instr_queue[j]->inst_cnt_reg);
552 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
554 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
556 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
558 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
/* Output (RX) queue statistics. */
561 /* for (j = 0; j < oct_dev->num_oqs; j++){ */
562 for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) {
563 if (!(oct_dev->io_qmask.oq & (1UL << j)))
565 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
566 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
568 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
569 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
570 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
572 CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
574 CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
576 CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
/* ethtool .get_strings: emit "IQ<n> <stat>" then "OQ<n> <stat>" labels,
 * ETH_GSTRING_LEN bytes apart, matching lio_get_ethtool_stats() order.
 */
580 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
582 struct lio *lio = GET_LIO(netdev);
583 struct octeon_device *oct_dev = lio->oct_dev;
584 int num_iq_stats, num_oq_stats, i, j;
586 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
587 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
588 if (!(oct_dev->io_qmask.iq & (1UL << i)))
590 for (j = 0; j < num_iq_stats; j++) {
591 sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]);
592 data += ETH_GSTRING_LEN;
596 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
597 /* for (i = 0; i < oct_dev->num_oqs; i++) { */
598 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
599 if (!(oct_dev->io_qmask.oq & (1UL << i)))
601 for (j = 0; j < num_oq_stats; j++) {
602 sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]);
603 data += ETH_GSTRING_LEN;
/* ethtool .get_sset_count: total number of stat strings across all
 * active IQs and OQs.
 * NOTE(review): the 'sset' argument is not examined in the visible
 * lines — confirm ETH_SS_STATS is the only set handled.
 */
608 static int lio_get_sset_count(struct net_device *netdev, int sset)
610 struct lio *lio = GET_LIO(netdev);
611 struct octeon_device *oct_dev = lio->oct_dev;
613 return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) +
614 (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
/* ethtool .get_coalesce: report interrupt moderation settings.  When
 * adaptive moderation is off, report the static OQ time/packet
 * thresholds from chip config; when on, report the intrmod tuning
 * parameters.  TX coalescing is the IQ fill threshold.
 */
617 static int lio_get_intr_coalesce(struct net_device *netdev,
618 struct ethtool_coalesce *intr_coal)
620 struct lio *lio = GET_LIO(netdev);
621 struct octeon_device *oct = lio->oct_dev;
622 struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
623 struct octeon_instr_queue *iq;
624 struct oct_intrmod_cfg *intrmod_cfg;
626 intrmod_cfg = &oct->intrmod;
628 switch (oct->chip_id) {
629 /* case OCTEON_CN73XX: Todo */
/* Static thresholds apply only while adaptive moderation is off. */
633 if (!intrmod_cfg->intrmod_enable) {
634 intr_coal->rx_coalesce_usecs =
635 CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
636 intr_coal->rx_max_coalesced_frames =
637 CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
639 intr_coal->use_adaptive_rx_coalesce =
640 intrmod_cfg->intrmod_enable;
641 intr_coal->rate_sample_interval =
642 intrmod_cfg->intrmod_check_intrvl;
643 intr_coal->pkt_rate_high =
644 intrmod_cfg->intrmod_maxpkt_ratethr;
645 intr_coal->pkt_rate_low =
646 intrmod_cfg->intrmod_minpkt_ratethr;
647 intr_coal->rx_max_coalesced_frames_high =
648 intrmod_cfg->intrmod_maxcnt_trigger;
649 intr_coal->rx_coalesce_usecs_high =
650 intrmod_cfg->intrmod_maxtmr_trigger;
651 intr_coal->rx_coalesce_usecs_low =
652 intrmod_cfg->intrmod_mintmr_trigger;
653 intr_coal->rx_max_coalesced_frames_low =
654 intrmod_cfg->intrmod_mincnt_trigger;
/* TX side: expose the first TX queue's doorbell fill threshold. */
657 iq = oct->instr_queue[lio->linfo.txpciq[0]];
658 intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
662 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
669 /* Callback function for intrmod */
/* Completion callback for the interrupt-moderation config soft command:
 * logs the outcome and frees the soft command.
 */
670 static void octnet_intrmod_callback(struct octeon_device *oct_dev,
674 struct oct_intrmod_cmd *cmd = ptr;
675 struct octeon_soft_command *sc = cmd->sc;
/* Recover the issuing device from the command context. */
677 oct_dev = cmd->oct_dev;
680 dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
683 dev_info(&oct_dev->pci_dev->dev,
684 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
685 oct_dev->intrmod.intrmod_enable);
687 octeon_free_soft_command(oct_dev, sc);
690 /* Configure interrupt moderation parameters */
/* Copy the intrmod config into a soft command (byte-swapped for
 * firmware) and send it asynchronously; octnet_intrmod_callback()
 * frees the command on completion.  On send failure the command is
 * freed here.
 */
691 static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
693 struct octeon_soft_command *sc;
694 struct oct_intrmod_cmd *cmd;
695 struct oct_intrmod_cfg *cfg;
697 struct octeon_device *oct_dev = (struct octeon_device *)oct;
699 /* Alloc soft command */
700 sc = (struct octeon_soft_command *)
701 octeon_alloc_soft_command(oct_dev,
702 sizeof(struct oct_intrmod_cfg),
704 sizeof(struct oct_intrmod_cmd));
709 cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
710 cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
712 memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
/* Firmware expects big-endian 64-bit words. */
713 octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
716 cmd->oct_dev = oct_dev;
718 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
719 OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
721 sc->callback = octnet_intrmod_callback;
722 sc->callback_arg = cmd;
723 sc->wait_time = 1000;
725 retval = octeon_send_soft_command(oct_dev, sc);
727 octeon_free_soft_command(oct_dev, sc);
734 /* Enable/Disable auto interrupt Moderation */
/* Populate oct->intrmod from user-supplied ethtool_coalesce values,
 * falling back to LIO_INTRMOD_* defaults for any zero field, set the
 * enable flag to 'adaptive', and push the config to firmware.
 */
735 static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
736 *intr_coal, int adaptive)
739 struct octeon_device *oct = lio->oct_dev;
740 struct oct_intrmod_cfg *intrmod_cfg;
742 intrmod_cfg = &oct->intrmod;
/* Each parameter: take the user value if nonzero, else the default. */
745 if (intr_coal->rate_sample_interval)
746 intrmod_cfg->intrmod_check_intrvl =
747 intr_coal->rate_sample_interval;
749 intrmod_cfg->intrmod_check_intrvl =
750 LIO_INTRMOD_CHECK_INTERVAL;
752 if (intr_coal->pkt_rate_high)
753 intrmod_cfg->intrmod_maxpkt_ratethr =
754 intr_coal->pkt_rate_high;
756 intrmod_cfg->intrmod_maxpkt_ratethr =
757 LIO_INTRMOD_MAXPKT_RATETHR;
759 if (intr_coal->pkt_rate_low)
760 intrmod_cfg->intrmod_minpkt_ratethr =
761 intr_coal->pkt_rate_low;
763 intrmod_cfg->intrmod_minpkt_ratethr =
764 LIO_INTRMOD_MINPKT_RATETHR;
766 if (intr_coal->rx_max_coalesced_frames_high)
767 intrmod_cfg->intrmod_maxcnt_trigger =
768 intr_coal->rx_max_coalesced_frames_high;
770 intrmod_cfg->intrmod_maxcnt_trigger =
771 LIO_INTRMOD_MAXCNT_TRIGGER;
773 if (intr_coal->rx_coalesce_usecs_high)
774 intrmod_cfg->intrmod_maxtmr_trigger =
775 intr_coal->rx_coalesce_usecs_high;
777 intrmod_cfg->intrmod_maxtmr_trigger =
778 LIO_INTRMOD_MAXTMR_TRIGGER;
780 if (intr_coal->rx_coalesce_usecs_low)
781 intrmod_cfg->intrmod_mintmr_trigger =
782 intr_coal->rx_coalesce_usecs_low;
784 intrmod_cfg->intrmod_mintmr_trigger =
785 LIO_INTRMOD_MINTMR_TRIGGER;
787 if (intr_coal->rx_max_coalesced_frames_low)
788 intrmod_cfg->intrmod_mincnt_trigger =
789 intr_coal->rx_max_coalesced_frames_low;
791 intrmod_cfg->intrmod_mincnt_trigger =
792 LIO_INTRMOD_MINCNT_TRIGGER;
/* Commit: push the assembled config down to firmware. */
795 intrmod_cfg->intrmod_enable = adaptive;
796 ret = octnet_set_intrmod_cfg(oct, intrmod_cfg);
/* Configure packet-count based RX interrupt coalescing: disable
 * adaptive moderation, then program the OQ packet-level CSR and cache
 * the value in chip config.  Zero from the user selects the default.
 */
802 oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
805 struct octeon_device *oct = lio->oct_dev;
806 struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
807 u32 rx_max_coalesced_frames;
809 if (!intr_coal->rx_max_coalesced_frames)
810 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
812 rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames;
814 /* Disable adaptive interrupt modulation */
815 ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
819 /* Config Cnt based interrupt values */
820 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
821 rx_max_coalesced_frames);
822 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
/* Configure time-based RX interrupt coalescing: disable adaptive
 * moderation, convert microseconds to OQ ticks, program the time-level
 * CSR, and cache the usec value in chip config.  Zero selects the
 * default.
 */
826 static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
830 struct octeon_device *oct = lio->oct_dev;
831 struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
832 u32 time_threshold, rx_coalesce_usecs;
834 if (!intr_coal->rx_coalesce_usecs)
835 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
837 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
839 /* Disable adaptive interrupt modulation */
840 ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
844 /* Config Time based interrupt values */
845 time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs);
846 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
847 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
/* ethtool .set_coalesce: apply TX fill threshold to every TX queue
 * (rejecting values outside the CN6XXX doorbell range), then configure
 * RX coalescing — adaptive if requested, otherwise time and/or count
 * based, or defaults when everything is zero.
 */
852 static int lio_set_intr_coalesce(struct net_device *netdev,
853 struct ethtool_coalesce *intr_coal)
855 struct lio *lio = GET_LIO(netdev);
857 struct octeon_device *oct = lio->oct_dev;
860 if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) &&
861 (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) {
862 for (j = 0; j < lio->linfo.num_txpciq; j++) {
863 q_no = lio->linfo.txpciq[j];
864 oct->instr_queue[q_no]->fill_threshold =
865 intr_coal->tx_max_coalesced_frames;
868 dev_err(&oct->pci_dev->dev,
869 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
870 intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN,
875 /* User requested adaptive-rx on */
876 if (intr_coal->use_adaptive_rx_coalesce) {
877 ret = oct_cfg_adaptive_intr(lio, intr_coal, 1);
882 /* User requested adaptive-rx off and rx coalesce */
883 if ((intr_coal->rx_coalesce_usecs) &&
884 (!intr_coal->use_adaptive_rx_coalesce)) {
885 ret = oct_cfg_rx_intrtime(lio, intr_coal);
890 /* User requested adaptive-rx off and rx coalesce */
891 if ((intr_coal->rx_max_coalesced_frames) &&
892 (!intr_coal->use_adaptive_rx_coalesce)) {
893 ret = oct_cfg_rx_intrcnt(lio, intr_coal);
898 /* User requested adaptive-rx off, so use default coalesce params */
899 if ((!intr_coal->rx_max_coalesced_frames) &&
900 (!intr_coal->use_adaptive_rx_coalesce) &&
901 (!intr_coal->rx_coalesce_usecs)) {
902 dev_info(&oct->pci_dev->dev,
903 "Turning off adaptive-rx interrupt moderation\n");
904 dev_info(&oct->pci_dev->dev,
905 "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n",
906 CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT);
907 ret = oct_cfg_rx_intrtime(lio, intr_coal);
911 ret = oct_cfg_rx_intrcnt(lio, intr_coal);
/* ethtool .get_ts_info: advertise hardware/software timestamping
 * capabilities, the PTP clock index (if a clock is registered), and
 * supported TX/RX filter modes.
 */
921 static int lio_get_ts_info(struct net_device *netdev,
922 struct ethtool_ts_info *info)
924 struct lio *lio = GET_LIO(netdev);
926 info->so_timestamping =
927 SOF_TIMESTAMPING_TX_HARDWARE |
928 SOF_TIMESTAMPING_TX_SOFTWARE |
929 SOF_TIMESTAMPING_RX_HARDWARE |
930 SOF_TIMESTAMPING_RX_SOFTWARE |
931 SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
/* -1 tells userspace no PHC is associated with this interface. */
934 info->phc_index = ptp_clock_index(lio->ptp_clock);
936 info->phc_index = -1;
938 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
940 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
941 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
942 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
943 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
/* ethtool .set_settings: validate requested autoneg/speed/duplex,
 * reject XAUI/RXAUI (fixed-speed interfaces), then forward the
 * settings to the SE core application via a NIC control packet.
 */
948 static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
950 struct lio *lio = GET_LIO(netdev);
951 struct octeon_device *oct = lio->oct_dev;
952 struct oct_link_info *linfo;
953 struct octnic_ctrl_pkt nctrl;
954 struct octnic_ctrl_params nparams;
957 /* get the link info */
960 if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
/* Forced mode: only 10/100 with half/full duplex is accepted. */
963 if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
964 ecmd->speed != SPEED_10) ||
965 (ecmd->duplex != DUPLEX_HALF &&
966 ecmd->duplex != DUPLEX_FULL)))
969 /* Ethtool Support is not provided for XAUI and RXAUI Interfaces
970 * as they operate at fixed Speed and Duplex settings
972 if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
973 linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
974 dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n");
978 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
981 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
982 nctrl.wait_time = 1000;
983 nctrl.netpndev = (u64)netdev;
984 nctrl.ncmd.s.param1 = lio->linfo.ifidx;
985 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
987 /* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
988 * to SE core application using ncmd.s.more & ncmd.s.param
990 if (ecmd->autoneg == AUTONEG_ENABLE) {
992 nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
993 OCTNIC_NCMD_AUTONEG_ON;
994 nctrl.ncmd.s.param2 = ecmd->advertising;
997 nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
999 nctrl.ncmd.s.param3 = ecmd->duplex;
1001 nctrl.ncmd.s.param2 = ecmd->speed;
1004 nparams.resp_order = OCTEON_RESP_ORDERED;
1006 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
1008 dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
/* ethtool .nway_reset: restart autonegotiation by replaying the
 * current settings through lio_set_settings(); only acts while the
 * interface is running.
 */
1015 static int lio_nway_reset(struct net_device *netdev)
1017 if (netif_running(netdev)) {
1018 struct ethtool_cmd ecmd;
1020 memset(&ecmd, 0, sizeof(struct ethtool_cmd));
1024 lio_set_settings(netdev, &ecmd);
1029 /* Return register dump len. */
1030 static int lio_get_regs_len(struct net_device *dev)
1032 return OCT_ETHTOOL_REGDUMP_LEN;
/* Format the CN6XXX CSR register dump into 's': PCI window registers,
 * interrupt enables/summary, per-OQ and per-IQ registers, DMA engine
 * registers, and BAR1 index registers.  Returns the number of bytes
 * written.
 * Fix: the DMA_TIME_1 entry previously re-read CN6XXX_DMA_PKT_INT_LEVEL(1)
 * (copy-paste from the line above) while labelling it as the TIME
 * register; it now reads CN6XXX_DMA_TIME_INT_LEVEL(1), matching the
 * DMA engine 0 block.
 * NOTE(review): BAR1 values come from lio_pci_readq() but are printed
 * with %08x — confirm the declared type of 'reg' (not visible here)
 * does not truncate.
 */
1035 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
1040 /* PCI Window Registers */
1042 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
1043 reg = CN6XXX_WIN_WR_ADDR_LO;
1044 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
1045 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
1046 reg = CN6XXX_WIN_WR_ADDR_HI;
1047 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
1048 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
1049 reg = CN6XXX_WIN_RD_ADDR_LO;
1050 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
1051 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
1052 reg = CN6XXX_WIN_RD_ADDR_HI;
1053 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
1054 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
1055 reg = CN6XXX_WIN_WR_DATA_LO;
1056 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
1057 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
1058 reg = CN6XXX_WIN_WR_DATA_HI;
1059 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
1060 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
1061 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
1062 CN6XXX_WIN_WR_MASK_REG,
1063 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
1065 /* PCI Interrupt Register */
1066 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
1067 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
1068 CN6XXX_SLI_INT_ENB64_PORT0));
1069 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
1070 CN6XXX_SLI_INT_ENB64_PORT1,
1071 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
1072 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
1073 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
1075 /* PCI Output queue registers */
1076 for (i = 0; i < oct->num_oqs; i++) {
1077 reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
1078 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
1079 reg, i, octeon_read_csr(oct, reg));
1080 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
1081 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
1082 reg, i, octeon_read_csr(oct, reg));
1084 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
1085 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
1086 reg, octeon_read_csr(oct, reg));
1087 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
1088 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
1089 reg, octeon_read_csr(oct, reg));
1091 /* PCI Input queue registers */
1092 for (i = 0; i <= 3; i++) {
1095 reg = CN6XXX_SLI_IQ_DOORBELL(i);
1096 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
1097 reg, i, octeon_read_csr(oct, reg));
1098 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
1099 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
1100 reg, i, octeon_read_csr(oct, reg));
1103 /* PCI DMA registers */
1105 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
1107 octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
1108 reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
1109 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
1110 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
1111 reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
1112 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
1113 CN6XXX_DMA_TIME_INT_LEVEL(0),
1114 octeon_read_csr(oct, reg));
1116 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
1118 octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
1119 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
1120 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
1121 CN6XXX_DMA_PKT_INT_LEVEL(1),
1122 octeon_read_csr(oct, reg));
/* Fix: read the TIME register, not PKT, for the DMA_TIME_1 entry. */
1123 reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
1124 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
1125 CN6XXX_DMA_TIME_INT_LEVEL(1),
1126 octeon_read_csr(oct, reg));
1128 /* PCI Index registers */
1130 len += sprintf(s + len, "\n");
1132 for (i = 0; i < 16; i++) {
1133 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
1134 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
1135 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
/* Format selected PCI configuration-space dwords (0-13 and 30-34) into
 * 's'; returns the number of bytes written.
 */
1141 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
1146 /* PCI CONFIG Registers */
1148 len += sprintf(s + len,
1149 "\n\t Octeon Config space Registers\n\n");
1151 for (i = 0; i <= 13; i++) {
1152 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
1153 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
1157 for (i = 30; i <= 34; i++) {
1158 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
1159 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
1166 /* Return register dump user app. */
/* ethtool .get_regs: zero the buffer, set the dump version, and fill
 * it with the CSR and config-space dumps for supported chips.
 */
1167 static void lio_get_regs(struct net_device *dev,
1168 struct ethtool_regs *regs, void *regbuf)
1170 struct lio *lio = GET_LIO(dev);
1172 struct octeon_device *oct = lio->oct_dev;
1174 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
1175 regs->version = OCT_ETHTOOL_REGSVER;
1177 switch (oct->chip_id) {
1178 /* case OCTEON_CN73XX: Todo */
1181 len += cn6xxx_read_csr_reg(regbuf + len, oct);
1182 len += cn6xxx_read_config_reg(regbuf + len, oct);
1185 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
1186 __func__, oct->chip_id);
/* ethtool operations table registered on every liquidio netdev. */
1190 static const struct ethtool_ops lio_ethtool_ops = {
1191 .get_settings = lio_get_settings,
1192 .get_link = ethtool_op_get_link,
1193 .get_drvinfo = lio_get_drvinfo,
1194 .get_ringparam = lio_ethtool_get_ringparam,
1195 .get_channels = lio_ethtool_get_channels,
1196 .set_phys_id = lio_set_phys_id,
1197 .get_eeprom_len = lio_get_eeprom_len,
1198 .get_eeprom = lio_get_eeprom,
1199 .get_strings = lio_get_strings,
1200 .get_ethtool_stats = lio_get_ethtool_stats,
1201 .get_pauseparam = lio_get_pauseparam,
1202 .get_regs_len = lio_get_regs_len,
1203 .get_regs = lio_get_regs,
1204 .get_msglevel = lio_get_msglevel,
1205 .set_msglevel = lio_set_msglevel,
1206 .get_sset_count = lio_get_sset_count,
1207 .nway_reset = lio_nway_reset,
1208 .set_settings = lio_set_settings,
1209 .get_coalesce = lio_get_intr_coalesce,
1210 .set_coalesce = lio_set_intr_coalesce,
1211 .get_ts_info = lio_get_ts_info,
1214 void liquidio_set_ethtool_ops(struct net_device *netdev)
1216 netdev->ethtool_ops = &lio_ethtool_ops;