1 /*******************************************************************************
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2014 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
29 /* ethtool support for ixgbe */
31 #include <linux/interrupt.h>
32 #include <linux/types.h>
33 #include <linux/module.h>
34 #include <linux/slab.h>
35 #include <linux/pci.h>
36 #include <linux/netdevice.h>
37 #include <linux/ethtool.h>
38 #include <linux/vmalloc.h>
39 #include <linux/highmem.h>
40 #include <linux/uaccess.h>
43 #include "ixgbe_phy.h"
46 #define IXGBE_ALL_RAR_ENTRIES 16
48 enum {NETDEV_STATS, IXGBE_STATS};
51 char stat_string[ETH_GSTRING_LEN];
57 #define IXGBE_STAT(m) IXGBE_STATS, \
58 sizeof(((struct ixgbe_adapter *)0)->m), \
59 offsetof(struct ixgbe_adapter, m)
60 #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
61 sizeof(((struct rtnl_link_stats64 *)0)->m), \
62 offsetof(struct rtnl_link_stats64, m)
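/* Stats table: each entry pairs a display name with the location of the
 * counter (netdev stats vs. adapter stats), its size and its offset, as
 * recorded by the IXGBE_NETDEV_STAT()/IXGBE_STAT() helpers above. */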
64 static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
65 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
66 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
67 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
68 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
69 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
70 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
71 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
72 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
73 {"lsc_int", IXGBE_STAT(lsc_int)},
74 {"tx_busy", IXGBE_STAT(tx_busy)},
75 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
76 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
77 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
78 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
79 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
80 {"multicast", IXGBE_NETDEV_STAT(multicast)},
81 {"broadcast", IXGBE_STAT(stats.bprc)},
82 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
83 {"collisions", IXGBE_NETDEV_STAT(collisions)},
84 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
85 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
86 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
87 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
88 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
89 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
90 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
91 {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
92 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
93 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
94 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
95 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
96 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
97 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
98 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
99 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
100 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
101 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
102 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
103 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
104 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
105 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
106 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
107 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
108 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
109 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
110 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
111 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
112 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
113 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
115 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
116 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
117 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
118 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
119 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
120 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
121 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
122 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
123 #endif /* IXGBE_FCOE */
126 /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically, so
127 * define IXGBE_NUM_RX_QUEUES to evaluate to num_tx_queues. This is
128 * done because there is no good way to get the maximum number of
129 * Rx queues when CONFIG_RPS is disabled.
131 #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
133 #define IXGBE_QUEUE_STATS_LEN ( \
134 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
135 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
136 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
137 #define IXGBE_PB_STATS_LEN ( \
138 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
139 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
140 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
141 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
143 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
144 IXGBE_PB_STATS_LEN + \
145 IXGBE_QUEUE_STATS_LEN)
147 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
148 "Register test (offline)", "Eeprom test (offline)",
149 "Interrupt test (offline)", "Loopback test (offline)",
150 "Link test (on/offline)"
152 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
154 static int ixgbe_get_settings(struct net_device *netdev,
155 struct ethtool_cmd *ecmd)
157 struct ixgbe_adapter *adapter = netdev_priv(netdev);
158 struct ixgbe_hw *hw = &adapter->hw;
159 ixgbe_link_speed supported_link;
161 bool autoneg = false;
164 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
166 /* set the supported link speeds */
167 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
168 ecmd->supported |= SUPPORTED_10000baseT_Full;
169 if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
170 ecmd->supported |= SUPPORTED_2500baseX_Full;
171 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
172 ecmd->supported |= SUPPORTED_1000baseT_Full;
173 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
174 ecmd->supported |= SUPPORTED_100baseT_Full;
176 /* set the advertised speeds */
177 if (hw->phy.autoneg_advertised) {
178 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
179 ecmd->advertising |= ADVERTISED_100baseT_Full;
180 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
181 ecmd->advertising |= ADVERTISED_10000baseT_Full;
182 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
183 ecmd->advertising |= ADVERTISED_2500baseX_Full;
184 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
185 ecmd->advertising |= ADVERTISED_1000baseT_Full;
187 /* default modes in case phy.autoneg_advertised isn't set */
188 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
189 ecmd->advertising |= ADVERTISED_10000baseT_Full;
190 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
191 ecmd->advertising |= ADVERTISED_1000baseT_Full;
192 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
193 ecmd->advertising |= ADVERTISED_100baseT_Full;
195 if (hw->phy.multispeed_fiber && !autoneg) {
196 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
197 ecmd->advertising = ADVERTISED_10000baseT_Full;
202 ecmd->supported |= SUPPORTED_Autoneg;
203 ecmd->advertising |= ADVERTISED_Autoneg;
204 ecmd->autoneg = AUTONEG_ENABLE;
206 ecmd->autoneg = AUTONEG_DISABLE;
208 ecmd->transceiver = XCVR_EXTERNAL;
210 /* Determine the remaining settings based on the PHY type. */
211 switch (adapter->hw.phy.type) {
214 case ixgbe_phy_x550em_ext_t:
215 case ixgbe_phy_cu_unknown:
216 ecmd->supported |= SUPPORTED_TP;
217 ecmd->advertising |= ADVERTISED_TP;
218 ecmd->port = PORT_TP;
221 ecmd->supported |= SUPPORTED_FIBRE;
222 ecmd->advertising |= ADVERTISED_FIBRE;
223 ecmd->port = PORT_FIBRE;
226 case ixgbe_phy_sfp_passive_tyco:
227 case ixgbe_phy_sfp_passive_unknown:
228 case ixgbe_phy_sfp_ftl:
229 case ixgbe_phy_sfp_avago:
230 case ixgbe_phy_sfp_intel:
231 case ixgbe_phy_sfp_unknown:
232 /* SFP+ devices, further checking needed */
233 switch (adapter->hw.phy.sfp_type) {
234 case ixgbe_sfp_type_da_cu:
235 case ixgbe_sfp_type_da_cu_core0:
236 case ixgbe_sfp_type_da_cu_core1:
237 ecmd->supported |= SUPPORTED_FIBRE;
238 ecmd->advertising |= ADVERTISED_FIBRE;
239 ecmd->port = PORT_DA;
241 case ixgbe_sfp_type_sr:
242 case ixgbe_sfp_type_lr:
243 case ixgbe_sfp_type_srlr_core0:
244 case ixgbe_sfp_type_srlr_core1:
245 case ixgbe_sfp_type_1g_sx_core0:
246 case ixgbe_sfp_type_1g_sx_core1:
247 case ixgbe_sfp_type_1g_lx_core0:
248 case ixgbe_sfp_type_1g_lx_core1:
249 ecmd->supported |= SUPPORTED_FIBRE;
250 ecmd->advertising |= ADVERTISED_FIBRE;
251 ecmd->port = PORT_FIBRE;
253 case ixgbe_sfp_type_not_present:
254 ecmd->supported |= SUPPORTED_FIBRE;
255 ecmd->advertising |= ADVERTISED_FIBRE;
256 ecmd->port = PORT_NONE;
258 case ixgbe_sfp_type_1g_cu_core0:
259 case ixgbe_sfp_type_1g_cu_core1:
260 ecmd->supported |= SUPPORTED_TP;
261 ecmd->advertising |= ADVERTISED_TP;
262 ecmd->port = PORT_TP;
264 case ixgbe_sfp_type_unknown:
266 ecmd->supported |= SUPPORTED_FIBRE;
267 ecmd->advertising |= ADVERTISED_FIBRE;
268 ecmd->port = PORT_OTHER;
273 ecmd->supported |= SUPPORTED_FIBRE;
274 ecmd->advertising |= ADVERTISED_FIBRE;
275 ecmd->port = PORT_NONE;
277 case ixgbe_phy_unknown:
278 case ixgbe_phy_generic:
279 case ixgbe_phy_sfp_unsupported:
281 ecmd->supported |= SUPPORTED_FIBRE;
282 ecmd->advertising |= ADVERTISED_FIBRE;
283 ecmd->port = PORT_OTHER;
287 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
289 switch (link_speed) {
290 case IXGBE_LINK_SPEED_10GB_FULL:
291 ethtool_cmd_speed_set(ecmd, SPEED_10000);
293 case IXGBE_LINK_SPEED_2_5GB_FULL:
294 ethtool_cmd_speed_set(ecmd, SPEED_2500);
296 case IXGBE_LINK_SPEED_1GB_FULL:
297 ethtool_cmd_speed_set(ecmd, SPEED_1000);
299 case IXGBE_LINK_SPEED_100_FULL:
300 ethtool_cmd_speed_set(ecmd, SPEED_100);
305 ecmd->duplex = DUPLEX_FULL;
307 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
308 ecmd->duplex = DUPLEX_UNKNOWN;
314 static int ixgbe_set_settings(struct net_device *netdev,
315 struct ethtool_cmd *ecmd)
317 struct ixgbe_adapter *adapter = netdev_priv(netdev);
318 struct ixgbe_hw *hw = &adapter->hw;
322 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
323 (hw->phy.multispeed_fiber)) {
325 * this function does not support duplex forcing, but can
326 * limit the advertising of the adapter to the specified speed
328 if (ecmd->advertising & ~ecmd->supported)
331 /* only allow one speed at a time if no autoneg */
332 if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
333 if (ecmd->advertising ==
334 (ADVERTISED_10000baseT_Full |
335 ADVERTISED_1000baseT_Full))
339 old = hw->phy.autoneg_advertised;
341 if (ecmd->advertising & ADVERTISED_10000baseT_Full)
342 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
344 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
345 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
347 if (ecmd->advertising & ADVERTISED_100baseT_Full)
348 advertised |= IXGBE_LINK_SPEED_100_FULL;
350 if (old == advertised)
352 /* this sets the link speed and restarts auto-neg */
353 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
354 usleep_range(1000, 2000);
356 hw->mac.autotry_restart = true;
357 err = hw->mac.ops.setup_link(hw, advertised, true);
359 e_info(probe, "setup link failed with code %d\n", err);
360 hw->mac.ops.setup_link(hw, old, true);
362 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
364 /* in this case we currently only support 10Gb/FULL */
365 u32 speed = ethtool_cmd_speed(ecmd);
366 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
367 (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
368 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
375 static void ixgbe_get_pauseparam(struct net_device *netdev,
376 struct ethtool_pauseparam *pause)
378 struct ixgbe_adapter *adapter = netdev_priv(netdev);
379 struct ixgbe_hw *hw = &adapter->hw;
381 if (ixgbe_device_supports_autoneg_fc(hw) &&
382 !hw->fc.disable_fc_autoneg)
387 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
389 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
391 } else if (hw->fc.current_mode == ixgbe_fc_full) {
397 static int ixgbe_set_pauseparam(struct net_device *netdev,
398 struct ethtool_pauseparam *pause)
400 struct ixgbe_adapter *adapter = netdev_priv(netdev);
401 struct ixgbe_hw *hw = &adapter->hw;
402 struct ixgbe_fc_info fc = hw->fc;
404 /* 82598 does not support link flow control with DCB enabled */
405 if ((hw->mac.type == ixgbe_mac_82598EB) &&
406 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
409 /* some devices do not support autoneg of link flow control */
410 if ((pause->autoneg == AUTONEG_ENABLE) &&
411 !ixgbe_device_supports_autoneg_fc(hw))
414 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
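/* map the requested rx/tx pause bits onto ixgbe flow control modes; with
 * autoneg enabled, request full flow control and let negotiation decide */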
416 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
417 fc.requested_mode = ixgbe_fc_full;
418 else if (pause->rx_pause && !pause->tx_pause)
419 fc.requested_mode = ixgbe_fc_rx_pause;
420 else if (!pause->rx_pause && pause->tx_pause)
421 fc.requested_mode = ixgbe_fc_tx_pause;
423 fc.requested_mode = ixgbe_fc_none;
425 /* if anything changed, apply the new settings and autoneg preference */
426 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
428 if (netif_running(netdev))
429 ixgbe_reinit_locked(adapter);
431 ixgbe_reset(adapter);
437 static u32 ixgbe_get_msglevel(struct net_device *netdev)
439 struct ixgbe_adapter *adapter = netdev_priv(netdev);
440 return adapter->msg_enable;
443 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
445 struct ixgbe_adapter *adapter = netdev_priv(netdev);
446 adapter->msg_enable = data;
449 static int ixgbe_get_regs_len(struct net_device *netdev)
451 #define IXGBE_REGS_LEN 1139
452 return IXGBE_REGS_LEN * sizeof(u32);
455 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
457 static void ixgbe_get_regs(struct net_device *netdev,
458 struct ethtool_regs *regs, void *p)
460 struct ixgbe_adapter *adapter = netdev_priv(netdev);
461 struct ixgbe_hw *hw = &adapter->hw;
465 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
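/* the dump version encodes the MAC type (bits 31:24) and the silicon
 * revision (bits 23:16) */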
467 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
470 /* General Registers */
471 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
472 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
473 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
474 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
475 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
476 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
477 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
478 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
481 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
482 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
483 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
484 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
485 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
486 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
487 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
488 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
489 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
490 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
493 /* don't read EICR because reading it clears pending interrupt causes;
494 * instead read EICS, which shadows EICR without clearing it */
495 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
496 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
497 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
498 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
499 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
500 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
501 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
502 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
503 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
504 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
505 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
506 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
509 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
510 regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
511 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
512 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
513 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
514 for (i = 0; i < 8; i++) {
515 switch (hw->mac.type) {
516 case ixgbe_mac_82598EB:
517 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
518 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
520 case ixgbe_mac_82599EB:
523 case ixgbe_mac_X550EM_x:
524 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
525 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
531 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
532 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
535 for (i = 0; i < 64; i++)
536 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
537 for (i = 0; i < 64; i++)
538 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
539 for (i = 0; i < 64; i++)
540 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
541 for (i = 0; i < 64; i++)
542 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
543 for (i = 0; i < 64; i++)
544 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
545 for (i = 0; i < 64; i++)
546 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
547 for (i = 0; i < 16; i++)
548 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
549 for (i = 0; i < 16; i++)
550 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
551 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
552 for (i = 0; i < 8; i++)
553 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
554 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
555 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
558 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
559 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
560 for (i = 0; i < 16; i++)
561 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
562 for (i = 0; i < 16; i++)
563 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
564 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
565 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
566 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
567 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
568 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
569 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
570 for (i = 0; i < 8; i++)
571 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
572 for (i = 0; i < 8; i++)
573 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
574 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
577 for (i = 0; i < 32; i++)
578 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
579 for (i = 0; i < 32; i++)
580 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
581 for (i = 0; i < 32; i++)
582 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
583 for (i = 0; i < 32; i++)
584 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
585 for (i = 0; i < 32; i++)
586 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
587 for (i = 0; i < 32; i++)
588 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
589 for (i = 0; i < 32; i++)
590 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
591 for (i = 0; i < 32; i++)
592 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
593 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
594 for (i = 0; i < 16; i++)
595 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
596 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
597 for (i = 0; i < 8; i++)
598 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
599 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
602 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
603 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
604 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
605 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
606 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
607 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
608 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
609 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
610 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
613 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
614 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
616 switch (hw->mac.type) {
617 case ixgbe_mac_82598EB:
618 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
619 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
620 for (i = 0; i < 8; i++)
622 IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
623 for (i = 0; i < 8; i++)
625 IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
626 for (i = 0; i < 8; i++)
628 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
629 for (i = 0; i < 8; i++)
631 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
633 case ixgbe_mac_82599EB:
636 case ixgbe_mac_X550EM_x:
637 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
638 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
639 for (i = 0; i < 8; i++)
641 IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
642 for (i = 0; i < 8; i++)
644 IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
645 for (i = 0; i < 8; i++)
647 IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
648 for (i = 0; i < 8; i++)
650 IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
656 for (i = 0; i < 8; i++)
658 IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
659 for (i = 0; i < 8; i++)
661 IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
664 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
665 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
666 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
667 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
668 for (i = 0; i < 8; i++)
669 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
670 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
671 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
672 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
673 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
674 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
675 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
676 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
677 for (i = 0; i < 8; i++)
678 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
679 for (i = 0; i < 8; i++)
680 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
681 for (i = 0; i < 8; i++)
682 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
683 for (i = 0; i < 8; i++)
684 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
685 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
686 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
687 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
688 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
689 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
690 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
691 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
692 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
693 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
694 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
695 regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
696 regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
697 for (i = 0; i < 8; i++)
698 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
699 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
700 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
701 regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
702 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
703 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
704 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
705 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
706 regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
707 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
708 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
709 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
710 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
711 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
712 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
713 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
714 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
715 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
716 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
717 regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
718 for (i = 0; i < 16; i++)
719 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
720 for (i = 0; i < 16; i++)
721 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
722 for (i = 0; i < 16; i++)
723 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
724 for (i = 0; i < 16; i++)
725 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
728 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
729 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
730 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
731 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
732 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
733 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
734 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
735 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
736 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
737 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
738 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
739 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
740 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
741 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
742 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
743 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
744 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
745 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
746 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
747 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
748 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
749 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
750 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
751 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
752 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
753 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
754 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
755 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
756 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
757 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
758 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
759 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
760 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
763 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
764 for (i = 0; i < 8; i++)
765 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
766 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
767 for (i = 0; i < 4; i++)
768 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
769 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
770 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
771 for (i = 0; i < 8; i++)
772 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
773 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
774 for (i = 0; i < 4; i++)
775 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
776 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
777 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
778 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
779 regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
780 regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
781 regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
782 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
783 regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
784 regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
785 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
786 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
787 for (i = 0; i < 8; i++)
788 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
789 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
790 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
791 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
792 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
793 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
794 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
795 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
796 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
797 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
799 /* 82599/X540-specific registers */
800 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
802 /* 82599/X540-specific DCB registers */
803 regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
804 regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
805 for (i = 0; i < 4; i++)
806 regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
807 regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
808 /* same as RTTQCNRM */
809 regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
810 /* same as RTTQCNRR */
812 /* X540-specific DCB registers */
813 regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
814 regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
817 static int ixgbe_get_eeprom_len(struct net_device *netdev)
819 struct ixgbe_adapter *adapter = netdev_priv(netdev);
820 return adapter->hw.eeprom.word_size * 2;
823 static int ixgbe_get_eeprom(struct net_device *netdev,
824 struct ethtool_eeprom *eeprom, u8 *bytes)
826 struct ixgbe_adapter *adapter = netdev_priv(netdev);
827 struct ixgbe_hw *hw = &adapter->hw;
829 int first_word, last_word, eeprom_len;
833 if (eeprom->len == 0)
836 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
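/* ixgbe_set_eeprom() requires this same vendor/device magic before it
 * will accept a write */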
838 first_word = eeprom->offset >> 1;
839 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
840 eeprom_len = last_word - first_word + 1;
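/* the EEPROM is read in whole 16-bit words; any odd leading byte in the
 * requested range is skipped by the memcpy below */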
842 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
846 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
849 /* Device's eeprom is always little-endian, word addressable */
850 for (i = 0; i < eeprom_len; i++)
851 le16_to_cpus(&eeprom_buff[i]);
853 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
859 static int ixgbe_set_eeprom(struct net_device *netdev,
860 struct ethtool_eeprom *eeprom, u8 *bytes)
862 struct ixgbe_adapter *adapter = netdev_priv(netdev);
863 struct ixgbe_hw *hw = &adapter->hw;
866 int max_len, first_word, last_word, ret_val = 0;
869 if (eeprom->len == 0)
872 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
875 max_len = hw->eeprom.word_size * 2;
877 first_word = eeprom->offset >> 1;
878 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
879 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
885 if (eeprom->offset & 1) {
887 * need read/modify/write of the first changed EEPROM word;
888 * only the second byte of the word is being modified
890 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
896 if ((eeprom->offset + eeprom->len) & 1) {
898 * need read/modify/write of the last changed EEPROM word;
899 * only the first byte of the word is being modified
901 ret_val = hw->eeprom.ops.read(hw, last_word,
902 &eeprom_buff[last_word - first_word]);
907 /* Device's eeprom is always little-endian, word addressable */
908 for (i = 0; i < last_word - first_word + 1; i++)
909 le16_to_cpus(&eeprom_buff[i]);
911 memcpy(ptr, bytes, eeprom->len);
913 for (i = 0; i < last_word - first_word + 1; i++)
914 cpu_to_le16s(&eeprom_buff[i]);
916 ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
917 last_word - first_word + 1,
920 /* Update the checksum */
922 hw->eeprom.ops.update_checksum(hw);
929 static void ixgbe_get_drvinfo(struct net_device *netdev,
930 struct ethtool_drvinfo *drvinfo)
932 struct ixgbe_adapter *adapter = netdev_priv(netdev);
935 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
936 strlcpy(drvinfo->version, ixgbe_driver_version,
937 sizeof(drvinfo->version));
939 nvm_track_id = (adapter->eeprom_verh << 16) |
940 adapter->eeprom_verl;
941 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
944 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
945 sizeof(drvinfo->bus_info));
946 drvinfo->n_stats = IXGBE_STATS_LEN;
947 drvinfo->testinfo_len = IXGBE_TEST_LEN;
948 drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
951 static void ixgbe_get_ringparam(struct net_device *netdev,
952 struct ethtool_ringparam *ring)
954 struct ixgbe_adapter *adapter = netdev_priv(netdev);
955 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
956 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
958 ring->rx_max_pending = IXGBE_MAX_RXD;
959 ring->tx_max_pending = IXGBE_MAX_TXD;
960 ring->rx_pending = rx_ring->count;
961 ring->tx_pending = tx_ring->count;
964 static int ixgbe_set_ringparam(struct net_device *netdev,
965 struct ethtool_ringparam *ring)
967 struct ixgbe_adapter *adapter = netdev_priv(netdev);
968 struct ixgbe_ring *temp_ring;
970 u32 new_rx_count, new_tx_count;
972 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
975 new_tx_count = clamp_t(u32, ring->tx_pending,
976 IXGBE_MIN_TXD, IXGBE_MAX_TXD);
977 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
979 new_rx_count = clamp_t(u32, ring->rx_pending,
980 IXGBE_MIN_RXD, IXGBE_MAX_RXD);
981 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
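/* the requested descriptor counts were clamped to the supported range and
 * rounded up to the required multiple above; nothing to do if they
 * already match the current rings */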
983 if ((new_tx_count == adapter->tx_ring_count) &&
984 (new_rx_count == adapter->rx_ring_count)) {
989 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
990 usleep_range(1000, 2000);
992 if (!netif_running(adapter->netdev)) {
993 for (i = 0; i < adapter->num_tx_queues; i++)
994 adapter->tx_ring[i]->count = new_tx_count;
995 for (i = 0; i < adapter->num_rx_queues; i++)
996 adapter->rx_ring[i]->count = new_rx_count;
997 adapter->tx_ring_count = new_tx_count;
998 adapter->rx_ring_count = new_rx_count;
1002 /* allocate temporary buffer to store rings in */
1003 i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
1004 temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
1011 ixgbe_down(adapter);
1014 * Set up new Tx resources and free the old Tx resources, in that order.
1015 * We can then assign the new resources to the rings via a memcpy.
1016 * The advantage to this approach is that we are guaranteed to still
1017 * have resources even in the case of an allocation failure.
1019 if (new_tx_count != adapter->tx_ring_count) {
1020 for (i = 0; i < adapter->num_tx_queues; i++) {
1021 memcpy(&temp_ring[i], adapter->tx_ring[i],
1022 sizeof(struct ixgbe_ring));
1024 temp_ring[i].count = new_tx_count;
1025 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1029 ixgbe_free_tx_resources(&temp_ring[i]);
1035 for (i = 0; i < adapter->num_tx_queues; i++) {
1036 ixgbe_free_tx_resources(adapter->tx_ring[i]);
1038 memcpy(adapter->tx_ring[i], &temp_ring[i],
1039 sizeof(struct ixgbe_ring));
1042 adapter->tx_ring_count = new_tx_count;
1045 /* Repeat the process for the Rx rings if needed */
1046 if (new_rx_count != adapter->rx_ring_count) {
1047 for (i = 0; i < adapter->num_rx_queues; i++) {
1048 memcpy(&temp_ring[i], adapter->rx_ring[i],
1049 sizeof(struct ixgbe_ring));
1051 temp_ring[i].count = new_rx_count;
1052 err = ixgbe_setup_rx_resources(&temp_ring[i]);
1056 ixgbe_free_rx_resources(&temp_ring[i]);
1063 for (i = 0; i < adapter->num_rx_queues; i++) {
1064 ixgbe_free_rx_resources(adapter->rx_ring[i]);
1066 memcpy(adapter->rx_ring[i], &temp_ring[i],
1067 sizeof(struct ixgbe_ring));
1070 adapter->rx_ring_count = new_rx_count;
1077 clear_bit(__IXGBE_RESETTING, &adapter->state);
1081 static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1085 return IXGBE_TEST_LEN;
1087 return IXGBE_STATS_LEN;
1093 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1094 struct ethtool_stats *stats, u64 *data)
1096 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1097 struct rtnl_link_stats64 temp;
1098 const struct rtnl_link_stats64 *net_stats;
1100 struct ixgbe_ring *ring;
1104 ixgbe_update_stats(adapter);
1105 net_stats = dev_get_stats(netdev, &temp);
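/* copy each stat from either the netdev stats or the adapter struct,
 * using the offset and width recorded in ixgbe_gstrings_stats */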
1106 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1107 switch (ixgbe_gstrings_stats[i].type) {
1109 p = (char *) net_stats +
1110 ixgbe_gstrings_stats[i].stat_offset;
1113 p = (char *) adapter +
1114 ixgbe_gstrings_stats[i].stat_offset;
1121 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1122 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1124 for (j = 0; j < netdev->num_tx_queues; j++) {
1125 ring = adapter->tx_ring[j];
1130 #ifdef BP_EXTENDED_STATS
1140 start = u64_stats_fetch_begin_irq(&ring->syncp);
1141 data[i] = ring->stats.packets;
1142 data[i+1] = ring->stats.bytes;
1143 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1145 #ifdef BP_EXTENDED_STATS
1146 data[i] = ring->stats.yields;
1147 data[i+1] = ring->stats.misses;
1148 data[i+2] = ring->stats.cleaned;
1152 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1153 ring = adapter->rx_ring[j];
1158 #ifdef BP_EXTENDED_STATS
1168 start = u64_stats_fetch_begin_irq(&ring->syncp);
1169 data[i] = ring->stats.packets;
1170 data[i+1] = ring->stats.bytes;
1171 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1173 #ifdef BP_EXTENDED_STATS
1174 data[i] = ring->stats.yields;
1175 data[i+1] = ring->stats.misses;
1176 data[i+2] = ring->stats.cleaned;
1181 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1182 data[i++] = adapter->stats.pxontxc[j];
1183 data[i++] = adapter->stats.pxofftxc[j];
1185 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1186 data[i++] = adapter->stats.pxonrxc[j];
1187 data[i++] = adapter->stats.pxoffrxc[j];
1191 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1194 char *p = (char *)data;
1197 switch (stringset) {
1199 for (i = 0; i < IXGBE_TEST_LEN; i++) {
1200 memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
1201 data += ETH_GSTRING_LEN;
1205 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1206 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1208 p += ETH_GSTRING_LEN;
1210 for (i = 0; i < netdev->num_tx_queues; i++) {
1211 sprintf(p, "tx_queue_%u_packets", i);
1212 p += ETH_GSTRING_LEN;
1213 sprintf(p, "tx_queue_%u_bytes", i);
1214 p += ETH_GSTRING_LEN;
1215 #ifdef BP_EXTENDED_STATS
1216 sprintf(p, "tx_queue_%u_bp_napi_yield", i);
1217 p += ETH_GSTRING_LEN;
1218 sprintf(p, "tx_queue_%u_bp_misses", i);
1219 p += ETH_GSTRING_LEN;
1220 sprintf(p, "tx_queue_%u_bp_cleaned", i);
1221 p += ETH_GSTRING_LEN;
1222 #endif /* BP_EXTENDED_STATS */
1224 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1225 sprintf(p, "rx_queue_%u_packets", i);
1226 p += ETH_GSTRING_LEN;
1227 sprintf(p, "rx_queue_%u_bytes", i);
1228 p += ETH_GSTRING_LEN;
1229 #ifdef BP_EXTENDED_STATS
1230 sprintf(p, "rx_queue_%u_bp_poll_yield", i);
1231 p += ETH_GSTRING_LEN;
1232 sprintf(p, "rx_queue_%u_bp_misses", i);
1233 p += ETH_GSTRING_LEN;
1234 sprintf(p, "rx_queue_%u_bp_cleaned", i);
1235 p += ETH_GSTRING_LEN;
1236 #endif /* BP_EXTENDED_STATS */
1238 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1239 sprintf(p, "tx_pb_%u_pxon", i);
1240 p += ETH_GSTRING_LEN;
1241 sprintf(p, "tx_pb_%u_pxoff", i);
1242 p += ETH_GSTRING_LEN;
1244 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1245 sprintf(p, "rx_pb_%u_pxon", i);
1246 p += ETH_GSTRING_LEN;
1247 sprintf(p, "rx_pb_%u_pxoff", i);
1248 p += ETH_GSTRING_LEN;
1250 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1255 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1257 struct ixgbe_hw *hw = &adapter->hw;
1261 if (ixgbe_removed(hw->hw_addr)) {
1267 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1275 /* ethtool register test data */
1276 struct ixgbe_reg_test {
1284 /* In the hardware, registers are laid out either singly, in arrays
1285 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1286 * most tests take place on arrays or single registers (handled
1287 * as a single-element array) and special-case the tables.
1288 * Table tests are always pattern tests.
1290 * We also make provision for some required setup steps by specifying
1291 * registers to be written without any read-back testing.
1294 #define PATTERN_TEST 1
1295 #define SET_READ_TEST 2
1296 #define WRITE_NO_TEST 3
1297 #define TABLE32_TEST 4
1298 #define TABLE64_TEST_LO 5
1299 #define TABLE64_TEST_HI 6
1301 /* default 82599 register test */
1302 static const struct ixgbe_reg_test reg_test_82599[] = {
1303 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1304 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1305 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1306 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1307 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1308 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1309 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1310 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1311 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1312 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1313 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1314 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1315 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1316 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1317 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1318 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1319 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1320 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1321 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1325 /* default 82598 register test */
1326 static const struct ixgbe_reg_test reg_test_82598[] = {
1327 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1328 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1329 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1330 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1331 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1332 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1333 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1334 /* Enable all four RX queues before testing. */
1335 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1336 /* RDH is read-only for 82598, only test RDT. */
1337 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1338 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1339 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1340 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1341 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1342 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1343 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1344 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1345 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1346 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1347 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1348 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1349 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1353 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1354 u32 mask, u32 write)
1356 u32 pat, val, before;
1357 static const u32 test_pattern[] = {
1358 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
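/* the walking 0x5A/0xA5 patterns plus all-zeros and all-ones exercise
 * every writable bit of the register under test */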
1360 if (ixgbe_removed(adapter->hw.hw_addr)) {
1364 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1365 before = ixgbe_read_reg(&adapter->hw, reg);
1366 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1367 val = ixgbe_read_reg(&adapter->hw, reg);
1368 if (val != (test_pattern[pat] & write & mask)) {
1369 e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1370 reg, val, (test_pattern[pat] & write & mask));
1372 ixgbe_write_reg(&adapter->hw, reg, before);
1375 ixgbe_write_reg(&adapter->hw, reg, before);
1380 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1381 u32 mask, u32 write)
1385 if (ixgbe_removed(adapter->hw.hw_addr)) {
1389 before = ixgbe_read_reg(&adapter->hw, reg);
1390 ixgbe_write_reg(&adapter->hw, reg, write & mask);
1391 val = ixgbe_read_reg(&adapter->hw, reg);
1392 if ((write & mask) != (val & mask)) {
1393 e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1394 reg, (val & mask), (write & mask));
1396 ixgbe_write_reg(&adapter->hw, reg, before);
1399 ixgbe_write_reg(&adapter->hw, reg, before);
1403 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1405 const struct ixgbe_reg_test *test;
1406 u32 value, before, after;
1409 if (ixgbe_removed(adapter->hw.hw_addr)) {
1410 e_err(drv, "Adapter removed - register test blocked\n");
1414 switch (adapter->hw.mac.type) {
1415 case ixgbe_mac_82598EB:
1416 toggle = 0x7FFFF3FF;
1417 test = reg_test_82598;
1419 case ixgbe_mac_82599EB:
1420 case ixgbe_mac_X540:
1421 case ixgbe_mac_X550:
1422 case ixgbe_mac_X550EM_x:
1423 toggle = 0x7FFFF30F;
1424 test = reg_test_82599;
1432 * Because the status register is such a special case,
1433 * we handle it separately from the rest of the register
1434 * tests. Some bits are read-only, some toggle, and some
1435 * are writeable on newer MACs.
1437 before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
1438 value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
1439 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
1440 after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
1441 if (value != after) {
1442 e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1447 /* restore previous status */
1448 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
1451 * Perform the remainder of the register test, looping through
1452 * the test table until we either fail or reach the null entry.
1455 for (i = 0; i < test->array_len; i++) {
1458 switch (test->test_type) {
1460 b = reg_pattern_test(adapter, data,
1461 test->reg + (i * 0x40),
1466 b = reg_set_and_check(adapter, data,
1467 test->reg + (i * 0x40),
1472 ixgbe_write_reg(&adapter->hw,
1473 test->reg + (i * 0x40),
1477 b = reg_pattern_test(adapter, data,
1478 test->reg + (i * 4),
1482 case TABLE64_TEST_LO:
1483 b = reg_pattern_test(adapter, data,
1484 test->reg + (i * 8),
1488 case TABLE64_TEST_HI:
1489 b = reg_pattern_test(adapter, data,
1490 (test->reg + 4) + (i * 8),
1505 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1507 struct ixgbe_hw *hw = &adapter->hw;
1508 if (hw->eeprom.ops.validate_checksum(hw, NULL))
1515 static irqreturn_t ixgbe_test_intr(int irq, void *data)
1517 struct net_device *netdev = (struct net_device *) data;
1518 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1520 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1525 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1527 struct net_device *netdev = adapter->netdev;
1528 u32 mask, i = 0, shared_int = true;
1529 u32 irq = adapter->pdev->irq;
1533 /* Hook up test interrupt handler just for this test */
1534 if (adapter->msix_entries) {
1535 /* NOTE: we don't test MSI-X interrupts here, yet */
1537 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1539 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1544 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1545 netdev->name, netdev)) {
1547 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1548 netdev->name, netdev)) {
1552 e_info(hw, "testing %s interrupt\n", shared_int ?
1553 "shared" : "unshared");
1555 /* Disable all the interrupts */
1556 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1557 IXGBE_WRITE_FLUSH(&adapter->hw);
1558 usleep_range(10000, 20000);
1560 /* Test each interrupt */
1561 for (; i < 10; i++) {
1562 /* Interrupt to test */
1567 * Disable the interrupts to be reported in
1568 * the cause register and then force the same
1569 * interrupt and see if one gets posted. If
1570 * an interrupt was posted to the bus, the test failed.
1573 adapter->test_icr = 0;
1574 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1575 ~mask & 0x00007FFF);
1576 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1577 ~mask & 0x00007FFF);
1578 IXGBE_WRITE_FLUSH(&adapter->hw);
1579 usleep_range(10000, 20000);
1581 if (adapter->test_icr & mask) {
1588 * Enable the interrupt to be reported in the cause
1589 * register and then force the same interrupt and see
1590 * if one gets posted. If an interrupt was not posted
1591 * to the bus, the test failed.
1593 adapter->test_icr = 0;
1594 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1595 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1596 IXGBE_WRITE_FLUSH(&adapter->hw);
1597 usleep_range(10000, 20000);
1599 if (!(adapter->test_icr & mask)) {
1606 * Disable the other interrupts to be reported in
1607 * the cause register and then force the other
1608 * interrupts and see if any get posted. If
1609 * an interrupt was posted to the bus, the test failed.
1612 adapter->test_icr = 0;
1613 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1614 ~mask & 0x00007FFF);
1615 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1616 ~mask & 0x00007FFF);
1617 IXGBE_WRITE_FLUSH(&adapter->hw);
1618 usleep_range(10000, 20000);
1620 if (adapter->test_icr) {
1627 /* Disable all the interrupts */
1628 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1629 IXGBE_WRITE_FLUSH(&adapter->hw);
1630 usleep_range(10000, 20000);
1632 /* Unhook test interrupt handler */
1633 free_irq(irq, netdev);
1638 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1640 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1641 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1642 struct ixgbe_hw *hw = &adapter->hw;
1645 /* shut down the DMA engines now so they can be reinitialized later */
1648 hw->mac.ops.disable_rx(hw);
1649 ixgbe_disable_rx_queue(adapter, rx_ring);
1652 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1653 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1654 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1656 switch (hw->mac.type) {
1657 case ixgbe_mac_82599EB:
1658 case ixgbe_mac_X540:
1659 case ixgbe_mac_X550:
1660 case ixgbe_mac_X550EM_x:
1661 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1662 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1663 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1669 ixgbe_reset(adapter);
1671 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1672 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1675 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1677 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1678 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1679 struct ixgbe_hw *hw = &adapter->hw;
1684 /* Setup Tx descriptor ring and Tx buffers */
1685 tx_ring->count = IXGBE_DEFAULT_TXD;
1686 tx_ring->queue_index = 0;
1687 tx_ring->dev = &adapter->pdev->dev;
1688 tx_ring->netdev = adapter->netdev;
1689 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1691 err = ixgbe_setup_tx_resources(tx_ring);
1695 switch (adapter->hw.mac.type) {
1696 case ixgbe_mac_82599EB:
1697 case ixgbe_mac_X540:
1698 case ixgbe_mac_X550:
1699 case ixgbe_mac_X550EM_x:
1700 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1701 reg_data |= IXGBE_DMATXCTL_TE;
1702 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1708 ixgbe_configure_tx_ring(adapter, tx_ring);
1710 /* Setup Rx Descriptor ring and Rx buffers */
1711 rx_ring->count = IXGBE_DEFAULT_RXD;
1712 rx_ring->queue_index = 0;
1713 rx_ring->dev = &adapter->pdev->dev;
1714 rx_ring->netdev = adapter->netdev;
1715 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1717 err = ixgbe_setup_rx_resources(rx_ring);
1723 hw->mac.ops.disable_rx(hw);
1725 ixgbe_configure_rx_ring(adapter, rx_ring);
1727 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1728 rctl |= IXGBE_RXCTRL_DMBYPS;
1729 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1731 hw->mac.ops.enable_rx(hw);
1736 ixgbe_free_desc_rings(adapter);
1740 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1742 struct ixgbe_hw *hw = &adapter->hw;
1746 /* Setup MAC loopback */
1747 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1748 reg_data |= IXGBE_HLREG0_LPBK;
1749 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
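/* accept broadcast frames, store bad packets and enable multicast
 * promiscuous mode so looped-back test frames are not dropped by Rx
 * filtering */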
1751 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1752 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1753 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1755 /* X540 and X550 need to set the MACC.FLU bit to force link up */
1756 switch (adapter->hw.mac.type) {
1757 case ixgbe_mac_X540:
1758 case ixgbe_mac_X550:
1759 case ixgbe_mac_X550EM_x:
1760 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1761 reg_data |= IXGBE_MACC_FLU;
1762 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1765 if (hw->mac.orig_autoc) {
1766 reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1767 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1772 IXGBE_WRITE_FLUSH(hw);
1773 usleep_range(10000, 20000);
1775 /* Disable Atlas Tx lanes; re-enabled in reset path */
1776 if (hw->mac.type == ixgbe_mac_82598EB) {
1779 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1780 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1781 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1783 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1784 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1785 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1787 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1788 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1789 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1791 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1792 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1793 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1799 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1803 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1804 reg_data &= ~IXGBE_HLREG0_LPBK;
1805 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1808 static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1809 unsigned int frame_size)
1811 memset(skb->data, 0xFF, frame_size);
1813 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1814 memset(&skb->data[frame_size + 10], 0xBE, 1);
1815 memset(&skb->data[frame_size + 12], 0xAF, 1);
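/* ixgbe_check_lbtest_frame() below verifies the 0xFF fill byte and the
 * 0xBE/0xAF marker bytes written by ixgbe_create_lbtest_frame() */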
1818 static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1819 unsigned int frame_size)
1821 unsigned char *data;
1826 data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1828 if (data[3] != 0xFF ||
1829 data[frame_size + 10] != 0xBE ||
1830 data[frame_size + 12] != 0xAF)
1833 kunmap(rx_buffer->page);
1838 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1839 struct ixgbe_ring *tx_ring,
1842 union ixgbe_adv_rx_desc *rx_desc;
1843 struct ixgbe_rx_buffer *rx_buffer;
1844 struct ixgbe_tx_buffer *tx_buffer;
1845 u16 rx_ntc, tx_ntc, count = 0;
1847 /* initialize next to clean and descriptor values */
1848 rx_ntc = rx_ring->next_to_clean;
1849 tx_ntc = tx_ring->next_to_clean;
1850 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1852 while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
1853 /* check Rx buffer */
1854 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
1856 /* sync Rx buffer for CPU read */
1857 dma_sync_single_for_cpu(rx_ring->dev,
1859 ixgbe_rx_bufsz(rx_ring),
1862 /* verify contents of skb */
1863 if (ixgbe_check_lbtest_frame(rx_buffer, size))
1866 /* sync Rx buffer for device write */
1867 dma_sync_single_for_device(rx_ring->dev,
1869 ixgbe_rx_bufsz(rx_ring),
1872 /* unmap buffer on Tx side */
1873 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
1874 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1876 /* increment Rx/Tx next to clean counters */
1878 if (rx_ntc == rx_ring->count)
1881 if (tx_ntc == tx_ring->count)
1884 /* fetch next descriptor */
1885 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1888 netdev_tx_reset_queue(txring_txq(tx_ring));
1890 /* re-map buffers to ring, store next to clean values */
1891 ixgbe_alloc_rx_buffers(rx_ring, count);
1892 rx_ring->next_to_clean = rx_ntc;
1893 tx_ring->next_to_clean = tx_ntc;
1898 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1900 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1901 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1902 int i, j, lc, good_cnt, ret_val = 0;
1903 unsigned int size = 1024;
1904 netdev_tx_t tx_ret_val;
1905 struct sk_buff *skb;
1906 u32 flags_orig = adapter->flags;
1908 /* DCB can modify the frames on Tx */
1909 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */
	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;
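	/*
	 * Worked example (added note, assuming the default 512-entry test
	 * rings set up by ixgbe_setup_desc_rings()): lc = ((512 / 64) * 2) + 1
	 * = 17, so the loop below pushes 17 bursts of 64 frames, enough to
	 * wrap the descriptor ring roughly twice.
	 */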
	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}
		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);
	adapter->flags = flags_orig;
	return ret_val;
}
static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
1984 static void ixgbe_diag_test(struct net_device *netdev,
1985 struct ethtool_test *eth_test, u64 *data)
1987 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1988 bool if_running = netif_running(netdev);
	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(hw, "Adapter removed - test blocked\n");
		data[0] = data[1] = data[2] = data[3] = data[4] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
2000 set_bit(__IXGBE_TESTING, &adapter->state);
2001 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2002 struct ixgbe_hw *hw = &adapter->hw;
		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;
			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
					data[0] = data[1] = data[2] = data[3] = data[4] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}
2023 e_info(hw, "offline testing starting\n");
2025 /* Link test performed before hardware reset so autoneg doesn't
2026 * interfere with test result
2028 if (ixgbe_link_test(adapter, &data[4]))
2029 eth_test->flags |= ETH_TEST_FL_FAILED;
		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbe_reset(adapter);
2037 e_info(hw, "register testing starting\n");
2038 if (ixgbe_reg_test(adapter, &data[0]))
2039 eth_test->flags |= ETH_TEST_FL_FAILED;
2041 ixgbe_reset(adapter);
2042 e_info(hw, "eeprom testing starting\n");
2043 if (ixgbe_eeprom_test(adapter, &data[1]))
2044 eth_test->flags |= ETH_TEST_FL_FAILED;
2046 ixgbe_reset(adapter);
2047 e_info(hw, "interrupt testing starting\n");
2048 if (ixgbe_intr_test(adapter, &data[2]))
2049 eth_test->flags |= ETH_TEST_FL_FAILED;
2051 /* If SRIOV or VMDq is enabled then skip MAC
2052 * loopback diagnostic. */
2053 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2054 IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);
		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
		else if (hw->mac.ops.disable_tx_laser)
			hw->mac.ops.disable_tx_laser(hw);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = data[1] = data[2] = data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
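/*
 * Added usage note: this handler backs "ethtool -t <iface> [online|offline]".
 * The result slots filled in above follow the self-test string order used
 * by this driver: data[0] register test, data[1] eeprom test, data[2]
 * interrupt test, data[3] loopback test, data[4] link test; a non-zero
 * value marks the corresponding test as failed.
 */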
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}
	return retval;
}
2110 static void ixgbe_get_wol(struct net_device *netdev,
2111 struct ethtool_wolinfo *wol)
2113 struct ixgbe_adapter *adapter = netdev_priv(netdev);
	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;
2123 if (adapter->wol & IXGBE_WUFC_EX)
2124 wol->wolopts |= WAKE_UCAST;
2125 if (adapter->wol & IXGBE_WUFC_MC)
2126 wol->wolopts |= WAKE_MCAST;
2127 if (adapter->wol & IXGBE_WUFC_BC)
2128 wol->wolopts |= WAKE_BCAST;
2129 if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
2146 adapter->wol |= IXGBE_WUFC_EX;
2147 if (wol->wolopts & WAKE_MCAST)
2148 adapter->wol |= IXGBE_WUFC_MC;
2149 if (wol->wolopts & WAKE_BCAST)
2150 adapter->wol |= IXGBE_WUFC_BC;
2151 if (wol->wolopts & WAKE_MAGIC)
2152 adapter->wol |= IXGBE_WUFC_MAG;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
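/*
 * Added usage note: a request such as "ethtool -s <iface> wol umbg"
 * selects unicast, multicast, broadcast and magic-packet wake-up, which
 * map to the IXGBE_WUFC_EX/MC/BC/MAG bits accumulated above before
 * device_set_wakeup_enable() arms the PCI device for wake-up.
 */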
2159 static int ixgbe_nway_reset(struct net_device *netdev)
2161 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2163 if (netif_running(netdev))
2164 ixgbe_reinit_locked(adapter);
2169 static int ixgbe_set_phys_id(struct net_device *netdev,
2170 enum ethtool_phys_id_state state)
2172 struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;
	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
		break;
	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
		break;
	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
2197 static int ixgbe_get_coalesce(struct net_device *netdev,
2198 struct ethtool_coalesce *ec)
2200 struct ixgbe_adapter *adapter = netdev_priv(netdev);
	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
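/*
 * Added note: rx_itr_setting/tx_itr_setting store 0 (moderation off),
 * 1 (dynamic ITR) or the requested interval left-shifted by two, i.e. in
 * quarter-microsecond units; hence the >> 2 when reporting here and the
 * << 2 when programming in ixgbe_set_coalesce() below. For example,
 * "ethtool -C <iface> rx-usecs 50" is stored internally as 200.
 */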
/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
2225 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2227 struct net_device *netdev = adapter->netdev;
	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}

	return false;
}
2251 static int ixgbe_set_coalesce(struct net_device *netdev,
2252 struct ethtool_coalesce *ec)
2254 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2255 struct ixgbe_q_vector *q_vector;
2257 u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2258 bool need_reset = false;
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}
	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_10K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;
2294 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2295 adapter->tx_itr_setting = adapter->rx_itr_setting;
	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting != 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}
2309 /* check the old value and enable RSC if necessary */
2310 need_reset |= ixgbe_update_rsc(adapter);
	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}
	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
2334 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2335 struct ethtool_rxnfc *cmd)
2337 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2338 struct ethtool_rx_flow_spec *fsp =
2339 (struct ethtool_rx_flow_spec *)&cmd->fs;
2340 struct hlist_node *node2;
2341 struct ixgbe_fdir_filter *rule = NULL;
2343 /* report total rule count */
2344 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
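	/*
	 * Added note: fdir_pballoc encodes how much packet buffer was set
	 * aside for Flow Director, and two filter entries are reserved, so
	 * e.g. a pballoc setting of 1 (64KB) reports (1024 << 1) - 2 = 2046
	 * usable perfect-filter rules here.
	 */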
	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;
2355 /* fill out the flow spec entry */
2357 /* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}
2378 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2379 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2380 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2381 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2382 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2383 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2384 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2385 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2386 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2387 fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2388 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2389 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2390 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2391 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2392 fsp->flow_type |= FLOW_EXT;
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}

static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
2469 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2472 struct ixgbe_adapter *adapter = netdev_priv(dev);
2473 int ret = -EOPNOTSUPP;
	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
2500 static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2501 struct ixgbe_fdir_filter *input,
2504 struct ixgbe_hw *hw = &adapter->hw;
2505 struct hlist_node *node2;
2506 struct ixgbe_fdir_filter *rule, *parent;
2512 hlist_for_each_entry_safe(rule, node2,
2513 &adapter->fdir_filter_list, fdir_node) {
2514 /* hash found, or no matching entry */
2515 if (rule->sw_idx >= sw_idx)
2520 /* if there is an old rule occupying our place remove it */
2521 if (rule && (rule->sw_idx == sw_idx)) {
2522 if (!input || (rule->filter.formatted.bkt_hash !=
2523 input->filter.formatted.bkt_hash)) {
2524 err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2529 hlist_del(&rule->fdir_node);
2531 adapter->fdir_filter_count--;
2535 * If no input this was a delete, err should be 0 if a rule was
2536 * successfully found and removed from the list else -EINVAL
2541 /* initialize node and set software index */
2542 INIT_HLIST_NODE(&input->fdir_node);
2544 /* add filter to the list */
2546 hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2548 hlist_add_head(&input->fdir_node,
2549 &adapter->fdir_filter_list);
2552 adapter->fdir_filter_count++;
2557 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2560 switch (fsp->flow_type & ~FLOW_EXT) {
2562 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2565 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2568 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2571 switch (fsp->h_u.usr_ip4_spec.proto) {
2573 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2576 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2579 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2582 if (!fsp->m_u.usr_ip4_spec.proto) {
2583 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2597 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2598 struct ethtool_rxnfc *cmd)
2600 struct ethtool_rx_flow_spec *fsp =
2601 (struct ethtool_rx_flow_spec *)&cmd->fs;
2602 struct ixgbe_hw *hw = &adapter->hw;
2603 struct ixgbe_fdir_filter *input;
2604 union ixgbe_atr_input mask;
2608 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2611 /* ring_cookie is a masked into a set of queues and ixgbe pools or
2612 * we use the drop index.
2614 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
2615 queue = IXGBE_FDIR_DROP_QUEUE;
2617 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
2618 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
2620 if (!vf && (ring >= adapter->num_rx_queues))
2623 ((vf > adapter->num_vfs) ||
2624 ring >= adapter->num_rx_queues_per_pool))
2627 /* Map the ring onto the absolute queue index */
2629 queue = adapter->rx_ring[ring]->reg_idx;
2632 adapter->num_rx_queues_per_pool) + ring;
2635 /* Don't allow indexes to exist outside of available space */
2636 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2637 e_err(drv, "Location out of range\n");
2641 input = kzalloc(sizeof(*input), GFP_ATOMIC);
2645 memset(&mask, 0, sizeof(union ixgbe_atr_input));
2648 input->sw_idx = fsp->location;
2650 /* record flow type */
2651 if (!ixgbe_flowspec_to_flow_type(fsp,
2652 &input->filter.formatted.flow_type)) {
2653 e_err(drv, "Unrecognized flow type\n");
2657 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2658 IXGBE_ATR_L4TYPE_MASK;
2660 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2661 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2663 /* Copy input into formatted structures */
2664 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2665 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2666 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2667 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2668 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2669 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2670 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2671 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2673 if (fsp->flow_type & FLOW_EXT) {
2674 input->filter.formatted.vm_pool =
2675 (unsigned char)ntohl(fsp->h_ext.data[1]);
2676 mask.formatted.vm_pool =
2677 (unsigned char)ntohl(fsp->m_ext.data[1]);
2678 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2679 mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2680 input->filter.formatted.flex_bytes =
2681 fsp->h_ext.vlan_etype;
2682 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2685 /* determine if we need to drop or route the packet */
2686 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2687 input->action = IXGBE_FDIR_DROP_QUEUE;
2689 input->action = fsp->ring_cookie;
2691 spin_lock(&adapter->fdir_perfect_lock);
2693 if (hlist_empty(&adapter->fdir_filter_list)) {
2694 /* save mask and program input mask into HW */
2695 memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2696 err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2698 e_err(drv, "Error writing mask\n");
2699 goto err_out_w_lock;
2701 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2702 e_err(drv, "Only one mask supported per port\n");
2703 goto err_out_w_lock;
2706 /* apply mask and compute/store hash */
2707 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2709 /* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
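/*
 * Added usage example (illustrative only): a classification rule such as
 *   ethtool -N <iface> flow-type tcp4 src-ip 192.168.0.1 dst-port 80 action 4 loc 1
 * reaches this function as an ethtool_rx_flow_spec and is written as a
 * Flow Director perfect filter steering matching packets to Rx queue 4,
 * while "action -1" selects IXGBE_FDIR_DROP_QUEUE and drops them.
 */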
2727 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2728 struct ethtool_rxnfc *cmd)
2730 struct ethtool_rx_flow_spec *fsp =
2731 (struct ethtool_rx_flow_spec *)&cmd->fs;
2734 spin_lock(&adapter->fdir_perfect_lock);
2735 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2736 spin_unlock(&adapter->fdir_perfect_lock);
2741 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2742 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2743 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2744 struct ethtool_rxnfc *nfc)
2746 u32 flags2 = adapter->flags2;
2749 * RSS does not support anything other than hashing
2750 * to queues on src and dst IPs and ports
2752 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2753 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2756 switch (nfc->flow_type) {
2759 if (!(nfc->data & RXH_IP_SRC) ||
2760 !(nfc->data & RXH_IP_DST) ||
2761 !(nfc->data & RXH_L4_B_0_1) ||
2762 !(nfc->data & RXH_L4_B_2_3))
2766 if (!(nfc->data & RXH_IP_SRC) ||
2767 !(nfc->data & RXH_IP_DST))
2769 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2771 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2773 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2774 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2781 if (!(nfc->data & RXH_IP_SRC) ||
2782 !(nfc->data & RXH_IP_DST))
2784 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2786 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2788 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2789 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2795 case AH_ESP_V4_FLOW:
2799 case AH_ESP_V6_FLOW:
2803 if (!(nfc->data & RXH_IP_SRC) ||
2804 !(nfc->data & RXH_IP_DST) ||
2805 (nfc->data & RXH_L4_B_0_1) ||
2806 (nfc->data & RXH_L4_B_2_3))
2813 /* if we changed something we need to update flags */
2814 if (flags2 != adapter->flags2) {
2815 struct ixgbe_hw *hw = &adapter->hw;
2817 unsigned int pf_pool = adapter->num_vfs;
2819 if ((hw->mac.type >= ixgbe_mac_X550) &&
2820 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2821 mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
2823 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2825 if ((flags2 & UDP_RSS_FLAGS) &&
2826 !(adapter->flags2 & UDP_RSS_FLAGS))
2827 e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
2829 adapter->flags2 = flags2;
2831 /* Perform hash on these packet types */
2832 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2833 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2834 | IXGBE_MRQC_RSS_FIELD_IPV6
2835 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2837 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2838 IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
2840 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2841 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2843 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2844 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
		else
			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
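/*
 * Added usage note: this path implements the rx-flow-hash interface, e.g.
 * "ethtool -N <iface> rx-flow-hash udp4 sdfn" adds the UDP port pair to
 * the IPv4 hash inputs (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP above), and
 * "ethtool -n <iface> rx-flow-hash udp4" reads the setting back through
 * ixgbe_get_rss_hash_opts().
 */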
2856 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2858 struct ixgbe_adapter *adapter = netdev_priv(dev);
2859 int ret = -EOPNOTSUPP;
	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 16;
	else
		return 64;
}
2886 static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
2888 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2890 return sizeof(adapter->rss_key);
2893 static u32 ixgbe_rss_indir_size(struct net_device *netdev)
2895 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2897 return ixgbe_rss_indir_tbl_entries(adapter);
2900 static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
2902 int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
2904 for (i = 0; i < reta_size; i++)
2905 indir[i] = adapter->rss_indir_tbl[i];
2908 static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2911 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2914 *hfunc = ETH_RSS_HASH_TOP;
2917 ixgbe_get_reta(adapter, indir);
2920 memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
2925 static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
2926 const u8 *key, const u8 hfunc)
2928 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2930 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
2935 /* Fill out the redirection table */
2937 int max_queues = min_t(int, adapter->num_rx_queues,
2938 ixgbe_rss_indir_tbl_max(adapter));
2940 /*Allow at least 2 queues w/ SR-IOV.*/
2941 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
2945 /* Verify user input. */
2946 for (i = 0; i < reta_entries; i++)
2947 if (indir[i] >= max_queues)
2950 for (i = 0; i < reta_entries; i++)
2951 adapter->rss_indir_tbl[i] = indir[i];
2954 /* Fill out the rss hash key */
2956 memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
2958 ixgbe_store_reta(adapter);
2963 static int ixgbe_get_ts_info(struct net_device *dev,
2964 struct ethtool_ts_info *info)
2966 struct ixgbe_adapter *adapter = netdev_priv(dev);
2968 switch (adapter->hw.mac.type) {
2969 case ixgbe_mac_X550:
2970 case ixgbe_mac_X550EM_x:
2971 case ixgbe_mac_X540:
2972 case ixgbe_mac_82599EB:
2973 info->so_timestamping =
2974 SOF_TIMESTAMPING_TX_SOFTWARE |
2975 SOF_TIMESTAMPING_RX_SOFTWARE |
2976 SOF_TIMESTAMPING_SOFTWARE |
2977 SOF_TIMESTAMPING_TX_HARDWARE |
2978 SOF_TIMESTAMPING_RX_HARDWARE |
2979 SOF_TIMESTAMPING_RAW_HARDWARE;
		if (adapter->ptp_clock)
			info->phc_index = ptp_clock_index(adapter->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
			(1 << HWTSTAMP_TX_ON);

		info->rx_filters =
			(1 << HWTSTAMP_FILTER_NONE) |
			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}
	return 0;
}
3002 static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3004 unsigned int max_combined;
3005 u8 tcs = netdev_get_num_tc(adapter->netdev);
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* SR-IOV currently only allows one queue on the PF */
		max_combined = 1;
	} else if (tcs > 1) {
		/* For DCB report channels per traffic class */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
		/* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = ixgbe_max_rss_indices(adapter);
	}

	return max_combined;
}
3036 static void ixgbe_get_channels(struct net_device *dev,
3037 struct ethtool_channels *ch)
3039 struct ixgbe_adapter *adapter = netdev_priv(dev);
3041 /* report maximum channels */
3042 ch->max_combined = ixgbe_max_channels(adapter);
3044 /* report info for other vector */
3045 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3046 ch->max_other = NON_Q_VECTORS;
3047 ch->other_count = NON_Q_VECTORS;
3050 /* record RSS queues */
3051 ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3053 /* nothing else to report if RSS is disabled */
3054 if (ch->combined_count == 1)
3057 /* we do not support ATR queueing if SR-IOV is enabled */
3058 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3061 /* same thing goes for being DCB enabled */
3062 if (netdev_get_num_tc(dev) > 1)
3065 /* if ATR is disabled we can exit */
3066 if (!adapter->atr_sample_rate)
3069 /* report flow director queues as maximum channels */
3070 ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
3073 static int ixgbe_set_channels(struct net_device *dev,
3074 struct ethtool_channels *ch)
3076 struct ixgbe_adapter *adapter = netdev_priv(dev);
3077 unsigned int count = ch->combined_count;
3078 u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;
3092 /* update feature limits from largest to smallest supported values */
3093 adapter->ring_feature[RING_F_FDIR].limit = count;
3096 if (count > max_rss_indices)
3097 count = max_rss_indices;
3098 adapter->ring_feature[RING_F_RSS].limit = count;
3101 /* cap FCoE limit at 8 */
3102 if (count > IXGBE_FCRETA_SIZE)
3103 count = IXGBE_FCRETA_SIZE;
3104 adapter->ring_feature[RING_F_FCOE].limit = count;
3107 /* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
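/*
 * Added usage note: queue counts are changed with e.g.
 * "ethtool -L <iface> combined 16" and read back with "ethtool -l"; only
 * the combined parameter is accepted here because Tx and Rx always share
 * a q_vector in this driver.
 */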
3111 static int ixgbe_get_module_info(struct net_device *dev,
3112 struct ethtool_modinfo *modinfo)
3114 struct ixgbe_adapter *adapter = netdev_priv(dev);
3115 struct ixgbe_hw *hw = &adapter->hw;
3117 u8 sff8472_rev, addr_mode;
3118 bool page_swap = false;
3120 /* Check whether we support SFF-8472 or not */
3121 status = hw->phy.ops.read_i2c_eeprom(hw,
3122 IXGBE_SFF_SFF_8472_COMP,
3127 /* addressing mode is not supported */
3128 status = hw->phy.ops.read_i2c_eeprom(hw,
3129 IXGBE_SFF_SFF_8472_SWAP,
3134 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3135 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3139 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
3140 /* We have a SFP, but it does not support SFF-8472 */
3141 modinfo->type = ETH_MODULE_SFF_8079;
3142 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3144 /* We have a SFP which supports a revision of SFF-8472. */
3145 modinfo->type = ETH_MODULE_SFF_8472;
3146 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3152 static int ixgbe_get_module_eeprom(struct net_device *dev,
3153 struct ethtool_eeprom *ee,
3156 struct ixgbe_adapter *adapter = netdev_priv(dev);
3157 struct ixgbe_hw *hw = &adapter->hw;
3158 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
3165 for (i = ee->offset; i < ee->offset + ee->len; i++) {
3166 /* I2C reads can take long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;

		if (i < ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status != 0)
			return -EIO;

		data[i - ee->offset] = databyte;
	}

	return 0;
}
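/*
 * Added usage note: the two handlers above service "ethtool -m <iface>",
 * which dumps the SFP+ module EEPROM. Offsets below ETH_MODULE_SFF_8079_LEN
 * are read from the SFF-8079 page at I2C address 0xA0, the remainder from
 * the SFF-8472 diagnostics page at 0xA2.
 */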
3184 static const struct ethtool_ops ixgbe_ethtool_ops = {
3185 .get_settings = ixgbe_get_settings,
3186 .set_settings = ixgbe_set_settings,
3187 .get_drvinfo = ixgbe_get_drvinfo,
3188 .get_regs_len = ixgbe_get_regs_len,
3189 .get_regs = ixgbe_get_regs,
3190 .get_wol = ixgbe_get_wol,
3191 .set_wol = ixgbe_set_wol,
3192 .nway_reset = ixgbe_nway_reset,
3193 .get_link = ethtool_op_get_link,
3194 .get_eeprom_len = ixgbe_get_eeprom_len,
3195 .get_eeprom = ixgbe_get_eeprom,
3196 .set_eeprom = ixgbe_set_eeprom,
3197 .get_ringparam = ixgbe_get_ringparam,
3198 .set_ringparam = ixgbe_set_ringparam,
3199 .get_pauseparam = ixgbe_get_pauseparam,
3200 .set_pauseparam = ixgbe_set_pauseparam,
3201 .get_msglevel = ixgbe_get_msglevel,
3202 .set_msglevel = ixgbe_set_msglevel,
3203 .self_test = ixgbe_diag_test,
3204 .get_strings = ixgbe_get_strings,
3205 .set_phys_id = ixgbe_set_phys_id,
3206 .get_sset_count = ixgbe_get_sset_count,
3207 .get_ethtool_stats = ixgbe_get_ethtool_stats,
3208 .get_coalesce = ixgbe_get_coalesce,
3209 .set_coalesce = ixgbe_set_coalesce,
3210 .get_rxnfc = ixgbe_get_rxnfc,
3211 .set_rxnfc = ixgbe_set_rxnfc,
3212 .get_rxfh_indir_size = ixgbe_rss_indir_size,
3213 .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
3214 .get_rxfh = ixgbe_get_rxfh,
3215 .set_rxfh = ixgbe_set_rxfh,
3216 .get_channels = ixgbe_get_channels,
3217 .set_channels = ixgbe_set_channels,
3218 .get_ts_info = ixgbe_get_ts_info,
3219 .get_module_info = ixgbe_get_module_info,
	.get_module_eeprom	= ixgbe_get_module_eeprom,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}