/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf] = &ixgbevf_X540_vf_info,
	[board_X550_vf] = &ixgbevf_X550_vf_info,
	[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}
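
/* A PCIe function that has been surprise-removed returns all ones on every
 * read, so IXGBE_FAILED_READ_REG (all Fs) coming back from readl() above is
 * treated as a hint that the adapter may be gone.  ixgbevf_check_remove()
 * confirms by re-reading the VFSTATUS register before declaring the device
 * removed, and short-circuits when the failed read was VFSTATUS itself so
 * that the confirmation read cannot recurse.
 */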

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
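
/* A sketch of the IVAR layout implied by the math above: each 32-bit
 * VTIVAR register holds four 8-bit entries covering two queues, with the
 * Rx and Tx causes of a queue pair interleaved:
 *
 *   bits  7:0   Rx cause, even queue  (index = 0)
 *   bits 15:8   Tx cause, even queue  (index = 8)
 *   bits 23:16  Rx cause, odd queue   (index = 16)
 *   bits 31:24  Tx cause, odd queue   (index = 24)
 *
 * For example, Rx queue 3 lands in VTIVAR(1) at index 16 * (3 & 1) = 16,
 * while Tx queue 3 lands in the same register at index 16 + 8 = 24.
 */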

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(tx_ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
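
/* The ternary above handles tail-pointer wraparound.  A worked example with
 * a 512-entry ring: head = 500, tail = 10 means the hardware still owns
 * 10 + 512 - 500 = 22 descriptors, while head = 10, tail = 500 is the
 * simple non-wrapped case with 490 descriptors pending.
 */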

static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough.  A queue is suspected
	 * hung when no transmit has completed since the previous check
	 * AND there is at least one packet pending.  The ARMED bit is
	 * set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}
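
/* The ARMED bit gives a queue two consecutive checks to make progress:
 * the first time completions stall with work still pending,
 * test_and_set_bit() arms the bit and returns the old value (false); only
 * a second consecutive stall returns true and is reported as a hang.  Any
 * completed packet in between disarms the bit and restarts the count.
 */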

static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
		ixgbevf_service_event_schedule(adapter);
	}
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
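
/* The wake-up test above is deliberately hysteretic: the xmit path stops
 * the queue when descriptors run low, and it is only restarted once at
 * least TX_WAKE_THRESHOLD descriptors (room for two worst-case frames) are
 * free again, which keeps the queue from bouncing between the stopped and
 * running states under load.
 */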

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	napi_gro_receive(&q_vector->napi, skb);
}

#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
				   union ixgbe_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (rss_type == IXGBE_RXDADV_RSSTYPE_NONE)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP checksum was computed and an IP error was flagged */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	return true;
}

static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_rx_buff_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}

/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 IXGBEVF_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static inline bool ixgbevf_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				union ixgbe_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as is */
		if (likely(!ixgbevf_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ixgbevf_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;

#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
		return false;

#endif
	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	atomic_inc(&page->_count);

	return true;
}
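
/* Buffer recycling sketch for the common PAGE_SIZE < 8192 case: each page
 * is split into two IXGBEVF_RX_BUFSZ halves.  While one half is being
 * written by hardware the other may still be held by the stack; XORing
 * page_offset with IXGBEVF_RX_BUFSZ flips between the halves, and the
 * page_count(page) != 1 test above refuses to recycle while anyone else
 * still holds a reference to the page.
 */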

static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
					       union ixgbe_adv_rx_desc *rx_desc,
					       struct sk_buff *skb)
{
	struct ixgbevf_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IXGBEVF_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      IXGBEVF_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->dma = 0;
	rx_buffer->page = NULL;

	return skb;
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (ixgbevf_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
						   per_ring_budget);
		work_done += cleaned;
		clean_complete &= (cleaned < per_ring_budget);
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete_done(napi, work_done);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;

		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* Tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* Rx or Rx/Tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}
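
/* eims_enable_mask ends up with one bit per vector: bit v_idx for every
 * queue vector plus eims_other for the mailbox/link vector.  The same mask
 * is later written to VTEIAM, VTEIAC and VTEIMS by ixgbevf_irq_enable(),
 * so a single value describes the auto-mask, auto-clear and enable state
 * of all vectors.
 */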

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
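
/* The smoothing step is a weighted harmonic mean that moves the interval
 * only part of the way toward the target on each pass.  As a worked
 * example, taking IXGBE_20K_ITR as 200 and IXGBE_8K_ITR as 500 (their
 * values in ixgbevf.h), stepping from 20K ints/s toward 8K ints/s gives
 *
 *   new_itr = (10 * 500 * 200) / ((9 * 500) + 200) = 1000000 / 4700 ~= 212
 *
 * so a sudden change in traffic ramps the interrupt rate over several
 * invocations instead of jumping in a single step.
 */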

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	ixgbevf_service_event_schedule(adapter);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/* If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt Error: %d\n",
			       err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
		       err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}

	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];

		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);    /* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1 << 8) |    /* HTHRESH = 1 */
		   32;          /* PTHRESH = 32 */

	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	}  while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		pr_err("Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
	srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the Rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}

static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}

static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vfmrqc = 0, vfreta = 0;
	u16 rss_i = adapter->num_rx_queues;
	u16 i, j;

	/* Fill out hash function seeds */
	netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
	for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);

	for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
		if (j == rss_i)
			j = 0;

		adapter->rss_indir_tbl[i] = j;

		vfreta |= j << (i & 0x3) * 8;
		if (i % 4 == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
			vfreta = 0;
		}
	}

	/* Perform hash on these packet types */
	vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
		  IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
		  IXGBE_VFMRQC_RSS_FIELD_IPV6 |
		  IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;

	vfmrqc |= IXGBE_VFMRQC_RSSEN;

	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
}
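
/* RETA packing sketch: the redirection table stores one byte per entry and
 * four entries per 32-bit VFRETA register, so entry i is written to
 * register i >> 2 at bit offset (i & 0x3) * 8.  With rss_i = 2, for
 * example, the table simply alternates 0, 1, 0, 1, ... across the rings.
 */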

static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	ixgbevf_configure_srrctl(adapter, reg_idx);

	/* allow any size packet since we can handle overflow */
	rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;

	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	ixgbevf_setup_psrtype(adapter);
	if (hw->mac.type >= ixgbe_mac_X550_vf)
		ixgbevf_setup_vfmrqc(adapter);

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/* If the list is empty then send message to PF driver to
		 * clear all MAC VLANs on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	}
}

static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = adapter->num_rx_queues;
	unsigned int num_tx_queues = adapter->num_tx_queues;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* we need only one Tx queue */
		num_tx_queues = 1;

		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if ((adapter->num_rx_queues != num_rx_queues) ||
	    (adapter->num_tx_queues != num_tx_queues)) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
	}

	return 0;
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_12,
		      ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	ixgbevf_irq_enable(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->service_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);
}
2097 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2098 * @rx_ring: ring to free buffers from
2100 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2102 struct device *dev = rx_ring->dev;
2106 /* Free Rx ring sk_buff */
2108 dev_kfree_skb(rx_ring->skb);
2109 rx_ring->skb = NULL;
2112 /* ring already cleared, nothing to do */
2113 if (!rx_ring->rx_buffer_info)
2116 /* Free all the Rx ring pages */
2117 for (i = 0; i < rx_ring->count; i++) {
2118 struct ixgbevf_rx_buffer *rx_buffer;
2120 rx_buffer = &rx_ring->rx_buffer_info[i];
2122 dma_unmap_page(dev, rx_buffer->dma,
2123 PAGE_SIZE, DMA_FROM_DEVICE);
2125 if (rx_buffer->page)
2126 __free_page(rx_buffer->page);
2127 rx_buffer->page = NULL;
2130 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2131 memset(rx_ring->rx_buffer_info, 0, size);
2133 /* Zero out the descriptor ring */
2134 memset(rx_ring->desc, 0, rx_ring->size);
2138 * ixgbevf_clean_tx_ring - Free Tx Buffers
2139 * @tx_ring: ring to be cleaned
2141 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2143 struct ixgbevf_tx_buffer *tx_buffer_info;
2147 if (!tx_ring->tx_buffer_info)
2150 /* Free all the Tx ring sk_buffs */
2151 for (i = 0; i < tx_ring->count; i++) {
2152 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2153 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2156 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2157 memset(tx_ring->tx_buffer_info, 0, size);
2159 memset(tx_ring->desc, 0, tx_ring->size);
2163 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2164 * @adapter: board private structure
2166 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2170 for (i = 0; i < adapter->num_rx_queues; i++)
2171 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2175 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2176 * @adapter: board private structure
2178 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2182 for (i = 0; i < adapter->num_tx_queues; i++)
2183 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2186 void ixgbevf_down(struct ixgbevf_adapter *adapter)
2188 struct net_device *netdev = adapter->netdev;
2189 struct ixgbe_hw *hw = &adapter->hw;
2192 /* signal that we are down to the interrupt handler */
2193 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2194 return; /* do nothing if already down */
2196 /* disable all enabled Rx queues */
2197 for (i = 0; i < adapter->num_rx_queues; i++)
2198 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2200 usleep_range(10000, 20000);
2202 netif_tx_stop_all_queues(netdev);
2204 /* call carrier off first to avoid false dev_watchdog timeouts */
2205 netif_carrier_off(netdev);
2206 netif_tx_disable(netdev);
2208 ixgbevf_irq_disable(adapter);
2210 ixgbevf_napi_disable_all(adapter);
2212 del_timer_sync(&adapter->service_timer);
2214 /* disable transmits in the hardware now that interrupts are off */
2215 for (i = 0; i < adapter->num_tx_queues; i++) {
2216 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2218 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2219 IXGBE_TXDCTL_SWFLSH);
2222 if (!pci_channel_offline(adapter->pdev))
2223 ixgbevf_reset(adapter);
2225 ixgbevf_clean_all_tx_rings(adapter);
2226 ixgbevf_clean_all_rx_rings(adapter);
2229 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2231 WARN_ON(in_interrupt());
2233 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2236 ixgbevf_down(adapter);
2237 ixgbevf_up(adapter);
2239 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2242 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2244 struct ixgbe_hw *hw = &adapter->hw;
2245 struct net_device *netdev = adapter->netdev;
2247 if (hw->mac.ops.reset_hw(hw)) {
2248 hw_dbg(hw, "PF still resetting\n");
2250 hw->mac.ops.init_hw(hw);
2251 ixgbevf_negotiate_api(adapter);
2254 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2255 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2257 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
2261 adapter->last_reset = jiffies;
2264 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2267 int vector_threshold;
2269 /* We'll want at least 2 (vector_threshold):
2270 * 1) TxQ[0] + RxQ[0] handler
2271 * 2) Other (Link Status Change, etc.)
2273 vector_threshold = MIN_MSIX_COUNT;
2275 /* The more we get, the more we will assign to Tx/Rx Cleanup
2276 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2277 * Right now, we simply care about how many we'll get; we'll
2278 * set them up later while requesting irq's.
2280 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2281 vector_threshold, vectors);
2284 dev_err(&adapter->pdev->dev,
2285 "Unable to allocate MSI-X interrupts\n");
2286 kfree(adapter->msix_entries);
2287 adapter->msix_entries = NULL;
2291 /* Adjust for only the vectors we'll use, which is minimum
2292 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2293 * vectors we were allocated.
2295 adapter->num_msix_vectors = vectors;
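/* pci_enable_msix_range() grants anywhere between vector_threshold and
 * the requested count, returning the number actually allocated, or a
 * negative errno if even the minimum cannot be met. Only the negative
 * case is fatal here; a partial grant simply means fewer queue vectors
 * get used.
 */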
2301 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2302 * @adapter: board private structure to initialize
2304 * This is the top level queue allocation routine. The order here is very
2305 * important, starting with the largest set of features turned on at once,
2306 * and ending with the smallest set of features. This way large combinations
2307 * can be allocated if they're turned on, and smaller combinations are the
2308 * fallthrough conditions.
2311 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2313 struct ixgbe_hw *hw = &adapter->hw;
2314 unsigned int def_q = 0;
2315 unsigned int num_tcs = 0;
2318 /* Start with base case */
2319 adapter->num_rx_queues = 1;
2320 adapter->num_tx_queues = 1;
2322 spin_lock_bh(&adapter->mbx_lock);
2324 /* fetch queue configuration from the PF */
2325 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2327 spin_unlock_bh(&adapter->mbx_lock);
2332 /* we need as many queues as traffic classes */
2334 adapter->num_rx_queues = num_tcs;
2336 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2338 switch (hw->api_version) {
2339 case ixgbe_mbox_api_11:
2340 case ixgbe_mbox_api_12:
2341 adapter->num_rx_queues = rss;
2342 adapter->num_tx_queues = rss;
2350 * ixgbevf_alloc_queues - Allocate memory for all rings
2351 * @adapter: board private structure to initialize
2353 * We allocate one ring per queue at run-time since we don't know the
2354 * number of queues at compile-time. The polling_netdev array is
2355 * intended for Multiqueue, but should work fine with a single queue.
2357 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2359 struct ixgbevf_ring *ring;
2362 for (; tx < adapter->num_tx_queues; tx++) {
2363 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2365 goto err_allocation;
2367 ring->dev = &adapter->pdev->dev;
2368 ring->netdev = adapter->netdev;
2369 ring->count = adapter->tx_ring_count;
2370 ring->queue_index = tx;
2373 adapter->tx_ring[tx] = ring;
2376 for (; rx < adapter->num_rx_queues; rx++) {
2377 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2379 goto err_allocation;
2381 ring->dev = &adapter->pdev->dev;
2382 ring->netdev = adapter->netdev;
2384 ring->count = adapter->rx_ring_count;
2385 ring->queue_index = rx;
2388 adapter->rx_ring[rx] = ring;
2395 kfree(adapter->tx_ring[--tx]);
2396 adapter->tx_ring[tx] = NULL;
2400 kfree(adapter->rx_ring[--rx]);
2401 adapter->rx_ring[rx] = NULL;
2407 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2408 * @adapter: board private structure to initialize
2410 * Attempt to configure the interrupts using the best available
2411 * capabilities of the hardware and the kernel.
2413 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2415 struct net_device *netdev = adapter->netdev;
2417 int vector, v_budget;
2419 /* It's easy to be greedy for MSI-X vectors, but it really
2420 * doesn't do us much good if we have a lot more vectors
2421 * than CPUs. So let's be conservative and only ask for
2422 * (roughly) the same number of vectors as there are CPUs.
2423 * The default is to use pairs of vectors.
2425 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2426 v_budget = min_t(int, v_budget, num_online_cpus());
2427 v_budget += NON_Q_VECTORS;
2429 /* A failure in MSI-X entry allocation isn't fatal, but it does
2430 * mean we disable MSI-X capabilities of the adapter.
2432 adapter->msix_entries = kcalloc(v_budget,
2433 sizeof(struct msix_entry), GFP_KERNEL);
2434 if (!adapter->msix_entries) {
2439 for (vector = 0; vector < v_budget; vector++)
2440 adapter->msix_entries[vector].entry = vector;
2442 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2446 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2450 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
2457 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2458 * @adapter: board private structure to initialize
2460 * We allocate one q_vector per queue interrupt. If allocation fails we return -ENOMEM.
2463 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2465 int q_idx, num_q_vectors;
2466 struct ixgbevf_q_vector *q_vector;
2468 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2470 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2471 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2474 q_vector->adapter = adapter;
2475 q_vector->v_idx = q_idx;
2476 netif_napi_add(adapter->netdev, &q_vector->napi,
2478 #ifdef CONFIG_NET_RX_BUSY_POLL
2479 napi_hash_add(&q_vector->napi);
2481 adapter->q_vector[q_idx] = q_vector;
2489 q_vector = adapter->q_vector[q_idx];
2490 #ifdef CONFIG_NET_RX_BUSY_POLL
2491 napi_hash_del(&q_vector->napi);
2493 netif_napi_del(&q_vector->napi);
2495 adapter->q_vector[q_idx] = NULL;
2501 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2502 * @adapter: board private structure to initialize
2504 * This function frees the memory allocated to the q_vectors. In addition if
2505 * NAPI is enabled it will delete any references to the NAPI struct prior
2506 * to freeing the q_vector.
2508 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2510 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2512 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2513 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2515 adapter->q_vector[q_idx] = NULL;
2516 #ifdef CONFIG_NET_RX_BUSY_POLL
2517 napi_hash_del(&q_vector->napi);
2519 netif_napi_del(&q_vector->napi);
2525 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2526 * @adapter: board private structure
2529 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2531 pci_disable_msix(adapter->pdev);
2532 kfree(adapter->msix_entries);
2533 adapter->msix_entries = NULL;
2537 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2538 * @adapter: board private structure to initialize
2541 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2545 /* Number of supported queues */
2546 ixgbevf_set_num_queues(adapter);
2548 err = ixgbevf_set_interrupt_capability(adapter);
2550 hw_dbg(&adapter->hw,
2551 "Unable to setup interrupt capabilities\n");
2552 goto err_set_interrupt;
2555 err = ixgbevf_alloc_q_vectors(adapter);
2557 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2558 goto err_alloc_q_vectors;
2561 err = ixgbevf_alloc_queues(adapter);
2563 pr_err("Unable to allocate memory for queues\n");
2564 goto err_alloc_queues;
2567 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
2568 (adapter->num_rx_queues > 1) ? "Enabled" :
2569 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2571 set_bit(__IXGBEVF_DOWN, &adapter->state);
2575 ixgbevf_free_q_vectors(adapter);
2576 err_alloc_q_vectors:
2577 ixgbevf_reset_interrupt_capability(adapter);
2583 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2584 * @adapter: board private structure to clear interrupt scheme on
2586 * We go through and clear interrupt-specific resources and reset the structure
2587 * to pre-load conditions
2589 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2593 for (i = 0; i < adapter->num_tx_queues; i++) {
2594 kfree(adapter->tx_ring[i]);
2595 adapter->tx_ring[i] = NULL;
2597 for (i = 0; i < adapter->num_rx_queues; i++) {
2598 kfree(adapter->rx_ring[i]);
2599 adapter->rx_ring[i] = NULL;
2602 adapter->num_tx_queues = 0;
2603 adapter->num_rx_queues = 0;
2605 ixgbevf_free_q_vectors(adapter);
2606 ixgbevf_reset_interrupt_capability(adapter);
2610 * ixgbevf_sw_init - Initialize general software structures
2611 * @adapter: board private structure to initialize
2613 * ixgbevf_sw_init initializes the Adapter private data structure.
2614 * Fields are initialized based on PCI device information and
2615 * OS network device settings (MTU size).
2617 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2619 struct ixgbe_hw *hw = &adapter->hw;
2620 struct pci_dev *pdev = adapter->pdev;
2621 struct net_device *netdev = adapter->netdev;
2624 /* PCI config space info */
2625 hw->vendor_id = pdev->vendor;
2626 hw->device_id = pdev->device;
2627 hw->revision_id = pdev->revision;
2628 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2629 hw->subsystem_device_id = pdev->subsystem_device;
2631 hw->mbx.ops.init_params(hw);
2633 /* assume legacy case in which PF would only give VF 2 queues */
2634 hw->mac.max_tx_queues = 2;
2635 hw->mac.max_rx_queues = 2;
2637 /* lock to protect mailbox accesses */
2638 spin_lock_init(&adapter->mbx_lock);
2640 err = hw->mac.ops.reset_hw(hw);
2642 dev_info(&pdev->dev,
2643 "PF still in reset state. Is the PF interface up?\n");
2645 err = hw->mac.ops.init_hw(hw);
2647 pr_err("init_hw failed: %d\n", err);
2650 ixgbevf_negotiate_api(adapter);
2651 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2653 dev_info(&pdev->dev, "Error reading MAC address\n");
2654 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2655 dev_info(&pdev->dev,
2656 "MAC address not assigned by administrator.\n");
2657 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2660 if (!is_valid_ether_addr(netdev->dev_addr)) {
2661 dev_info(&pdev->dev, "Assigning random MAC address\n");
2662 eth_hw_addr_random(netdev);
2663 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2666 /* Enable dynamic interrupt throttling rates */
2667 adapter->rx_itr_setting = 1;
2668 adapter->tx_itr_setting = 1;
2670 /* set default ring sizes */
2671 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2672 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2674 set_bit(__IXGBEVF_DOWN, &adapter->state);
2681 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2683 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2684 if (current_counter < last_counter) \
2685 counter += 0x100000000LL; \
2686 last_counter = current_counter; \
2687 counter &= 0xFFFFFFFF00000000LL; \
2688 counter |= current_counter; \
2691 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2693 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2694 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2695 u64 current_counter = (current_counter_msb << 32) | \
2696 current_counter_lsb; \
2697 if (current_counter < last_counter) \
2698 counter += 0x1000000000LL; \
2699 last_counter = current_counter; \
2700 counter &= 0xFFFFFFF000000000LL; \
2701 counter |= current_counter; \
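/* A minimal sketch of the 36-bit wrap handling above, with illustrative
 * values (not driver code, kept out of the build with #if 0):
 */
#if 0
	u64 last = 0xFFFFFFFF0ULL;	/* previous raw 36-bit reading */
	u64 curr = 0x10ULL;		/* new reading: hardware wrapped */
	u64 counter = last;		/* 64-bit software accumulator */

	if (curr < last)		/* wrap detected */
		counter += 0x1000000000ULL;	/* add 2^36 */
	counter &= 0xFFFFFFF000000000ULL;	/* keep bits 36..63 */
	counter |= curr;			/* splice in new low 36 bits */
#endif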
2704 * ixgbevf_update_stats - Update the board statistics counters.
2705 * @adapter: board private structure
2707 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2709 struct ixgbe_hw *hw = &adapter->hw;
2712 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2713 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2716 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2717 adapter->stats.vfgprc);
2718 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2719 adapter->stats.vfgptc);
2720 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2721 adapter->stats.last_vfgorc,
2722 adapter->stats.vfgorc);
2723 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2724 adapter->stats.last_vfgotc,
2725 adapter->stats.vfgotc);
2726 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2727 adapter->stats.vfmprc);
2729 for (i = 0; i < adapter->num_rx_queues; i++) {
2730 adapter->hw_csum_rx_error +=
2731 adapter->rx_ring[i]->hw_csum_rx_error;
2732 adapter->rx_ring[i]->hw_csum_rx_error = 0;
2737 * ixgbevf_service_timer - Timer Call-back
2738 * @data: pointer to adapter cast into an unsigned long
2740 static void ixgbevf_service_timer(unsigned long data)
2742 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2744 /* Reset the timer */
2745 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
2747 ixgbevf_service_event_schedule(adapter);
2750 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
2752 if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
2755 adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
2757 /* If we're already down or resetting, just bail */
2758 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2759 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2762 adapter->tx_timeout_count++;
2764 ixgbevf_reinit_locked(adapter);
2768 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
2769 * @adapter: pointer to the device adapter structure
2771 * This function serves two purposes. First it strobes the interrupt lines
2772 * in order to make certain interrupts are occurring. Second, it sets the
2773 * bits needed to check for TX hangs. As a result we should immediately
2774 * determine if a hang has occurred.
2776 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
2778 struct ixgbe_hw *hw = &adapter->hw;
2782 /* If we're down or resetting, just bail */
2783 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2784 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2787 /* Force detection of hung controller */
2788 if (netif_carrier_ok(adapter->netdev)) {
2789 for (i = 0; i < adapter->num_tx_queues; i++)
2790 set_check_for_tx_hang(adapter->tx_ring[i]);
2793 /* get one bit for every active Tx/Rx interrupt vector */
2794 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2795 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2797 if (qv->rx.ring || qv->tx.ring)
2801 /* Cause software interrupt to ensure rings are cleaned */
2802 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
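/* Writing the mask to VTEICS sets those interrupt-cause bits, raising a
 * software-triggered interrupt on every vector that owns a ring; a ring
 * that stalled with work outstanding gets its handler run and cleaned
 * as a side effect.
 */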
2806 * ixgbevf_watchdog_update_link - update the link status
2807 * @adapter: pointer to the device adapter structure
2809 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
2811 struct ixgbe_hw *hw = &adapter->hw;
2812 u32 link_speed = adapter->link_speed;
2813 bool link_up = adapter->link_up;
2816 spin_lock_bh(&adapter->mbx_lock);
2818 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2820 spin_unlock_bh(&adapter->mbx_lock);
2822 /* if check for link returns error we will need to reset */
2823 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
2824 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
2828 adapter->link_up = link_up;
2829 adapter->link_speed = link_speed;
2833 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
2834 * print link up message
2835 * @adapter: pointer to the device adapter structure
2837 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
2839 struct net_device *netdev = adapter->netdev;
2841 /* only continue if link was previously down */
2842 if (netif_carrier_ok(netdev))
2845 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
2846 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2848 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
2850 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
2854 netif_carrier_on(netdev);
2858 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
2859 * print link down message
2860 * @adapter: pointer to the adapter structure
2862 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
2864 struct net_device *netdev = adapter->netdev;
2866 adapter->link_speed = 0;
2868 /* only continue if link was up previously */
2869 if (!netif_carrier_ok(netdev))
2872 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2874 netif_carrier_off(netdev);
2878 * ixgbevf_watchdog_subtask - update the link status and statistics
2879 * @adapter: board private structure
2881 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
2883 /* if interface is down do nothing */
2884 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2885 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2888 ixgbevf_watchdog_update_link(adapter);
2890 if (adapter->link_up)
2891 ixgbevf_watchdog_link_is_up(adapter);
2893 ixgbevf_watchdog_link_is_down(adapter);
2895 ixgbevf_update_stats(adapter);
2899 * ixgbevf_service_task - manages and runs subtasks
2900 * @work: pointer to work_struct containing our data
2902 static void ixgbevf_service_task(struct work_struct *work)
2904 struct ixgbevf_adapter *adapter = container_of(work,
2905 struct ixgbevf_adapter,
2907 struct ixgbe_hw *hw = &adapter->hw;
2909 if (IXGBE_REMOVED(hw->hw_addr)) {
2910 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2912 ixgbevf_down(adapter);
2918 ixgbevf_queue_reset_subtask(adapter);
2919 ixgbevf_reset_subtask(adapter);
2920 ixgbevf_watchdog_subtask(adapter);
2921 ixgbevf_check_hang_subtask(adapter);
2923 ixgbevf_service_event_complete(adapter);
2927 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2928 * @tx_ring: Tx descriptor ring for a specific queue
2930 * Free all transmit software resources
2932 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2934 ixgbevf_clean_tx_ring(tx_ring);
2936 vfree(tx_ring->tx_buffer_info);
2937 tx_ring->tx_buffer_info = NULL;
2939 /* if not set, then don't free */
2943 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2946 tx_ring->desc = NULL;
2950 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2951 * @adapter: board private structure
2953 * Free all transmit software resources
2955 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2959 for (i = 0; i < adapter->num_tx_queues; i++)
2960 if (adapter->tx_ring[i]->desc)
2961 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2965 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2966 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
2968 * Return 0 on success, negative on failure
2970 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2974 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2975 tx_ring->tx_buffer_info = vzalloc(size);
2976 if (!tx_ring->tx_buffer_info)
2979 /* round up to nearest 4K */
2980 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2981 tx_ring->size = ALIGN(tx_ring->size, 4096);
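/* Each advanced Tx descriptor is 16 bytes, so e.g. a 1024-descriptor
 * ring needs exactly 16384 bytes and the ALIGN() above is a no-op; the
 * round-up only matters when count * 16 is not already a 4 KiB
 * multiple.
 */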
2983 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2984 &tx_ring->dma, GFP_KERNEL);
2991 vfree(tx_ring->tx_buffer_info);
2992 tx_ring->tx_buffer_info = NULL;
2993 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
2998 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2999 * @adapter: board private structure
3001 * If this function returns with an error, then it's possible one or
3002 * more of the rings is populated (while the rest are not). It is the
3003 * caller's duty to clean those orphaned rings.
3005 * Return 0 on success, negative on failure
3007 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3011 for (i = 0; i < adapter->num_tx_queues; i++) {
3012 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3015 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3023 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3024 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3026 * Returns 0 on success, negative on failure
3028 int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
3032 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3033 rx_ring->rx_buffer_info = vzalloc(size);
3034 if (!rx_ring->rx_buffer_info)
3037 /* Round up to nearest 4K */
3038 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3039 rx_ring->size = ALIGN(rx_ring->size, 4096);
3041 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3042 &rx_ring->dma, GFP_KERNEL);
3049 vfree(rx_ring->rx_buffer_info);
3050 rx_ring->rx_buffer_info = NULL;
3051 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3056 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3057 * @adapter: board private structure
3059 * If this function returns with an error, then it's possible one or
3060 * more of the rings is populated (while the rest are not). It is the
3061 * caller's duty to clean those orphaned rings.
3063 * Return 0 on success, negative on failure
3065 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3069 for (i = 0; i < adapter->num_rx_queues; i++) {
3070 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
3073 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3080 * ixgbevf_free_rx_resources - Free Rx Resources
3081 * @rx_ring: ring to clean the resources from
3083 * Free all receive software resources
3085 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3087 ixgbevf_clean_rx_ring(rx_ring);
3089 vfree(rx_ring->rx_buffer_info);
3090 rx_ring->rx_buffer_info = NULL;
3092 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3095 rx_ring->desc = NULL;
3099 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3100 * @adapter: board private structure
3102 * Free all receive software resources
3104 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3108 for (i = 0; i < adapter->num_rx_queues; i++)
3109 if (adapter->rx_ring[i]->desc)
3110 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3114 * ixgbevf_open - Called when a network interface is made active
3115 * @netdev: network interface device structure
3117 * Returns 0 on success, negative value on failure
3119 * The open entry point is called when a network interface is made
3120 * active by the system (IFF_UP). At this point all resources needed
3121 * for transmit and receive operations are allocated, the interrupt
3122 * handler is registered with the OS, the watchdog timer is started,
3123 * and the stack is notified that the interface is ready.
3125 static int ixgbevf_open(struct net_device *netdev)
3127 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3128 struct ixgbe_hw *hw = &adapter->hw;
3131 /* A previous failure to open the device because of a lack of
3132 * available MSIX vector resources may have reset the number
3133 * of msix vectors variable to zero. The only way to recover
3134 * is to unload/reload the driver and hope that the system has
3135 * been able to recover some MSIX vector resources.
3137 if (!adapter->num_msix_vectors)
3140 if (hw->adapter_stopped) {
3141 ixgbevf_reset(adapter);
3142 /* if adapter is still stopped then PF isn't up and
3143 * the VF can't start.
3145 if (hw->adapter_stopped) {
3146 err = IXGBE_ERR_MBX;
3147 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3148 goto err_setup_reset;
3152 /* disallow open during test */
3153 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3156 netif_carrier_off(netdev);
3158 /* allocate transmit descriptors */
3159 err = ixgbevf_setup_all_tx_resources(adapter);
3163 /* allocate receive descriptors */
3164 err = ixgbevf_setup_all_rx_resources(adapter);
3168 ixgbevf_configure(adapter);
3170 /* Map the Tx/Rx rings to the vectors we were allotted.
3171 * Since request_irq will be called in this function, map_rings
3172 * must be called *before* up_complete.
3174 ixgbevf_map_rings_to_vectors(adapter);
3176 err = ixgbevf_request_irq(adapter);
3180 ixgbevf_up_complete(adapter);
3185 ixgbevf_down(adapter);
3187 ixgbevf_free_all_rx_resources(adapter);
3189 ixgbevf_free_all_tx_resources(adapter);
3190 ixgbevf_reset(adapter);
3198 * ixgbevf_close - Disables a network interface
3199 * @netdev: network interface device structure
3201 * Returns 0, this is not allowed to fail
3203 * The close entry point is called when an interface is de-activated
3204 * by the OS. The hardware is still under the drivers control, but
3205 * needs to be disabled. A global MAC reset is issued to stop the
3206 * hardware, and all transmit and receive resources are freed.
3208 static int ixgbevf_close(struct net_device *netdev)
3210 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3212 ixgbevf_down(adapter);
3213 ixgbevf_free_irq(adapter);
3215 ixgbevf_free_all_tx_resources(adapter);
3216 ixgbevf_free_all_rx_resources(adapter);
3221 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3223 struct net_device *dev = adapter->netdev;
3225 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
3228 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
3230 /* if interface is down do nothing */
3231 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3232 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3235 /* Hardware has to reinitialize queues and interrupts to
3236 * match packet buffer alignment. Unfortunately, the
3237 * hardware is not flexible enough to do this dynamically.
3239 if (netif_running(dev))
3242 ixgbevf_clear_interrupt_scheme(adapter);
3243 ixgbevf_init_interrupt_scheme(adapter);
3245 if (netif_running(dev))
3249 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3250 u32 vlan_macip_lens, u32 type_tucmd,
3253 struct ixgbe_adv_tx_context_desc *context_desc;
3254 u16 i = tx_ring->next_to_use;
3256 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3259 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3261 /* set bits to identify this as an advanced context descriptor */
3262 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3264 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3265 context_desc->seqnum_seed = 0;
3266 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3267 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
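/* A context descriptor carries offload metadata (header lengths, VLAN
 * tag, MSS) rather than packet data, but it occupies a normal slot in
 * the descriptor ring, which is why next_to_use is advanced above just
 * as for a data descriptor.
 */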
3270 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3271 struct ixgbevf_tx_buffer *first,
3274 struct sk_buff *skb = first->skb;
3275 u32 vlan_macip_lens, type_tucmd;
3276 u32 mss_l4len_idx, l4len;
3279 if (skb->ip_summed != CHECKSUM_PARTIAL)
3282 if (!skb_is_gso(skb))
3285 err = skb_cow_head(skb, 0);
3289 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3290 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3292 if (first->protocol == htons(ETH_P_IP)) {
3293 struct iphdr *iph = ip_hdr(skb);
3297 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3301 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3302 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3303 IXGBE_TX_FLAGS_CSUM |
3304 IXGBE_TX_FLAGS_IPV4;
3305 } else if (skb_is_gso_v6(skb)) {
3306 ipv6_hdr(skb)->payload_len = 0;
3307 tcp_hdr(skb)->check =
3308 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3309 &ipv6_hdr(skb)->daddr,
3311 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3312 IXGBE_TX_FLAGS_CSUM;
3315 /* compute header lengths */
3316 l4len = tcp_hdrlen(skb);
3318 *hdr_len = skb_transport_offset(skb) + l4len;
3320 /* update GSO size and bytecount with header size */
3321 first->gso_segs = skb_shinfo(skb)->gso_segs;
3322 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3324 /* mss_l4len_id: use 1 as index for TSO */
3325 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
3326 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3327 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
3329 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3330 vlan_macip_lens = skb_network_header_len(skb);
3331 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3332 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3334 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3335 type_tucmd, mss_l4len_idx);
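/* For TSO the hardware rebuilds the TCP checksum for every segment it
 * emits, so the checksum fields above are seeded with only the
 * pseudo-header sum (addresses and protocol, length taken as zero);
 * the device adds each segment's length and payload contribution
 * itself.
 */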
3340 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3341 struct ixgbevf_tx_buffer *first)
3343 struct sk_buff *skb = first->skb;
3344 u32 vlan_macip_lens = 0;
3345 u32 mss_l4len_idx = 0;
3348 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3351 switch (first->protocol) {
3352 case htons(ETH_P_IP):
3353 vlan_macip_lens |= skb_network_header_len(skb);
3354 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3355 l4_hdr = ip_hdr(skb)->protocol;
3357 case htons(ETH_P_IPV6):
3358 vlan_macip_lens |= skb_network_header_len(skb);
3359 l4_hdr = ipv6_hdr(skb)->nexthdr;
3362 if (unlikely(net_ratelimit())) {
3363 dev_warn(tx_ring->dev,
3364 "partial checksum but proto=%x!\n",
3372 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3373 mss_l4len_idx = tcp_hdrlen(skb) <<
3374 IXGBE_ADVTXD_L4LEN_SHIFT;
3377 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3378 mss_l4len_idx = sizeof(struct sctphdr) <<
3379 IXGBE_ADVTXD_L4LEN_SHIFT;
3382 mss_l4len_idx = sizeof(struct udphdr) <<
3383 IXGBE_ADVTXD_L4LEN_SHIFT;
3386 if (unlikely(net_ratelimit())) {
3387 dev_warn(tx_ring->dev,
3388 "partial checksum but l4 proto=%x!\n",
3394 /* update TX checksum flag */
3395 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3398 /* vlan_macip_lens: MACLEN, VLAN tag */
3399 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3400 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3402 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3403 type_tucmd, mss_l4len_idx);
3406 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3408 /* set type for advanced descriptor with frame checksum insertion */
3409 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3410 IXGBE_ADVTXD_DCMD_IFCS |
3411 IXGBE_ADVTXD_DCMD_DEXT);
3413 /* set HW VLAN bit if VLAN is present */
3414 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3415 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3417 /* set segmentation enable bits for TSO/FSO */
3418 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3419 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3424 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3425 u32 tx_flags, unsigned int paylen)
3427 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3429 /* enable L4 checksum for TSO and TX checksum offload */
3430 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3431 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3433 /* enable IPv4 checksum for TSO */
3434 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3435 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3437 /* use index 1 context for TSO/FSO/FCOE */
3438 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3439 olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
3441 /* Check Context must be set if Tx switch is enabled, which it
3442 * always is for the case where virtual functions are running.
3444 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3446 tx_desc->read.olinfo_status = olinfo_status;
3449 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3450 struct ixgbevf_tx_buffer *first,
3454 struct sk_buff *skb = first->skb;
3455 struct ixgbevf_tx_buffer *tx_buffer;
3456 union ixgbe_adv_tx_desc *tx_desc;
3457 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
3458 unsigned int data_len = skb->data_len;
3459 unsigned int size = skb_headlen(skb);
3460 unsigned int paylen = skb->len - hdr_len;
3461 u32 tx_flags = first->tx_flags;
3463 u16 i = tx_ring->next_to_use;
3465 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3467 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
3468 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3470 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3471 if (dma_mapping_error(tx_ring->dev, dma))
3474 /* record length, and DMA address */
3475 dma_unmap_len_set(first, len, size);
3476 dma_unmap_addr_set(first, dma, dma);
3478 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3481 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3482 tx_desc->read.cmd_type_len =
3483 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3487 if (i == tx_ring->count) {
3488 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3492 dma += IXGBE_MAX_DATA_PER_TXD;
3493 size -= IXGBE_MAX_DATA_PER_TXD;
3495 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3496 tx_desc->read.olinfo_status = 0;
3499 if (likely(!data_len))
3502 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3506 if (i == tx_ring->count) {
3507 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3511 size = skb_frag_size(frag);
3514 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3516 if (dma_mapping_error(tx_ring->dev, dma))
3519 tx_buffer = &tx_ring->tx_buffer_info[i];
3520 dma_unmap_len_set(tx_buffer, len, size);
3521 dma_unmap_addr_set(tx_buffer, dma, dma);
3523 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3524 tx_desc->read.olinfo_status = 0;
3529 /* write last descriptor with RS and EOP bits */
3530 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3531 tx_desc->read.cmd_type_len = cmd_type;
3533 /* set the timestamp */
3534 first->time_stamp = jiffies;
3536 /* Force memory writes to complete before letting h/w know there
3537 * are new descriptors to fetch. (Only applicable for weak-ordered
3538 * memory model archs, such as IA-64).
3540 * We also need this memory barrier (wmb) to make certain all of the
3541 * status bits have been updated before next_to_watch is written.
3545 /* set next_to_watch value indicating a packet is present */
3546 first->next_to_watch = tx_desc;
3549 if (i == tx_ring->count)
3552 tx_ring->next_to_use = i;
3554 /* notify HW of packet */
3555 ixgbevf_write_tail(tx_ring, i);
3559 dev_err(tx_ring->dev, "TX DMA map failed\n");
3561 /* clear dma mappings for failed tx_buffer_info map */
3563 tx_buffer = &tx_ring->tx_buffer_info[i];
3564 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
3565 if (tx_buffer == first)
3572 tx_ring->next_to_use = i;
3575 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3577 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3578 /* Herbert's original patch had:
3579 * smp_mb__after_netif_stop_queue();
3580 * but since that doesn't exist yet, just open code it.
3584 /* We need to check again in case another CPU has just
3585 * made room available.
3587 if (likely(ixgbevf_desc_unused(tx_ring) < size))
3590 /* A reprieve! - use start_queue because it doesn't call schedule */
3591 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3592 ++tx_ring->tx_stats.restart_queue;
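/* The stop-then-recheck sequence above closes a race with Tx cleanup:
 * stop the queue, let the barrier publish the stopped state, then
 * re-read the free-descriptor count. If cleanup freed enough space in
 * the meantime the queue is restarted at once and the caller proceeds
 * as if it had never been stopped.
 */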
3597 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3599 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
3601 return __ixgbevf_maybe_stop_tx(tx_ring, size);
3604 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3606 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3607 struct ixgbevf_tx_buffer *first;
3608 struct ixgbevf_ring *tx_ring;
3611 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3612 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3616 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3618 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3619 dev_kfree_skb_any(skb);
3620 return NETDEV_TX_OK;
3623 tx_ring = adapter->tx_ring[skb->queue_mapping];
3625 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3626 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3627 * + 2 desc gap to keep tail from touching head,
3628 * + 1 desc for context descriptor,
3629 * otherwise try next time
3631 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3632 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3633 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3635 count += skb_shinfo(skb)->nr_frags;
3637 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3638 tx_ring->tx_stats.tx_busy++;
3639 return NETDEV_TX_BUSY;
3642 /* record the location of the first descriptor for this packet */
3643 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
3645 first->bytecount = skb->len;
3646 first->gso_segs = 1;
3648 if (skb_vlan_tag_present(skb)) {
3649 tx_flags |= skb_vlan_tag_get(skb);
3650 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3651 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3654 /* record initial flags and protocol */
3655 first->tx_flags = tx_flags;
3656 first->protocol = vlan_get_protocol(skb);
3658 tso = ixgbevf_tso(tx_ring, first, &hdr_len);
3662 ixgbevf_tx_csum(tx_ring, first);
3664 ixgbevf_tx_map(tx_ring, first, hdr_len);
3666 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3668 return NETDEV_TX_OK;
3671 dev_kfree_skb_any(first->skb);
3674 return NETDEV_TX_OK;
3678 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3679 * @netdev: network interface device structure
3680 * @p: pointer to an address structure
3682 * Returns 0 on success, negative on failure
3684 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3686 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3687 struct ixgbe_hw *hw = &adapter->hw;
3688 struct sockaddr *addr = p;
3690 if (!is_valid_ether_addr(addr->sa_data))
3691 return -EADDRNOTAVAIL;
3693 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3694 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3696 spin_lock_bh(&adapter->mbx_lock);
3698 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3700 spin_unlock_bh(&adapter->mbx_lock);
3706 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3707 * @netdev: network interface device structure
3708 * @new_mtu: new value for maximum frame size
3710 * Returns 0 on success, negative on failure
3712 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3714 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3715 struct ixgbe_hw *hw = &adapter->hw;
3716 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3717 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3719 switch (adapter->hw.api_version) {
3720 case ixgbe_mbox_api_11:
3721 case ixgbe_mbox_api_12:
3722 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3725 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
3726 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3730 /* MTU < 68 is an error and causes problems on some kernels */
3731 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3734 hw_dbg(hw, "changing MTU from %d to %d\n",
3735 netdev->mtu, new_mtu);
3736 /* must set new MTU before calling down or up */
3737 netdev->mtu = new_mtu;
3739 /* notify the PF of our intent to use this size of frame */
3740 ixgbevf_rlpml_set_vf(hw, max_frame);
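/* max_frame is the on-wire frame size the PF must be willing to pass:
 * MTU plus Ethernet header and FCS, e.g. 1500 + 14 + 4 = 1518 bytes
 * for the default MTU. ixgbevf_rlpml_set_vf() reports this limit to
 * the PF over the mailbox.
 */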
3745 #ifdef CONFIG_NET_POLL_CONTROLLER
3746 /* Polling 'interrupt' - used by things like netconsole to send skbs
3747 * without having to re-enable interrupts. It's not called while
3748 * the interrupt routine is executing.
3750 static void ixgbevf_netpoll(struct net_device *netdev)
3752 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3755 /* if interface is down do nothing */
3756 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
3758 for (i = 0; i < adapter->num_rx_queues; i++)
3759 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
3761 #endif /* CONFIG_NET_POLL_CONTROLLER */
3763 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3765 struct net_device *netdev = pci_get_drvdata(pdev);
3766 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3771 netif_device_detach(netdev);
3773 if (netif_running(netdev)) {
3775 ixgbevf_down(adapter);
3776 ixgbevf_free_irq(adapter);
3777 ixgbevf_free_all_tx_resources(adapter);
3778 ixgbevf_free_all_rx_resources(adapter);
3782 ixgbevf_clear_interrupt_scheme(adapter);
3785 retval = pci_save_state(pdev);
3790 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3791 pci_disable_device(pdev);
3797 static int ixgbevf_resume(struct pci_dev *pdev)
3799 struct net_device *netdev = pci_get_drvdata(pdev);
3800 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3803 pci_restore_state(pdev);
3804 /* pci_restore_state clears dev->state_saved so call
3805 * pci_save_state to restore it.
3807 pci_save_state(pdev);
3809 err = pci_enable_device_mem(pdev);
3811 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3814 smp_mb__before_atomic();
3815 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
3816 pci_set_master(pdev);
3818 ixgbevf_reset(adapter);
3821 err = ixgbevf_init_interrupt_scheme(adapter);
3824 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3828 if (netif_running(netdev)) {
3829 err = ixgbevf_open(netdev);
3834 netif_device_attach(netdev);
3839 #endif /* CONFIG_PM */
3840 static void ixgbevf_shutdown(struct pci_dev *pdev)
3842 ixgbevf_suspend(pdev, PMSG_SUSPEND);
3845 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3846 struct rtnl_link_stats64 *stats)
3848 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3851 const struct ixgbevf_ring *ring;
3854 ixgbevf_update_stats(adapter);
3856 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3858 for (i = 0; i < adapter->num_rx_queues; i++) {
3859 ring = adapter->rx_ring[i];
3861 start = u64_stats_fetch_begin_irq(&ring->syncp);
3862 bytes = ring->stats.bytes;
3863 packets = ring->stats.packets;
3864 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3865 stats->rx_bytes += bytes;
3866 stats->rx_packets += packets;
3869 for (i = 0; i < adapter->num_tx_queues; i++) {
3870 ring = adapter->tx_ring[i];
3872 start = u64_stats_fetch_begin_irq(&ring->syncp);
3873 bytes = ring->stats.bytes;
3874 packets = ring->stats.packets;
3875 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3876 stats->tx_bytes += bytes;
3877 stats->tx_packets += packets;
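/* Each ring's byte/packet pair is sampled under its u64_stats seqcount:
 * the fetch_begin/fetch_retry loop re-reads the pair whenever the
 * writer updated it mid-read, yielding a consistent 64-bit snapshot on
 * 32-bit hosts without taking a lock.
 */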
3883 static const struct net_device_ops ixgbevf_netdev_ops = {
3884 .ndo_open = ixgbevf_open,
3885 .ndo_stop = ixgbevf_close,
3886 .ndo_start_xmit = ixgbevf_xmit_frame,
3887 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
3888 .ndo_get_stats64 = ixgbevf_get_stats,
3889 .ndo_validate_addr = eth_validate_addr,
3890 .ndo_set_mac_address = ixgbevf_set_mac,
3891 .ndo_change_mtu = ixgbevf_change_mtu,
3892 .ndo_tx_timeout = ixgbevf_tx_timeout,
3893 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3894 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3895 #ifdef CONFIG_NET_RX_BUSY_POLL
3896 .ndo_busy_poll = ixgbevf_busy_poll_recv,
3898 #ifdef CONFIG_NET_POLL_CONTROLLER
3899 .ndo_poll_controller = ixgbevf_netpoll,
3901 .ndo_features_check = passthru_features_check,
3904 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3906 dev->netdev_ops = &ixgbevf_netdev_ops;
3907 ixgbevf_set_ethtool_ops(dev);
3908 dev->watchdog_timeo = 5 * HZ;
3912 * ixgbevf_probe - Device Initialization Routine
3913 * @pdev: PCI device information struct
3914 * @ent: entry in ixgbevf_pci_tbl
3916 * Returns 0 on success, negative on failure
3918 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3919 * The OS initialization, configuring of the adapter private structure,
3920 * and a hardware reset occur.
3922 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3924 struct net_device *netdev;
3925 struct ixgbevf_adapter *adapter = NULL;
3926 struct ixgbe_hw *hw = NULL;
3927 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3928 int err, pci_using_dac;
3929 bool disable_dev = false;
3931 err = pci_enable_device(pdev);
3935 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3938 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3940 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
3946 err = pci_request_regions(pdev, ixgbevf_driver_name);
3948 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3952 pci_set_master(pdev);
3954 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3958 goto err_alloc_etherdev;
3961 SET_NETDEV_DEV(netdev, &pdev->dev);
3963 adapter = netdev_priv(netdev);
3965 adapter->netdev = netdev;
3966 adapter->pdev = pdev;
3969 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3971 /* call save state here in standalone driver because it relies on
3972 * adapter struct to exist, and needs to call netdev_priv
3974 pci_save_state(pdev);
3976 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3977 pci_resource_len(pdev, 0));
3978 adapter->io_addr = hw->hw_addr;
3984 ixgbevf_assign_netdev_ops(netdev);
3987 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3988 hw->mac.type = ii->mac;
3990 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3991 sizeof(struct ixgbe_mbx_operations));
3993 /* setup the private structure */
3994 err = ixgbevf_sw_init(adapter);
3998 /* The HW MAC address was set and/or determined in sw_init */
3999 if (!is_valid_ether_addr(netdev->dev_addr)) {
4000 pr_err("invalid MAC address\n");
4005 netdev->hw_features = NETIF_F_SG |
4012 netdev->features = netdev->hw_features |
4013 NETIF_F_HW_VLAN_CTAG_TX |
4014 NETIF_F_HW_VLAN_CTAG_RX |
4015 NETIF_F_HW_VLAN_CTAG_FILTER;
4017 netdev->vlan_features |= NETIF_F_TSO |
4024 netdev->features |= NETIF_F_HIGHDMA;
4026 netdev->priv_flags |= IFF_UNICAST_FLT;
4028 if (IXGBE_REMOVED(hw->hw_addr)) {
4033 setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
4034 (unsigned long)adapter);
4036 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4037 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4038 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4040 err = ixgbevf_init_interrupt_scheme(adapter);
4044 strcpy(netdev->name, "eth%d");
4046 err = register_netdev(netdev);
4050 pci_set_drvdata(pdev, netdev);
4051 netif_carrier_off(netdev);
4053 ixgbevf_init_last_counter_stats(adapter);
4055 /* print the VF info */
4056 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4057 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4059 switch (hw->mac.type) {
4060 case ixgbe_mac_X550_vf:
4061 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4063 case ixgbe_mac_X540_vf:
4064 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4066 case ixgbe_mac_82599_vf:
4068 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4075 ixgbevf_clear_interrupt_scheme(adapter);
4077 ixgbevf_reset_interrupt_capability(adapter);
4078 iounmap(adapter->io_addr);
4080 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4081 free_netdev(netdev);
4083 pci_release_regions(pdev);
4086 if (!adapter || disable_dev)
4087 pci_disable_device(pdev);
4092 * ixgbevf_remove - Device Removal Routine
4093 * @pdev: PCI device information struct
4095 * ixgbevf_remove is called by the PCI subsystem to alert the driver
4096 * that it should release a PCI device. This could be caused by a
4097 * Hot-Plug event, or because the driver is going to be removed from memory.
4100 static void ixgbevf_remove(struct pci_dev *pdev)
4102 struct net_device *netdev = pci_get_drvdata(pdev);
4103 struct ixgbevf_adapter *adapter;
4109 adapter = netdev_priv(netdev);
4111 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4112 cancel_work_sync(&adapter->service_task);
4114 if (netdev->reg_state == NETREG_REGISTERED)
4115 unregister_netdev(netdev);
4117 ixgbevf_clear_interrupt_scheme(adapter);
4118 ixgbevf_reset_interrupt_capability(adapter);
4120 iounmap(adapter->io_addr);
4121 pci_release_regions(pdev);
4123 hw_dbg(&adapter->hw, "Remove complete\n");
4125 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4126 free_netdev(netdev);
4129 pci_disable_device(pdev);
4133 * ixgbevf_io_error_detected - called when PCI error is detected
4134 * @pdev: Pointer to PCI device
4135 * @state: The current pci connection state
4137 * This function is called after a PCI bus error affecting
4138 * this device has been detected.
4140 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4141 pci_channel_state_t state)
4143 struct net_device *netdev = pci_get_drvdata(pdev);
4144 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4146 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4147 return PCI_ERS_RESULT_DISCONNECT;
4150 netif_device_detach(netdev);
4152 if (state == pci_channel_io_perm_failure) {
4154 return PCI_ERS_RESULT_DISCONNECT;
4157 if (netif_running(netdev))
4158 ixgbevf_down(adapter);
4160 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4161 pci_disable_device(pdev);
4164 /* Request a slot reset. */
4165 return PCI_ERS_RESULT_NEED_RESET;
4169 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4170 * @pdev: Pointer to PCI device
4172 * Restart the card from scratch, as if from a cold boot. Implementation
4173 * resembles the first-half of the ixgbevf_resume routine.
4175 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4177 struct net_device *netdev = pci_get_drvdata(pdev);
4178 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4180 if (pci_enable_device_mem(pdev)) {
4182 "Cannot re-enable PCI device after reset.\n");
4183 return PCI_ERS_RESULT_DISCONNECT;
4186 smp_mb__before_atomic();
4187 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4188 pci_set_master(pdev);
4190 ixgbevf_reset(adapter);
4192 return PCI_ERS_RESULT_RECOVERED;
4196 * ixgbevf_io_resume - called when traffic can start flowing again.
4197 * @pdev: Pointer to PCI device
4199 * This callback is called when the error recovery driver tells us that
4200 * it's OK to resume normal operation. Implementation resembles the
4201 * second-half of the ixgbevf_resume routine.
4203 static void ixgbevf_io_resume(struct pci_dev *pdev)
4205 struct net_device *netdev = pci_get_drvdata(pdev);
4206 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4208 if (netif_running(netdev))
4209 ixgbevf_up(adapter);
4211 netif_device_attach(netdev);
4214 /* PCI Error Recovery (ERS) */
4215 static const struct pci_error_handlers ixgbevf_err_handler = {
4216 .error_detected = ixgbevf_io_error_detected,
4217 .slot_reset = ixgbevf_io_slot_reset,
4218 .resume = ixgbevf_io_resume,
4221 static struct pci_driver ixgbevf_driver = {
4222 .name = ixgbevf_driver_name,
4223 .id_table = ixgbevf_pci_tbl,
4224 .probe = ixgbevf_probe,
4225 .remove = ixgbevf_remove,
4227 /* Power Management Hooks */
4228 .suspend = ixgbevf_suspend,
4229 .resume = ixgbevf_resume,
4231 .shutdown = ixgbevf_shutdown,
4232 .err_handler = &ixgbevf_err_handler
4236 * ixgbevf_init_module - Driver Registration Routine
4238 * ixgbevf_init_module is the first routine called when the driver is
4239 * loaded. All it does is register with the PCI subsystem.
4241 static int __init ixgbevf_init_module(void)
4245 pr_info("%s - version %s\n", ixgbevf_driver_string,
4246 ixgbevf_driver_version);
4248 pr_info("%s\n", ixgbevf_copyright);
4250 ret = pci_register_driver(&ixgbevf_driver);
4254 module_init(ixgbevf_init_module);
4257 * ixgbevf_exit_module - Driver Exit Cleanup Routine
4259 * ixgbevf_exit_module is called just before the driver is removed from memory.
4262 static void __exit ixgbevf_exit_module(void)
4264 pci_unregister_driver(&ixgbevf_driver);
4269 * ixgbevf_get_hw_dev_name - return device name string
4270 * used by hardware layer to print debugging information
4272 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4274 struct ixgbevf_adapter *adapter = hw->back;
4276 return adapter->netdev->name;
4280 module_exit(ixgbevf_exit_module);
4282 /* ixgbevf_main.c */