[karo-tx-linux.git] blob: drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
commit: drivers/net/intel: use napi_complete_done()
1 /*******************************************************************************
2
3   Intel 82599 Virtual Function driver
4   Copyright(c) 1999 - 2015 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, see <http://www.gnu.org/licenses/>.
17
18   The full GNU General Public License is included in this distribution in
19   the file called "COPYING".
20
21   Contact Information:
22   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25 *******************************************************************************/
26
27 /******************************************************************************
28  Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
29 ******************************************************************************/
30
31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
33 #include <linux/types.h>
34 #include <linux/bitops.h>
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/netdevice.h>
38 #include <linux/vmalloc.h>
39 #include <linux/string.h>
40 #include <linux/in.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/sctp.h>
44 #include <linux/ipv6.h>
45 #include <linux/slab.h>
46 #include <net/checksum.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/ethtool.h>
49 #include <linux/if.h>
50 #include <linux/if_vlan.h>
51 #include <linux/prefetch.h>
52
53 #include "ixgbevf.h"
54
55 const char ixgbevf_driver_name[] = "ixgbevf";
56 static const char ixgbevf_driver_string[] =
57         "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
58
59 #define DRV_VERSION "2.12.1-k"
60 const char ixgbevf_driver_version[] = DRV_VERSION;
61 static char ixgbevf_copyright[] =
62         "Copyright (c) 2009 - 2012 Intel Corporation.";
63
64 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
65         [board_82599_vf] = &ixgbevf_82599_vf_info,
66         [board_X540_vf]  = &ixgbevf_X540_vf_info,
67         [board_X550_vf]  = &ixgbevf_X550_vf_info,
68         [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
69 };
70
71 /* ixgbevf_pci_tbl - PCI Device ID Table
72  *
73  * Wildcard entries (PCI_ANY_ID) should come last
74  * Last entry must be all 0s
75  *
76  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77  *   Class, Class Mask, private data (not used) }
78  */
79 static const struct pci_device_id ixgbevf_pci_tbl[] = {
80         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
82         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
83         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
84         /* required last entry */
85         {0, }
86 };
87 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
88
89 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
90 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
91 MODULE_LICENSE("GPL");
92 MODULE_VERSION(DRV_VERSION);
93
94 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
95 static int debug = -1;
96 module_param(debug, int, 0);
97 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
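/* Illustrative usage (not from the original source): "modprobe ixgbevf debug=3"
 * would enable the three lowest message bits (NETIF_MSG_DRV, NETIF_MSG_PROBE
 * and NETIF_MSG_LINK), which matches DEFAULT_MSG_ENABLE above, while the
 * default of -1 keeps DEFAULT_MSG_ENABLE as-is.  This assumes the parameter is
 * fed through the usual netif_msg_init() helper, as Intel drivers typically do.
 */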
98
99 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
100 {
101         if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
102             !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
103             !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
104                 schedule_work(&adapter->service_task);
105 }
106
107 static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
108 {
109         BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
110
111         /* flush memory to make sure state is correct before next watchdog */
112         smp_mb__before_atomic();
113         clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
114 }
115
116 /* forward decls */
117 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
118 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
119 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
120
121 static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
122 {
123         struct ixgbevf_adapter *adapter = hw->back;
124
125         if (!hw->hw_addr)
126                 return;
127         hw->hw_addr = NULL;
128         dev_err(&adapter->pdev->dev, "Adapter removed\n");
129         if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
130                 ixgbevf_service_event_schedule(adapter);
131 }
132
133 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
134 {
135         u32 value;
136
137         /* The following check not only avoids a redundant read of
138          * the status register when the register that was just read
139          * (and returned IXGBE_FAILED_READ_REG) was the status
140          * register itself, it also blocks any potential recursion
141          * into this function.
142          */
143         if (reg == IXGBE_VFSTATUS) {
144                 ixgbevf_remove_adapter(hw);
145                 return;
146         }
147         value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
148         if (value == IXGBE_FAILED_READ_REG)
149                 ixgbevf_remove_adapter(hw);
150 }
151
152 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
153 {
154         u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
155         u32 value;
156
157         if (IXGBE_REMOVED(reg_addr))
158                 return IXGBE_FAILED_READ_REG;
159         value = readl(reg_addr + reg);
160         if (unlikely(value == IXGBE_FAILED_READ_REG))
161                 ixgbevf_check_remove(hw, reg);
162         return value;
163 }
164
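/* Note (added for clarity): a surprise-removed PCI function returns all ones
 * on MMIO reads, so a readl() result of IXGBE_FAILED_READ_REG from a register
 * that cannot legitimately be all ones is treated as "adapter gone".  A
 * caller-side sketch, using only the helper above:
 *
 *	u32 status = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
 *
 *	if (status == IXGBE_FAILED_READ_REG)
 *		return;	// removal already handled via ixgbevf_check_remove()
 */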
165 /**
166  * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
167  * @adapter: pointer to adapter struct
168  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
169  * @queue: queue to map the corresponding interrupt to
170  * @msix_vector: the vector to map to the corresponding queue
171  **/
172 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
173                              u8 queue, u8 msix_vector)
174 {
175         u32 ivar, index;
176         struct ixgbe_hw *hw = &adapter->hw;
177
178         if (direction == -1) {
179                 /* other causes */
180                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
181                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
182                 ivar &= ~0xFF;
183                 ivar |= msix_vector;
184                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
185         } else {
186                 /* Tx or Rx causes */
187                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
188                 index = ((16 * (queue & 1)) + (8 * direction));
189                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
190                 ivar &= ~(0xFF << index);
191                 ivar |= (msix_vector << index);
192                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
193         }
194 }
195
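/* Worked example (added illustration) of the IVAR layout handled above: each
 * VTIVAR register covers two queues with four 8-bit entries, ordered
 * Rx(even), Tx(even), Rx(odd), Tx(odd).  Mapping Rx queue 3 to MSI-X vector 1:
 *
 *	queue >> 1 == 1                      selects VTIVAR(1)
 *	index = 16 * (3 & 1) + 8 * 0 == 16   Rx entry of the odd queue
 *	ivar |= (1 | IXGBE_IVAR_ALLOC_VAL) << 16;
 *
 * The Tx cause of the same queue would use index 24 (direction == 1).
 */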
196 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
197                                         struct ixgbevf_tx_buffer *tx_buffer)
198 {
199         if (tx_buffer->skb) {
200                 dev_kfree_skb_any(tx_buffer->skb);
201                 if (dma_unmap_len(tx_buffer, len))
202                         dma_unmap_single(tx_ring->dev,
203                                          dma_unmap_addr(tx_buffer, dma),
204                                          dma_unmap_len(tx_buffer, len),
205                                          DMA_TO_DEVICE);
206         } else if (dma_unmap_len(tx_buffer, len)) {
207                 dma_unmap_page(tx_ring->dev,
208                                dma_unmap_addr(tx_buffer, dma),
209                                dma_unmap_len(tx_buffer, len),
210                                DMA_TO_DEVICE);
211         }
212         tx_buffer->next_to_watch = NULL;
213         tx_buffer->skb = NULL;
214         dma_unmap_len_set(tx_buffer, len, 0);
215         /* tx_buffer must be completely set up in the transmit path */
216 }
217
218 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
219 {
220         return ring->stats.packets;
221 }
222
223 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
224 {
225         struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
226         struct ixgbe_hw *hw = &adapter->hw;
227
228         u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
229         u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
230
231         if (head != tail)
232                 return (head < tail) ?
233                         tail - head : (tail + ring->count - head);
234
235         return 0;
236 }
237
238 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
239 {
240         u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
241         u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
242         u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
243
244         clear_check_for_tx_hang(tx_ring);
245
246         /* Check for a hung queue, but be thorough. This verifies
247          * that a transmit has been completed since the previous
248          * check AND there is at least one packet pending. The
249          * ARMED bit is set to indicate a potential hang.
250          */
251         if ((tx_done_old == tx_done) && tx_pending) {
252                 /* make sure it is true for two checks in a row */
253                 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
254                                         &tx_ring->state);
255         }
256         /* reset the countdown */
257         clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
258
259         /* update completed stats and continue */
260         tx_ring->tx_stats.tx_done_old = tx_done;
261
262         return false;
263 }
264
265 static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
266 {
267         /* Do the reset outside of interrupt context */
268         if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
269                 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
270                 ixgbevf_service_event_schedule(adapter);
271         }
272 }
273
274 /**
275  * ixgbevf_tx_timeout - Respond to a Tx Hang
276  * @netdev: network interface device structure
277  **/
278 static void ixgbevf_tx_timeout(struct net_device *netdev)
279 {
280         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
281
282         ixgbevf_tx_timeout_reset(adapter);
283 }
284
285 /**
286  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
287  * @q_vector: structure containing interrupt and ring information
288  * @tx_ring: tx ring to clean
289  **/
290 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
291                                  struct ixgbevf_ring *tx_ring)
292 {
293         struct ixgbevf_adapter *adapter = q_vector->adapter;
294         struct ixgbevf_tx_buffer *tx_buffer;
295         union ixgbe_adv_tx_desc *tx_desc;
296         unsigned int total_bytes = 0, total_packets = 0;
297         unsigned int budget = tx_ring->count / 2;
298         unsigned int i = tx_ring->next_to_clean;
299
300         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
301                 return true;
302
303         tx_buffer = &tx_ring->tx_buffer_info[i];
304         tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
305         i -= tx_ring->count;
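        /* note: i was just biased by -tx_ring->count so that it runs from
         * -count up to 0; the "if (unlikely(!i))" checks below detect ring
         * wrap without an extra compare, and the real index is restored at
         * the end with "i += tx_ring->count"
         */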
306
307         do {
308                 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
309
310                 /* if next_to_watch is not set then there is no work pending */
311                 if (!eop_desc)
312                         break;
313
314                 /* prevent any other reads prior to eop_desc */
315                 read_barrier_depends();
316
317                 /* if DD is not set pending work has not been completed */
318                 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
319                         break;
320
321                 /* clear next_to_watch to prevent false hangs */
322                 tx_buffer->next_to_watch = NULL;
323
324                 /* update the statistics for this packet */
325                 total_bytes += tx_buffer->bytecount;
326                 total_packets += tx_buffer->gso_segs;
327
328                 /* free the skb */
329                 dev_kfree_skb_any(tx_buffer->skb);
330
331                 /* unmap skb header data */
332                 dma_unmap_single(tx_ring->dev,
333                                  dma_unmap_addr(tx_buffer, dma),
334                                  dma_unmap_len(tx_buffer, len),
335                                  DMA_TO_DEVICE);
336
337                 /* clear tx_buffer data */
338                 tx_buffer->skb = NULL;
339                 dma_unmap_len_set(tx_buffer, len, 0);
340
341                 /* unmap remaining buffers */
342                 while (tx_desc != eop_desc) {
343                         tx_buffer++;
344                         tx_desc++;
345                         i++;
346                         if (unlikely(!i)) {
347                                 i -= tx_ring->count;
348                                 tx_buffer = tx_ring->tx_buffer_info;
349                                 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
350                         }
351
352                         /* unmap any remaining paged data */
353                         if (dma_unmap_len(tx_buffer, len)) {
354                                 dma_unmap_page(tx_ring->dev,
355                                                dma_unmap_addr(tx_buffer, dma),
356                                                dma_unmap_len(tx_buffer, len),
357                                                DMA_TO_DEVICE);
358                                 dma_unmap_len_set(tx_buffer, len, 0);
359                         }
360                 }
361
362                 /* move us one more past the eop_desc for start of next pkt */
363                 tx_buffer++;
364                 tx_desc++;
365                 i++;
366                 if (unlikely(!i)) {
367                         i -= tx_ring->count;
368                         tx_buffer = tx_ring->tx_buffer_info;
369                         tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
370                 }
371
372                 /* issue prefetch for next Tx descriptor */
373                 prefetch(tx_desc);
374
375                 /* update budget accounting */
376                 budget--;
377         } while (likely(budget));
378
379         i += tx_ring->count;
380         tx_ring->next_to_clean = i;
381         u64_stats_update_begin(&tx_ring->syncp);
382         tx_ring->stats.bytes += total_bytes;
383         tx_ring->stats.packets += total_packets;
384         u64_stats_update_end(&tx_ring->syncp);
385         q_vector->tx.total_bytes += total_bytes;
386         q_vector->tx.total_packets += total_packets;
387
388         if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
389                 struct ixgbe_hw *hw = &adapter->hw;
390                 union ixgbe_adv_tx_desc *eop_desc;
391
392                 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
393
394                 pr_err("Detected Tx Unit Hang\n"
395                        "  Tx Queue             <%d>\n"
396                        "  TDH, TDT             <%x>, <%x>\n"
397                        "  next_to_use          <%x>\n"
398                        "  next_to_clean        <%x>\n"
399                        "tx_buffer_info[next_to_clean]\n"
400                        "  next_to_watch        <%p>\n"
401                        "  eop_desc->wb.status  <%x>\n"
402                        "  time_stamp           <%lx>\n"
403                        "  jiffies              <%lx>\n",
404                        tx_ring->queue_index,
405                        IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
406                        IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
407                        tx_ring->next_to_use, i,
408                        eop_desc, (eop_desc ? eop_desc->wb.status : 0),
409                        tx_ring->tx_buffer_info[i].time_stamp, jiffies);
410
411                 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
412
413                 /* schedule immediate reset if we believe we hung */
414                 ixgbevf_tx_timeout_reset(adapter);
415
416                 return true;
417         }
418
419 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
420         if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
421                      (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
422                 /* Make sure that anybody stopping the queue after this
423                  * sees the new next_to_clean.
424                  */
425                 smp_mb();
426
427                 if (__netif_subqueue_stopped(tx_ring->netdev,
428                                              tx_ring->queue_index) &&
429                     !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
430                         netif_wake_subqueue(tx_ring->netdev,
431                                             tx_ring->queue_index);
432                         ++tx_ring->tx_stats.restart_queue;
433                 }
434         }
435
436         return !!budget;
437 }
438
439 /**
440  * ixgbevf_rx_skb - Helper function to determine proper Rx method
441  * @q_vector: structure containing interrupt and ring information
442  * @skb: packet to send up
443  **/
444 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
445                            struct sk_buff *skb)
446 {
447 #ifdef CONFIG_NET_RX_BUSY_POLL
448         skb_mark_napi_id(skb, &q_vector->napi);
449
450         if (ixgbevf_qv_busy_polling(q_vector)) {
451                 netif_receive_skb(skb);
452                 /* exit early if we busy polled */
453                 return;
454         }
455 #endif /* CONFIG_NET_RX_BUSY_POLL */
456
457         napi_gro_receive(&q_vector->napi, skb);
458 }
459
460 #define IXGBE_RSS_L4_TYPES_MASK \
461         ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
462          (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
463          (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
464          (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
465
466 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
467                                    union ixgbe_adv_rx_desc *rx_desc,
468                                    struct sk_buff *skb)
469 {
470         u16 rss_type;
471
472         if (!(ring->netdev->features & NETIF_F_RXHASH))
473                 return;
474
475         rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
476                    IXGBE_RXDADV_RSSTYPE_MASK;
477
478         if (!rss_type)
479                 return;
480
481         skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
482                      (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
483                      PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
484 }
485
486 /**
487  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
488  * @ring: structure containing ring specific data
489  * @rx_desc: current Rx descriptor being processed
490  * @skb: skb currently being received and modified
491  **/
492 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
493                                        union ixgbe_adv_rx_desc *rx_desc,
494                                        struct sk_buff *skb)
495 {
496         skb_checksum_none_assert(skb);
497
498         /* Rx csum disabled */
499         if (!(ring->netdev->features & NETIF_F_RXCSUM))
500                 return;
501
502         /* if IP and error */
503         if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
504             ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
505                 ring->rx_stats.csum_err++;
506                 return;
507         }
508
509         if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
510                 return;
511
512         if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
513                 ring->rx_stats.csum_err++;
514                 return;
515         }
516
517         /* It must be a TCP or UDP packet with a valid checksum */
518         skb->ip_summed = CHECKSUM_UNNECESSARY;
519 }
520
521 /**
522  * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
523  * @rx_ring: rx descriptor ring packet is being transacted on
524  * @rx_desc: pointer to the EOP Rx descriptor
525  * @skb: pointer to current skb being populated
526  *
527  * This function checks the ring, descriptor, and packet information in
528  * order to populate the checksum, VLAN, protocol, and other fields within
529  * the skb.
530  **/
531 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
532                                        union ixgbe_adv_rx_desc *rx_desc,
533                                        struct sk_buff *skb)
534 {
535         ixgbevf_rx_hash(rx_ring, rx_desc, skb);
536         ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
537
538         if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
539                 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
540                 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
541
542                 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
543                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
544         }
545
546         skb->protocol = eth_type_trans(skb, rx_ring->netdev);
547 }
548
549 /**
550  * ixgbevf_is_non_eop - process handling of non-EOP buffers
551  * @rx_ring: Rx ring being processed
552  * @rx_desc: Rx descriptor for current buffer
553  *
554  * This function updates next to clean.  If the buffer is an EOP buffer
555  * this function exits returning false, otherwise it returns true,
556  * indicating that this is a non-EOP buffer and that the caller should
557  * keep fetching buffers for the current frame until the descriptor with
558  * EOP set has been processed.
559  **/
560 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
561                                union ixgbe_adv_rx_desc *rx_desc)
562 {
563         u32 ntc = rx_ring->next_to_clean + 1;
564
565         /* fetch, update, and store next to clean */
566         ntc = (ntc < rx_ring->count) ? ntc : 0;
567         rx_ring->next_to_clean = ntc;
568
569         prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
570
571         if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
572                 return false;
573
574         return true;
575 }
576
577 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
578                                       struct ixgbevf_rx_buffer *bi)
579 {
580         struct page *page = bi->page;
581         dma_addr_t dma = bi->dma;
582
583         /* since we are recycling buffers we should seldom need to alloc */
584         if (likely(page))
585                 return true;
586
587         /* alloc new page for storage */
588         page = dev_alloc_page();
589         if (unlikely(!page)) {
590                 rx_ring->rx_stats.alloc_rx_page_failed++;
591                 return false;
592         }
593
594         /* map page for use */
595         dma = dma_map_page(rx_ring->dev, page, 0,
596                            PAGE_SIZE, DMA_FROM_DEVICE);
597
598         /* if mapping failed free memory back to system since
599          * there isn't much point in holding memory we can't use
600          */
601         if (dma_mapping_error(rx_ring->dev, dma)) {
602                 __free_page(page);
603
604                 rx_ring->rx_stats.alloc_rx_buff_failed++;
605                 return false;
606         }
607
608         bi->dma = dma;
609         bi->page = page;
610         bi->page_offset = 0;
611
612         return true;
613 }
614
615 /**
616  * ixgbevf_alloc_rx_buffers - Replace used receive buffers
617  * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
618  * @cleaned_count: number of buffers to replace
619  **/
620 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
621                                      u16 cleaned_count)
622 {
623         union ixgbe_adv_rx_desc *rx_desc;
624         struct ixgbevf_rx_buffer *bi;
625         unsigned int i = rx_ring->next_to_use;
626
627         /* nothing to do or no valid netdev defined */
628         if (!cleaned_count || !rx_ring->netdev)
629                 return;
630
631         rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
632         bi = &rx_ring->rx_buffer_info[i];
633         i -= rx_ring->count;
634
635         do {
636                 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
637                         break;
638
639                 /* Refresh the desc even if pkt_addr didn't change
640                  * because each write-back erases this info.
641                  */
642                 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
643
644                 rx_desc++;
645                 bi++;
646                 i++;
647                 if (unlikely(!i)) {
648                         rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
649                         bi = rx_ring->rx_buffer_info;
650                         i -= rx_ring->count;
651                 }
652
653                 /* clear the hdr_addr for the next_to_use descriptor */
654                 rx_desc->read.hdr_addr = 0;
655
656                 cleaned_count--;
657         } while (cleaned_count);
658
659         i += rx_ring->count;
660
661         if (rx_ring->next_to_use != i) {
662                 /* record the next descriptor to use */
663                 rx_ring->next_to_use = i;
664
665                 /* update next to alloc since we have filled the ring */
666                 rx_ring->next_to_alloc = i;
667
668                 /* Force memory writes to complete before letting h/w
669                  * know there are new descriptors to fetch.  (Only
670                  * applicable for weak-ordered memory model archs,
671                  * such as IA-64).
672                  */
673                 wmb();
674                 ixgbevf_write_tail(rx_ring, i);
675         }
676 }
677
678 /**
679  * ixgbevf_cleanup_headers - Correct corrupted or empty headers
680  * @rx_ring: rx descriptor ring packet is being transacted on
681  * @rx_desc: pointer to the EOP Rx descriptor
682  * @skb: pointer to current skb being fixed
683  *
684  * Check for corrupted packet headers caused by senders on the local L2
685  * embedded NIC switch not setting up their Tx Descriptors right.  These
686  * should be very rare.
687  *
688  * Also address the case where we are pulling data in on pages only
689  * and as such no data is present in the skb header.
690  *
691  * In addition if skb is not at least 60 bytes we need to pad it so that
692  * it is large enough to qualify as a valid Ethernet frame.
693  *
694  * Returns true if an error was encountered and skb was freed.
695  **/
696 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
697                                     union ixgbe_adv_rx_desc *rx_desc,
698                                     struct sk_buff *skb)
699 {
700         /* verify that the packet does not have any known errors */
701         if (unlikely(ixgbevf_test_staterr(rx_desc,
702                                           IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
703                 struct net_device *netdev = rx_ring->netdev;
704
705                 if (!(netdev->features & NETIF_F_RXALL)) {
706                         dev_kfree_skb_any(skb);
707                         return true;
708                 }
709         }
710
711         /* if eth_skb_pad returns an error the skb was freed */
712         if (eth_skb_pad(skb))
713                 return true;
714
715         return false;
716 }
717
718 /**
719  * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
720  * @rx_ring: rx descriptor ring to store buffers on
721  * @old_buff: donor buffer to have page reused
722  *
723  * Synchronizes page for reuse by the adapter
724  **/
725 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
726                                   struct ixgbevf_rx_buffer *old_buff)
727 {
728         struct ixgbevf_rx_buffer *new_buff;
729         u16 nta = rx_ring->next_to_alloc;
730
731         new_buff = &rx_ring->rx_buffer_info[nta];
732
733         /* update, and store next to alloc */
734         nta++;
735         rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
736
737         /* transfer page from old buffer to new buffer */
738         new_buff->page = old_buff->page;
739         new_buff->dma = old_buff->dma;
740         new_buff->page_offset = old_buff->page_offset;
741
742         /* sync the buffer for use by the device */
743         dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
744                                          new_buff->page_offset,
745                                          IXGBEVF_RX_BUFSZ,
746                                          DMA_FROM_DEVICE);
747 }
748
749 static inline bool ixgbevf_page_is_reserved(struct page *page)
750 {
751         return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
752 }
753
754 /**
755  * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
756  * @rx_ring: rx descriptor ring to transact packets on
757  * @rx_buffer: buffer containing page to add
758  * @rx_desc: descriptor containing length of buffer written by hardware
759  * @skb: sk_buff to place the data into
760  *
761  * This function will add the data contained in rx_buffer->page to the skb.
762  * This is done either through a direct copy if the data in the buffer is
763  * less than the skb header size, otherwise it will just attach the page as
764  * a frag to the skb.
765  *
766  * The function will then update the page offset if necessary and return
767  * true if the buffer can be reused by the adapter.
768  **/
769 static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
770                                 struct ixgbevf_rx_buffer *rx_buffer,
771                                 union ixgbe_adv_rx_desc *rx_desc,
772                                 struct sk_buff *skb)
773 {
774         struct page *page = rx_buffer->page;
775         unsigned char *va = page_address(page) + rx_buffer->page_offset;
776         unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
777 #if (PAGE_SIZE < 8192)
778         unsigned int truesize = IXGBEVF_RX_BUFSZ;
779 #else
780         unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
781 #endif
782         unsigned int pull_len;
783
784         if (unlikely(skb_is_nonlinear(skb)))
785                 goto add_tail_frag;
786
787         if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
788                 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
789
790                 /* page is not reserved, we can reuse buffer as is */
791                 if (likely(!ixgbevf_page_is_reserved(page)))
792                         return true;
793
794                 /* this page cannot be reused so discard it */
795                 put_page(page);
796                 return false;
797         }
798
799         /* we need the header to contain the greater of either ETH_HLEN or
800          * 60 bytes if the skb->len is less than 60 for skb_pad.
801          */
802         pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
803
804         /* align pull length to size of long to optimize memcpy performance */
805         memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
806
807         /* update all of the pointers */
808         va += pull_len;
809         size -= pull_len;
810
811 add_tail_frag:
812         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
813                         (unsigned long)va & ~PAGE_MASK, size, truesize);
814
815         /* avoid re-using remote pages */
816         if (unlikely(ixgbevf_page_is_reserved(page)))
817                 return false;
818
819 #if (PAGE_SIZE < 8192)
820         /* if we are only owner of page we can reuse it */
821         if (unlikely(page_count(page) != 1))
822                 return false;
823
824         /* flip page offset to other buffer */
825         rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
826
827 #else
828         /* move offset up to the next cache line */
829         rx_buffer->page_offset += truesize;
830
831         if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
832                 return false;
833
834 #endif
835         /* Even if we own the page, we are not allowed to use atomic_set()
836          * This would break get_page_unless_zero() users.
837          */
838         atomic_inc(&page->_count);
839
840         return true;
841 }
842
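/* Illustrative note on the reuse scheme above, assuming IXGBEVF_RX_BUFSZ is
 * half of a 4 KiB page (the small-page case): each page is split into two
 * receive buffers, and rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ simply flips
 * between the lower and upper half.  The page is recycled only while the
 * driver is the sole owner (page_count(page) == 1) and the page is not
 * "reserved" (allocated on a remote NUMA node or from pfmemalloc reserves);
 * otherwise it is dropped and a fresh page is allocated later.
 */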
843 static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
844                                                union ixgbe_adv_rx_desc *rx_desc,
845                                                struct sk_buff *skb)
846 {
847         struct ixgbevf_rx_buffer *rx_buffer;
848         struct page *page;
849
850         rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
851         page = rx_buffer->page;
852         prefetchw(page);
853
854         if (likely(!skb)) {
855                 void *page_addr = page_address(page) +
856                                   rx_buffer->page_offset;
857
858                 /* prefetch first cache line of first page */
859                 prefetch(page_addr);
860 #if L1_CACHE_BYTES < 128
861                 prefetch(page_addr + L1_CACHE_BYTES);
862 #endif
863
864                 /* allocate a skb to store the frags */
865                 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
866                                                 IXGBEVF_RX_HDR_SIZE);
867                 if (unlikely(!skb)) {
868                         rx_ring->rx_stats.alloc_rx_buff_failed++;
869                         return NULL;
870                 }
871
872                 /* we will be copying header into skb->data in
873                  * pskb_may_pull so it is in our interest to prefetch
874                  * it now to avoid a possible cache miss
875                  */
876                 prefetchw(skb->data);
877         }
878
879         /* we are reusing so sync this buffer for CPU use */
880         dma_sync_single_range_for_cpu(rx_ring->dev,
881                                       rx_buffer->dma,
882                                       rx_buffer->page_offset,
883                                       IXGBEVF_RX_BUFSZ,
884                                       DMA_FROM_DEVICE);
885
886         /* pull page into skb */
887         if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
888                 /* hand second half of page back to the ring */
889                 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
890         } else {
891                 /* we are not reusing the buffer so unmap it */
892                 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
893                                PAGE_SIZE, DMA_FROM_DEVICE);
894         }
895
896         /* clear contents of buffer_info */
897         rx_buffer->dma = 0;
898         rx_buffer->page = NULL;
899
900         return skb;
901 }
902
903 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
904                                              u32 qmask)
905 {
906         struct ixgbe_hw *hw = &adapter->hw;
907
908         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
909 }
910
911 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
912                                 struct ixgbevf_ring *rx_ring,
913                                 int budget)
914 {
915         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
916         u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
917         struct sk_buff *skb = rx_ring->skb;
918
919         while (likely(total_rx_packets < budget)) {
920                 union ixgbe_adv_rx_desc *rx_desc;
921
922                 /* return some buffers to hardware, one at a time is too slow */
923                 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
924                         ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
925                         cleaned_count = 0;
926                 }
927
928                 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
929
930                 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
931                         break;
932
933                 /* This memory barrier is needed to keep us from reading
934                  * any other fields out of the rx_desc until we know the
935                  * RXD_STAT_DD bit is set
936                  */
937                 rmb();
938
939                 /* retrieve a buffer from the ring */
940                 skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
941
942                 /* exit if we failed to retrieve a buffer */
943                 if (!skb)
944                         break;
945
946                 cleaned_count++;
947
948                 /* fetch next buffer in frame if non-eop */
949                 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
950                         continue;
951
952                 /* verify the packet layout is correct */
953                 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
954                         skb = NULL;
955                         continue;
956                 }
957
958                 /* probably a little skewed due to removing CRC */
959                 total_rx_bytes += skb->len;
960
961                 /* Workaround hardware that can't do proper VEPA multicast
962                  * source pruning.
963                  */
964                 if ((skb->pkt_type == PACKET_BROADCAST ||
965                      skb->pkt_type == PACKET_MULTICAST) &&
966                     ether_addr_equal(rx_ring->netdev->dev_addr,
967                                      eth_hdr(skb)->h_source)) {
968                         dev_kfree_skb_irq(skb);
969                         continue;
970                 }
971
972                 /* populate checksum, VLAN, and protocol */
973                 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
974
975                 ixgbevf_rx_skb(q_vector, skb);
976
977                 /* reset skb pointer */
978                 skb = NULL;
979
980                 /* update budget accounting */
981                 total_rx_packets++;
982         }
983
984         /* place incomplete frames back on ring for completion */
985         rx_ring->skb = skb;
986
987         u64_stats_update_begin(&rx_ring->syncp);
988         rx_ring->stats.packets += total_rx_packets;
989         rx_ring->stats.bytes += total_rx_bytes;
990         u64_stats_update_end(&rx_ring->syncp);
991         q_vector->rx.total_packets += total_rx_packets;
992         q_vector->rx.total_bytes += total_rx_bytes;
993
994         return total_rx_packets;
995 }
996
997 /**
998  * ixgbevf_poll - NAPI polling callback
999  * @napi: napi struct with our devices info in it
1000  * @budget: amount of work driver is allowed to do this pass, in packets
1001  *
1002  * This function will clean one or more rings associated with a
1003  * q_vector.
1004  **/
1005 static int ixgbevf_poll(struct napi_struct *napi, int budget)
1006 {
1007         struct ixgbevf_q_vector *q_vector =
1008                 container_of(napi, struct ixgbevf_q_vector, napi);
1009         struct ixgbevf_adapter *adapter = q_vector->adapter;
1010         struct ixgbevf_ring *ring;
1011         int per_ring_budget, work_done = 0;
1012         bool clean_complete = true;
1013
1014         ixgbevf_for_each_ring(ring, q_vector->tx)
1015                 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
1016
1017 #ifdef CONFIG_NET_RX_BUSY_POLL
1018         if (!ixgbevf_qv_lock_napi(q_vector))
1019                 return budget;
1020 #endif
1021
1022         /* attempt to distribute budget to each queue fairly, but don't allow
1023          * the budget to go below 1 because we'll exit polling
1024          */
1025         if (q_vector->rx.count > 1)
1026                 per_ring_budget = max(budget/q_vector->rx.count, 1);
1027         else
1028                 per_ring_budget = budget;
1029
1030         ixgbevf_for_each_ring(ring, q_vector->rx) {
1031                 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1032                                                    per_ring_budget);
1033                 work_done += cleaned;
1034                 clean_complete &= (cleaned < per_ring_budget);
1035         }
1036
1037 #ifdef CONFIG_NET_RX_BUSY_POLL
1038         ixgbevf_qv_unlock_napi(q_vector);
1039 #endif
1040
1041         /* If all work not completed, return budget and keep polling */
1042         if (!clean_complete)
1043                 return budget;
1044         /* all work done, exit the polling mode */
1045         napi_complete_done(napi, work_done);
1046         if (adapter->rx_itr_setting & 1)
1047                 ixgbevf_set_itr(q_vector);
1048         if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1049             !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1050                 ixgbevf_irq_enable_queues(adapter,
1051                                           1 << q_vector->v_idx);
1052
1053         return 0;
1054 }
1055
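/* Note on napi_complete_done() above (added for clarity): unlike plain
 * napi_complete(), it passes the number of packets processed in this poll,
 * which at the time of this change lets the core defer the GRO flush via the
 * per-device gro_flush_timeout attribute while traffic keeps arriving.
 * Returning the full budget instead keeps the vector in polling mode.
 */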
1056 /**
1057  * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1058  * @q_vector: structure containing interrupt and ring information
1059  **/
1060 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1061 {
1062         struct ixgbevf_adapter *adapter = q_vector->adapter;
1063         struct ixgbe_hw *hw = &adapter->hw;
1064         int v_idx = q_vector->v_idx;
1065         u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1066
1067         /* set the WDIS bit to not clear the timer bits and cause an
1068          * immediate assertion of the interrupt
1069          */
1070         itr_reg |= IXGBE_EITR_CNT_WDIS;
1071
1072         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1073 }
1074
1075 #ifdef CONFIG_NET_RX_BUSY_POLL
1076 /* must be called with local_bh_disable()d */
1077 static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
1078 {
1079         struct ixgbevf_q_vector *q_vector =
1080                         container_of(napi, struct ixgbevf_q_vector, napi);
1081         struct ixgbevf_adapter *adapter = q_vector->adapter;
1082         struct ixgbevf_ring  *ring;
1083         int found = 0;
1084
1085         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
1086                 return LL_FLUSH_FAILED;
1087
1088         if (!ixgbevf_qv_lock_poll(q_vector))
1089                 return LL_FLUSH_BUSY;
1090
1091         ixgbevf_for_each_ring(ring, q_vector->rx) {
1092                 found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
1093 #ifdef BP_EXTENDED_STATS
1094                 if (found)
1095                         ring->stats.cleaned += found;
1096                 else
1097                         ring->stats.misses++;
1098 #endif
1099                 if (found)
1100                         break;
1101         }
1102
1103         ixgbevf_qv_unlock_poll(q_vector);
1104
1105         return found;
1106 }
1107 #endif /* CONFIG_NET_RX_BUSY_POLL */
1108
1109 /**
1110  * ixgbevf_configure_msix - Configure MSI-X hardware
1111  * @adapter: board private structure
1112  *
1113  * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
1114  * interrupts.
1115  **/
1116 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1117 {
1118         struct ixgbevf_q_vector *q_vector;
1119         int q_vectors, v_idx;
1120
1121         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1122         adapter->eims_enable_mask = 0;
1123
1124         /* Populate the IVAR table and set the ITR values to the
1125          * corresponding register.
1126          */
1127         for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1128                 struct ixgbevf_ring *ring;
1129
1130                 q_vector = adapter->q_vector[v_idx];
1131
1132                 ixgbevf_for_each_ring(ring, q_vector->rx)
1133                         ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1134
1135                 ixgbevf_for_each_ring(ring, q_vector->tx)
1136                         ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1137
1138                 if (q_vector->tx.ring && !q_vector->rx.ring) {
1139                         /* Tx only vector */
1140                         if (adapter->tx_itr_setting == 1)
1141                                 q_vector->itr = IXGBE_10K_ITR;
1142                         else
1143                                 q_vector->itr = adapter->tx_itr_setting;
1144                 } else {
1145                         /* Rx or Rx/Tx vector */
1146                         if (adapter->rx_itr_setting == 1)
1147                                 q_vector->itr = IXGBE_20K_ITR;
1148                         else
1149                                 q_vector->itr = adapter->rx_itr_setting;
1150                 }
1151
1152                 /* add q_vector eims value to global eims_enable_mask */
1153                 adapter->eims_enable_mask |= 1 << v_idx;
1154
1155                 ixgbevf_write_eitr(q_vector);
1156         }
1157
1158         ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1159         /* setup eims_other and add value to global eims_enable_mask */
1160         adapter->eims_other = 1 << v_idx;
1161         adapter->eims_enable_mask |= adapter->eims_other;
1162 }
1163
1164 enum latency_range {
1165         lowest_latency = 0,
1166         low_latency = 1,
1167         bulk_latency = 2,
1168         latency_invalid = 255
1169 };
1170
1171 /**
1172  * ixgbevf_update_itr - update the dynamic ITR value based on statistics
1173  * @q_vector: structure containing interrupt and ring information
1174  * @ring_container: structure containing ring performance data
1175  *
1176  * Stores a new ITR value based on packets and byte
1177  * counts during the last interrupt.  The advantage of per interrupt
1178  * computation is faster updates and more accurate ITR for the current
1179  * traffic pattern.  Constants in this function were computed
1180  * based on theoretical maximum wire speed and thresholds were set based
1181  * on testing data as well as attempting to minimize response time
1182  * while increasing bulk throughput.
1183  **/
1184 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1185                                struct ixgbevf_ring_container *ring_container)
1186 {
1187         int bytes = ring_container->total_bytes;
1188         int packets = ring_container->total_packets;
1189         u32 timepassed_us;
1190         u64 bytes_perint;
1191         u8 itr_setting = ring_container->itr;
1192
1193         if (packets == 0)
1194                 return;
1195
1196         /* simple throttle rate management
1197          *    0-20MB/s lowest (100000 ints/s)
1198          *   20-100MB/s low   (20000 ints/s)
1199          *  100-1249MB/s bulk (8000 ints/s)
1200          */
1201         /* what was last interrupt timeslice? */
1202         timepassed_us = q_vector->itr >> 2;
1203         bytes_perint = bytes / timepassed_us; /* bytes/usec */
1204
1205         switch (itr_setting) {
1206         case lowest_latency:
1207                 if (bytes_perint > 10)
1208                         itr_setting = low_latency;
1209                 break;
1210         case low_latency:
1211                 if (bytes_perint > 20)
1212                         itr_setting = bulk_latency;
1213                 else if (bytes_perint <= 10)
1214                         itr_setting = lowest_latency;
1215                 break;
1216         case bulk_latency:
1217                 if (bytes_perint <= 20)
1218                         itr_setting = low_latency;
1219                 break;
1220         }
1221
1222         /* clear work counters since we have the values we need */
1223         ring_container->total_bytes = 0;
1224         ring_container->total_packets = 0;
1225
1226         /* write updated itr to ring container */
1227         ring_container->itr = itr_setting;
1228 }
1229
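/* Worked example (added illustration) for the thresholds above: with a last
 * interval of 50 usec (q_vector->itr >> 2 == 50) and 2500 bytes observed,
 * bytes_perint = 2500 / 50 = 50 bytes/usec (~50 MB/s), so a ring in
 * low_latency is promoted to bulk_latency and one in lowest_latency steps up
 * to low_latency; only 400 bytes (8 bytes/usec) would drop a low_latency ring
 * back to lowest_latency.
 */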
1230 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1231 {
1232         u32 new_itr = q_vector->itr;
1233         u8 current_itr;
1234
1235         ixgbevf_update_itr(q_vector, &q_vector->tx);
1236         ixgbevf_update_itr(q_vector, &q_vector->rx);
1237
1238         current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1239
1240         switch (current_itr) {
1241         /* counts and packets in update_itr are dependent on these numbers */
1242         case lowest_latency:
1243                 new_itr = IXGBE_100K_ITR;
1244                 break;
1245         case low_latency:
1246                 new_itr = IXGBE_20K_ITR;
1247                 break;
1248         case bulk_latency:
1249         default:
1250                 new_itr = IXGBE_8K_ITR;
1251                 break;
1252         }
1253
1254         if (new_itr != q_vector->itr) {
1255                 /* do an exponential smoothing */
1256                 new_itr = (10 * new_itr * q_vector->itr) /
1257                           ((9 * new_itr) + q_vector->itr);
1258
1259                 /* save the algorithm value here */
1260                 q_vector->itr = new_itr;
1261
1262                 ixgbevf_write_eitr(q_vector);
1263         }
1264 }
1265
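/* Numeric example (added illustration) of the smoothing above, assuming the
 * traditional encodings IXGBE_20K_ITR == 200 and IXGBE_100K_ITR == 40: moving
 * from q_vector->itr = 200 towards new_itr = 40 gives
 * (10 * 40 * 200) / ((9 * 40) + 200) = 80000 / 560 = 142 after integer
 * division, so the programmed rate converges on the target over several
 * interrupts instead of jumping straight to it.
 */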
1266 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1267 {
1268         struct ixgbevf_adapter *adapter = data;
1269         struct ixgbe_hw *hw = &adapter->hw;
1270
1271         hw->mac.get_link_status = 1;
1272
1273         ixgbevf_service_event_schedule(adapter);
1274
1275         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1276
1277         return IRQ_HANDLED;
1278 }
1279
1280 /**
1281  * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
1282  * @irq: unused
1283  * @data: pointer to our q_vector struct for this interrupt vector
1284  **/
1285 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1286 {
1287         struct ixgbevf_q_vector *q_vector = data;
1288
1289         /* EIAM disabled interrupts (on this vector) for us */
1290         if (q_vector->rx.ring || q_vector->tx.ring)
1291                 napi_schedule(&q_vector->napi);
1292
1293         return IRQ_HANDLED;
1294 }
1295
1296 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1297                                      int r_idx)
1298 {
1299         struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1300
1301         a->rx_ring[r_idx]->next = q_vector->rx.ring;
1302         q_vector->rx.ring = a->rx_ring[r_idx];
1303         q_vector->rx.count++;
1304 }
1305
1306 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1307                                      int t_idx)
1308 {
1309         struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1310
1311         a->tx_ring[t_idx]->next = q_vector->tx.ring;
1312         q_vector->tx.ring = a->tx_ring[t_idx];
1313         q_vector->tx.count++;
1314 }
1315
1316 /**
1317  * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1318  * @adapter: board private structure to initialize
1319  *
1320  * This function maps descriptor rings to the queue-specific vectors
1321  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
1322  * one vector per ring/queue, but on a constrained vector budget, we
1323  * group the rings as "efficiently" as possible.  You would add new
1324  * mapping configurations in here.
1325  **/
1326 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1327 {
1328         int q_vectors;
1329         int v_start = 0;
1330         int rxr_idx = 0, txr_idx = 0;
1331         int rxr_remaining = adapter->num_rx_queues;
1332         int txr_remaining = adapter->num_tx_queues;
1333         int i, j;
1334         int rqpv, tqpv;
1335         int err = 0;
1336
1337         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1338
1339         /* The ideal configuration...
1340          * We have enough vectors to map one per queue.
1341          */
1342         if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1343                 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1344                         map_vector_to_rxq(adapter, v_start, rxr_idx);
1345
1346                 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1347                         map_vector_to_txq(adapter, v_start, txr_idx);
1348                 goto out;
1349         }
1350
1351         /* If we don't have enough vectors for a 1-to-1
1352          * mapping, we'll have to group them so there are
1353          * multiple queues per vector.
1354          */
1355         /* Re-adjusting *qpv takes care of the remainder. */
1356         for (i = v_start; i < q_vectors; i++) {
1357                 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1358                 for (j = 0; j < rqpv; j++) {
1359                         map_vector_to_rxq(adapter, i, rxr_idx);
1360                         rxr_idx++;
1361                         rxr_remaining--;
1362                 }
1363         }
1364         for (i = v_start; i < q_vectors; i++) {
1365                 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1366                 for (j = 0; j < tqpv; j++) {
1367                         map_vector_to_txq(adapter, i, txr_idx);
1368                         txr_idx++;
1369                         txr_remaining--;
1370                 }
1371         }
1372
1373 out:
1374         return err;
1375 }
1376
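/* Worked example (added illustration) of the fallback mapping above: with
 * 3 Rx queues, 3 Tx queues and only 2 q_vectors, the Rx pass computes
 * rqpv = DIV_ROUND_UP(3, 2) = 2, so vector 0 gets rx0 and rx1 while vector 1
 * gets rx2 (DIV_ROUND_UP(1, 1) = 1); the Tx pass then hands tx0/tx1 to
 * vector 0 and tx2 to vector 1 in the same way.
 */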
1377 /**
1378  * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1379  * @adapter: board private structure
1380  *
1381  * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1382  * interrupts from the kernel.
1383  **/
1384 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1385 {
1386         struct net_device *netdev = adapter->netdev;
1387         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1388         int vector, err;
1389         int ri = 0, ti = 0;
1390
1391         for (vector = 0; vector < q_vectors; vector++) {
1392                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1393                 struct msix_entry *entry = &adapter->msix_entries[vector];
1394
1395                 if (q_vector->tx.ring && q_vector->rx.ring) {
1396                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1397                                  "%s-%s-%d", netdev->name, "TxRx", ri++);
1398                         ti++;
1399                 } else if (q_vector->rx.ring) {
1400                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1401                                  "%s-%s-%d", netdev->name, "rx", ri++);
1402                 } else if (q_vector->tx.ring) {
1403                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1404                                  "%s-%s-%d", netdev->name, "tx", ti++);
1405                 } else {
1406                         /* skip this unused q_vector */
1407                         continue;
1408                 }
1409                 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1410                                   q_vector->name, q_vector);
1411                 if (err) {
1412                         hw_dbg(&adapter->hw,
1413                                "request_irq failed for MSIX interrupt Error: %d\n",
1414                                err);
1415                         goto free_queue_irqs;
1416                 }
1417         }
1418
1419         err = request_irq(adapter->msix_entries[vector].vector,
1420                           &ixgbevf_msix_other, 0, netdev->name, adapter);
1421         if (err) {
1422                 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1423                        err);
1424                 goto free_queue_irqs;
1425         }
1426
1427         return 0;
1428
1429 free_queue_irqs:
1430         while (vector) {
1431                 vector--;
1432                 free_irq(adapter->msix_entries[vector].vector,
1433                          adapter->q_vector[vector]);
1434         }
1435         /* This failure is non-recoverable - it indicates the system is
1436          * out of MSIX vector resources and the VF driver cannot run
1437          * without them.  Set the number of msix vectors to zero
1438          * indicating that not enough can be allocated.  The error
1439          * will be returned to the user indicating device open failed.
1440          * Any further attempts to force the driver to open will also
1441          * fail.  The only way to recover is to unload the driver and
1442          * reload it again.  If the system has recovered some MSIX
1443          * vectors then it may succeed.
1444          */
1445         adapter->num_msix_vectors = 0;
1446         return err;
1447 }
1448
1449 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1450 {
1451         int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1452
1453         for (i = 0; i < q_vectors; i++) {
1454                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1455
1456                 q_vector->rx.ring = NULL;
1457                 q_vector->tx.ring = NULL;
1458                 q_vector->rx.count = 0;
1459                 q_vector->tx.count = 0;
1460         }
1461 }
1462
1463 /**
1464  * ixgbevf_request_irq - initialize interrupts
1465  * @adapter: board private structure
1466  *
1467  * Attempts to configure interrupts using the best available
1468  * capabilities of the hardware and kernel.
1469  **/
1470 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1471 {
1472         int err = 0;
1473
1474         err = ixgbevf_request_msix_irqs(adapter);
1475
1476         if (err)
1477                 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1478
1479         return err;
1480 }
1481
1482 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1483 {
1484         int i, q_vectors;
1485
1486         q_vectors = adapter->num_msix_vectors;
1487         i = q_vectors - 1;
1488
1489         free_irq(adapter->msix_entries[i].vector, adapter);
1490         i--;
1491
1492         for (; i >= 0; i--) {
1493                 /* free only the irqs that were actually requested */
1494                 if (!adapter->q_vector[i]->rx.ring &&
1495                     !adapter->q_vector[i]->tx.ring)
1496                         continue;
1497
1498                 free_irq(adapter->msix_entries[i].vector,
1499                          adapter->q_vector[i]);
1500         }
1501
1502         ixgbevf_reset_q_vectors(adapter);
1503 }
1504
1505 /**
1506  * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1507  * @adapter: board private structure
1508  **/
1509 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1510 {
1511         struct ixgbe_hw *hw = &adapter->hw;
1512         int i;
1513
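             /* Turn off auto-mask and auto-clear, mask every interrupt cause,
              * then wait for any handler still running on another CPU to
              * finish before returning.
              */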
1514         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1515         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1516         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1517
1518         IXGBE_WRITE_FLUSH(hw);
1519
1520         for (i = 0; i < adapter->num_msix_vectors; i++)
1521                 synchronize_irq(adapter->msix_entries[i].vector);
1522 }
1523
1524 /**
1525  * ixgbevf_irq_enable - Enable default interrupt generation settings
1526  * @adapter: board private structure
1527  **/
1528 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1529 {
1530         struct ixgbe_hw *hw = &adapter->hw;
1531
1532         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1533         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1534         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1535 }
1536
1537 /**
1538  * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1539  * @adapter: board private structure
1540  * @ring: structure containing ring specific data
1541  *
1542  * Configure the Tx descriptor ring after a reset.
1543  **/
1544 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1545                                       struct ixgbevf_ring *ring)
1546 {
1547         struct ixgbe_hw *hw = &adapter->hw;
1548         u64 tdba = ring->dma;
1549         int wait_loop = 10;
1550         u32 txdctl = IXGBE_TXDCTL_ENABLE;
1551         u8 reg_idx = ring->reg_idx;
1552
1553         /* disable queue to avoid issues while updating state */
1554         IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1555         IXGBE_WRITE_FLUSH(hw);
1556
1557         IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1558         IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1559         IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1560                         ring->count * sizeof(union ixgbe_adv_tx_desc));
1561
1562         /* disable head writeback */
1563         IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1564         IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1565
1566         /* enable relaxed ordering */
1567         IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1568                         (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1569                          IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1570
1571         /* reset head and tail pointers */
1572         IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1573         IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1574         ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1575
1576         /* reset ntu and ntc to place SW in sync with hardware */
1577         ring->next_to_clean = 0;
1578         ring->next_to_use = 0;
1579
1580         /* In order to avoid issues, WTHRESH + PTHRESH should always be equal
1581          * to or less than the number of on-chip descriptors, which is
1582          * currently 40.
1583          */
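             /* TXDCTL field layout on this hardware: PTHRESH lives in bits
              * 6:0, HTHRESH in bits 14:8 and WTHRESH in bits 22:16, which is
              * why the values below are shifted as they are.
              */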
1584         txdctl |= (8 << 16);    /* WTHRESH = 8 */
1585
1586         /* Setting PTHRESH to 32 improves performance */
1587         txdctl |= (1 << 8) |    /* HTHRESH = 1 */
1588                   32;          /* PTHRESH = 32 */
1589
1590         clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1591
1592         IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1593
1594         /* poll to verify queue is enabled */
1595         do {
1596                 usleep_range(1000, 2000);
1597                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1598         } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1599         if (!wait_loop)
1600                 pr_err("Could not enable Tx Queue %d\n", reg_idx);
1601 }
1602
1603 /**
1604  * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1605  * @adapter: board private structure
1606  *
1607  * Configure the Tx unit of the MAC after a reset.
1608  **/
1609 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1610 {
1611         u32 i;
1612
1613         /* Setup the HW Tx Head and Tail descriptor pointers */
1614         for (i = 0; i < adapter->num_tx_queues; i++)
1615                 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1616 }
1617
1618 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1619
1620 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1621 {
1622         struct ixgbe_hw *hw = &adapter->hw;
1623         u32 srrctl;
1624
1625         srrctl = IXGBE_SRRCTL_DROP_EN;
1626
1627         srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1628         srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1629         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1630
1631         IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1632 }
1633
1634 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1635 {
1636         struct ixgbe_hw *hw = &adapter->hw;
1637
1638         /* PSRTYPE must be initialized in 82599 */
1639         u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1640                       IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1641                       IXGBE_PSRTYPE_L2HDR;
1642
1643         if (adapter->num_rx_queues > 1)
1644                 psrtype |= 1 << 29;
1645
1646         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1647 }
1648
1649 #define IXGBEVF_MAX_RX_DESC_POLL 10
1650 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1651                                      struct ixgbevf_ring *ring)
1652 {
1653         struct ixgbe_hw *hw = &adapter->hw;
1654         int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1655         u32 rxdctl;
1656         u8 reg_idx = ring->reg_idx;
1657
1658         if (IXGBE_REMOVED(hw->hw_addr))
1659                 return;
1660         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1661         rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1662
1663         /* write value back with RXDCTL.ENABLE bit cleared */
1664         IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1665
1666         /* the hardware may take up to 100us to really disable the Rx queue */
1667         do {
1668                 udelay(10);
1669                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1670         } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1671
1672         if (!wait_loop)
1673                 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1674                        reg_idx);
1675 }
1676
1677 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1678                                          struct ixgbevf_ring *ring)
1679 {
1680         struct ixgbe_hw *hw = &adapter->hw;
1681         int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1682         u32 rxdctl;
1683         u8 reg_idx = ring->reg_idx;
1684
1685         if (IXGBE_REMOVED(hw->hw_addr))
1686                 return;
1687         do {
1688                 usleep_range(1000, 2000);
1689                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1690         } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1691
1692         if (!wait_loop)
1693                 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1694                        reg_idx);
1695 }
1696
1697 static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1698 {
1699         struct ixgbe_hw *hw = &adapter->hw;
1700         u32 vfmrqc = 0, vfreta = 0;
1701         u16 rss_i = adapter->num_rx_queues;
1702         u8 i, j;
1703
1704         /* Fill out hash function seeds */
1705         netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
1706         for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1707                 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
1708
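             /* Spread the Rx queues across the redirection table.  Each
              * 32-bit VFRETA register packs four 8-bit entries, so the
              * accumulated value is flushed after every fourth entry.
              */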
1709         for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1710                 if (j == rss_i)
1711                         j = 0;
1712
1713                 adapter->rss_indir_tbl[i] = j;
1714
1715                 vfreta |= j << (i & 0x3) * 8;
1716                 if ((i & 3) == 3) {
1717                         IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1718                         vfreta = 0;
1719                 }
1720         }
1721
1722         /* Perform hash on these packet types */
1723         vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1724                 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1725                 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1726                 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1727
1728         vfmrqc |= IXGBE_VFMRQC_RSSEN;
1729
1730         IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1731 }
1732
1733 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1734                                       struct ixgbevf_ring *ring)
1735 {
1736         struct ixgbe_hw *hw = &adapter->hw;
1737         u64 rdba = ring->dma;
1738         u32 rxdctl;
1739         u8 reg_idx = ring->reg_idx;
1740
1741         /* disable queue to avoid issues while updating state */
1742         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1743         ixgbevf_disable_rx_queue(adapter, ring);
1744
1745         IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1746         IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1747         IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1748                         ring->count * sizeof(union ixgbe_adv_rx_desc));
1749
1750         /* enable relaxed ordering */
1751         IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1752                         IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1753
1754         /* reset head and tail pointers */
1755         IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1756         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1757         ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1758
1759         /* reset ntu and ntc to place SW in sync with hardware */
1760         ring->next_to_clean = 0;
1761         ring->next_to_use = 0;
1762         ring->next_to_alloc = 0;
1763
1764         ixgbevf_configure_srrctl(adapter, reg_idx);
1765
1766         /* allow any size packet since we can handle overflow */
1767         rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
1768
1769         rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1770         IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1771
1772         ixgbevf_rx_desc_queue_enable(adapter, ring);
1773         ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1774 }
1775
1776 /**
1777  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1778  * @adapter: board private structure
1779  *
1780  * Configure the Rx unit of the MAC after a reset.
1781  **/
1782 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1783 {
1784         int i;
1785         struct ixgbe_hw *hw = &adapter->hw;
1786         struct net_device *netdev = adapter->netdev;
1787
1788         ixgbevf_setup_psrtype(adapter);
1789         if (hw->mac.type >= ixgbe_mac_X550_vf)
1790                 ixgbevf_setup_vfmrqc(adapter);
1791
1792         /* notify the PF of our intent to use this size of frame */
1793         ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
1794
1795         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1796          * the Base and Length of the Rx Descriptor Ring
1797          */
1798         for (i = 0; i < adapter->num_rx_queues; i++)
1799                 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
1800 }
1801
1802 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1803                                    __be16 proto, u16 vid)
1804 {
1805         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1806         struct ixgbe_hw *hw = &adapter->hw;
1807         int err;
1808
1809         spin_lock_bh(&adapter->mbx_lock);
1810
1811         /* add VID to filter table */
1812         err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1813
1814         spin_unlock_bh(&adapter->mbx_lock);
1815
1816         /* translate error return types so error makes sense */
1817         if (err == IXGBE_ERR_MBX)
1818                 return -EIO;
1819
1820         if (err == IXGBE_ERR_INVALID_ARGUMENT)
1821                 return -EACCES;
1822
1823         set_bit(vid, adapter->active_vlans);
1824
1825         return err;
1826 }
1827
1828 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1829                                     __be16 proto, u16 vid)
1830 {
1831         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1832         struct ixgbe_hw *hw = &adapter->hw;
1833         int err = -EOPNOTSUPP;
1834
1835         spin_lock_bh(&adapter->mbx_lock);
1836
1837         /* remove VID from filter table */
1838         err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1839
1840         spin_unlock_bh(&adapter->mbx_lock);
1841
1842         clear_bit(vid, adapter->active_vlans);
1843
1844         return err;
1845 }
1846
1847 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1848 {
1849         u16 vid;
1850
1851         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1852                 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1853                                         htons(ETH_P_8021Q), vid);
1854 }
1855
1856 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1857 {
1858         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1859         struct ixgbe_hw *hw = &adapter->hw;
1860         int count = 0;
1861
1862         if (netdev_uc_count(netdev) > 10) {
1863                 pr_err("Too many unicast filters - No Space\n");
1864                 return -ENOSPC;
1865         }
1866
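             /* Filter slots are programmed starting at index 1; index 0 with
              * a NULL address is used below to ask the PF to clear all
              * entries for this VF.
              */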
1867         if (!netdev_uc_empty(netdev)) {
1868                 struct netdev_hw_addr *ha;
1869
1870                 netdev_for_each_uc_addr(ha, netdev) {
1871                         hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1872                         udelay(200);
1873                 }
1874         } else {
1875                 /* If the list is empty then send message to PF driver to
1876                  * clear all MAC VLANs on this VF.
1877                  */
1878                 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1879         }
1880
1881         return count;
1882 }
1883
1884 /**
1885  * ixgbevf_set_rx_mode - Multicast and unicast set
1886  * @netdev: network interface device structure
1887  *
1888  * The set_rx_mode entry point is called whenever the multicast address
1889  * list, unicast address list or the network interface flags are updated.
1890  * This routine is responsible for configuring the hardware for proper
1891  * multicast mode and configuring requested unicast filters.
1892  **/
1893 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1894 {
1895         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1896         struct ixgbe_hw *hw = &adapter->hw;
1897
1898         spin_lock_bh(&adapter->mbx_lock);
1899
1900         /* reprogram multicast list */
1901         hw->mac.ops.update_mc_addr_list(hw, netdev);
1902
1903         ixgbevf_write_uc_addr_list(netdev);
1904
1905         spin_unlock_bh(&adapter->mbx_lock);
1906 }
1907
1908 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1909 {
1910         int q_idx;
1911         struct ixgbevf_q_vector *q_vector;
1912         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1913
1914         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1915                 q_vector = adapter->q_vector[q_idx];
1916 #ifdef CONFIG_NET_RX_BUSY_POLL
1917                 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1918 #endif
1919                 napi_enable(&q_vector->napi);
1920         }
1921 }
1922
1923 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1924 {
1925         int q_idx;
1926         struct ixgbevf_q_vector *q_vector;
1927         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1928
1929         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1930                 q_vector = adapter->q_vector[q_idx];
1931                 napi_disable(&q_vector->napi);
1932 #ifdef CONFIG_NET_RX_BUSY_POLL
1933                 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1934                         pr_info("QV %d locked\n", q_idx);
1935                         usleep_range(1000, 20000);
1936                 }
1937 #endif /* CONFIG_NET_RX_BUSY_POLL */
1938         }
1939 }
1940
1941 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1942 {
1943         struct ixgbe_hw *hw = &adapter->hw;
1944         unsigned int def_q = 0;
1945         unsigned int num_tcs = 0;
1946         unsigned int num_rx_queues = adapter->num_rx_queues;
1947         unsigned int num_tx_queues = adapter->num_tx_queues;
1948         int err;
1949
1950         spin_lock_bh(&adapter->mbx_lock);
1951
1952         /* fetch queue configuration from the PF */
1953         err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1954
1955         spin_unlock_bh(&adapter->mbx_lock);
1956
1957         if (err)
1958                 return err;
1959
1960         if (num_tcs > 1) {
1961                 /* we need only one Tx queue */
1962                 num_tx_queues = 1;
1963
1964                 /* update default Tx ring register index */
1965                 adapter->tx_ring[0]->reg_idx = def_q;
1966
1967                 /* we need as many queues as traffic classes */
1968                 num_rx_queues = num_tcs;
1969         }
1970
1971         /* if we have a bad config abort request queue reset */
1972         if ((adapter->num_rx_queues != num_rx_queues) ||
1973             (adapter->num_tx_queues != num_tx_queues)) {
1974                 /* force mailbox timeout to prevent further messages */
1975                 hw->mbx.timeout = 0;
1976
1977                 /* wait for watchdog to come around and bail us out */
1978                 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1979         }
1980
1981         return 0;
1982 }
1983
1984 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1985 {
1986         ixgbevf_configure_dcb(adapter);
1987
1988         ixgbevf_set_rx_mode(adapter->netdev);
1989
1990         ixgbevf_restore_vlan(adapter);
1991
1992         ixgbevf_configure_tx(adapter);
1993         ixgbevf_configure_rx(adapter);
1994 }
1995
1996 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1997 {
1998         /* Only save pre-reset stats if there are some */
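             /* The hardware counters restart when the VF is reset, so fold
              * the delta accumulated since the last base snapshot into the
              * saved_reset_* totals before it is lost.
              */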
1999         if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2000                 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2001                         adapter->stats.base_vfgprc;
2002                 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2003                         adapter->stats.base_vfgptc;
2004                 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2005                         adapter->stats.base_vfgorc;
2006                 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2007                         adapter->stats.base_vfgotc;
2008                 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2009                         adapter->stats.base_vfmprc;
2010         }
2011 }
2012
2013 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2014 {
2015         struct ixgbe_hw *hw = &adapter->hw;
2016
2017         adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2018         adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2019         adapter->stats.last_vfgorc |=
2020                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2021         adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2022         adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2023         adapter->stats.last_vfgotc |=
2024                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2025         adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2026
2027         adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2028         adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2029         adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2030         adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2031         adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2032 }
2033
2034 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2035 {
2036         struct ixgbe_hw *hw = &adapter->hw;
2037         int api[] = { ixgbe_mbox_api_12,
2038                       ixgbe_mbox_api_11,
2039                       ixgbe_mbox_api_10,
2040                       ixgbe_mbox_api_unknown };
2041         int err = 0, idx = 0;
2042
2043         spin_lock_bh(&adapter->mbx_lock);
2044
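             /* Try the newest mailbox API first and step down one version at
              * a time until the PF accepts one.
              */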
2045         while (api[idx] != ixgbe_mbox_api_unknown) {
2046                 err = ixgbevf_negotiate_api_version(hw, api[idx]);
2047                 if (!err)
2048                         break;
2049                 idx++;
2050         }
2051
2052         spin_unlock_bh(&adapter->mbx_lock);
2053 }
2054
2055 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2056 {
2057         struct net_device *netdev = adapter->netdev;
2058         struct ixgbe_hw *hw = &adapter->hw;
2059
2060         ixgbevf_configure_msix(adapter);
2061
2062         spin_lock_bh(&adapter->mbx_lock);
2063
2064         if (is_valid_ether_addr(hw->mac.addr))
2065                 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2066         else
2067                 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2068
2069         spin_unlock_bh(&adapter->mbx_lock);
2070
2071         smp_mb__before_atomic();
2072         clear_bit(__IXGBEVF_DOWN, &adapter->state);
2073         ixgbevf_napi_enable_all(adapter);
2074
2075         /* clear any pending interrupts, may auto mask */
2076         IXGBE_READ_REG(hw, IXGBE_VTEICR);
2077         ixgbevf_irq_enable(adapter);
2078
2079         /* enable transmits */
2080         netif_tx_start_all_queues(netdev);
2081
2082         ixgbevf_save_reset_stats(adapter);
2083         ixgbevf_init_last_counter_stats(adapter);
2084
2085         hw->mac.get_link_status = 1;
2086         mod_timer(&adapter->service_timer, jiffies);
2087 }
2088
2089 void ixgbevf_up(struct ixgbevf_adapter *adapter)
2090 {
2091         ixgbevf_configure(adapter);
2092
2093         ixgbevf_up_complete(adapter);
2094 }
2095
2096 /**
2097  * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2098  * @rx_ring: ring to free buffers from
2099  **/
2100 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2101 {
2102         struct device *dev = rx_ring->dev;
2103         unsigned long size;
2104         unsigned int i;
2105
2106         /* Free Rx ring sk_buff */
2107         if (rx_ring->skb) {
2108                 dev_kfree_skb(rx_ring->skb);
2109                 rx_ring->skb = NULL;
2110         }
2111
2112         /* ring already cleared, nothing to do */
2113         if (!rx_ring->rx_buffer_info)
2114                 return;
2115
2116         /* Free all the Rx ring pages */
2117         for (i = 0; i < rx_ring->count; i++) {
2118                 struct ixgbevf_rx_buffer *rx_buffer;
2119
2120                 rx_buffer = &rx_ring->rx_buffer_info[i];
2121                 if (rx_buffer->dma)
2122                         dma_unmap_page(dev, rx_buffer->dma,
2123                                        PAGE_SIZE, DMA_FROM_DEVICE);
2124                 rx_buffer->dma = 0;
2125                 if (rx_buffer->page)
2126                         __free_page(rx_buffer->page);
2127                 rx_buffer->page = NULL;
2128         }
2129
2130         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2131         memset(rx_ring->rx_buffer_info, 0, size);
2132
2133         /* Zero out the descriptor ring */
2134         memset(rx_ring->desc, 0, rx_ring->size);
2135 }
2136
2137 /**
2138  * ixgbevf_clean_tx_ring - Free Tx Buffers
2139  * @tx_ring: ring to be cleaned
2140  **/
2141 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2142 {
2143         struct ixgbevf_tx_buffer *tx_buffer_info;
2144         unsigned long size;
2145         unsigned int i;
2146
2147         if (!tx_ring->tx_buffer_info)
2148                 return;
2149
2150         /* Free all the Tx ring sk_buffs */
2151         for (i = 0; i < tx_ring->count; i++) {
2152                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2153                 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2154         }
2155
2156         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2157         memset(tx_ring->tx_buffer_info, 0, size);
2158
2159         memset(tx_ring->desc, 0, tx_ring->size);
2160 }
2161
2162 /**
2163  * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2164  * @adapter: board private structure
2165  **/
2166 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2167 {
2168         int i;
2169
2170         for (i = 0; i < adapter->num_rx_queues; i++)
2171                 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2172 }
2173
2174 /**
2175  * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2176  * @adapter: board private structure
2177  **/
2178 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2179 {
2180         int i;
2181
2182         for (i = 0; i < adapter->num_tx_queues; i++)
2183                 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2184 }
2185
2186 void ixgbevf_down(struct ixgbevf_adapter *adapter)
2187 {
2188         struct net_device *netdev = adapter->netdev;
2189         struct ixgbe_hw *hw = &adapter->hw;
2190         int i;
2191
2192         /* signal that we are down to the interrupt handler */
2193         if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2194                 return; /* do nothing if already down */
2195
2196         /* disable all enabled Rx queues */
2197         for (i = 0; i < adapter->num_rx_queues; i++)
2198                 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2199
2200         usleep_range(10000, 20000);
2201
2202         netif_tx_stop_all_queues(netdev);
2203
2204         /* call carrier off first to avoid false dev_watchdog timeouts */
2205         netif_carrier_off(netdev);
2206         netif_tx_disable(netdev);
2207
2208         ixgbevf_irq_disable(adapter);
2209
2210         ixgbevf_napi_disable_all(adapter);
2211
2212         del_timer_sync(&adapter->service_timer);
2213
2214         /* disable transmits in the hardware now that interrupts are off */
2215         for (i = 0; i < adapter->num_tx_queues; i++) {
2216                 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2217
2218                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2219                                 IXGBE_TXDCTL_SWFLSH);
2220         }
2221
2222         if (!pci_channel_offline(adapter->pdev))
2223                 ixgbevf_reset(adapter);
2224
2225         ixgbevf_clean_all_tx_rings(adapter);
2226         ixgbevf_clean_all_rx_rings(adapter);
2227 }
2228
2229 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2230 {
2231         WARN_ON(in_interrupt());
2232
2233         while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2234                 msleep(1);
2235
2236         ixgbevf_down(adapter);
2237         ixgbevf_up(adapter);
2238
2239         clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2240 }
2241
2242 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2243 {
2244         struct ixgbe_hw *hw = &adapter->hw;
2245         struct net_device *netdev = adapter->netdev;
2246
2247         if (hw->mac.ops.reset_hw(hw)) {
2248                 hw_dbg(hw, "PF still resetting\n");
2249         } else {
2250                 hw->mac.ops.init_hw(hw);
2251                 ixgbevf_negotiate_api(adapter);
2252         }
2253
2254         if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2255                 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2256                        netdev->addr_len);
2257                 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
2258                        netdev->addr_len);
2259         }
2260
2261         adapter->last_reset = jiffies;
2262 }
2263
2264 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2265                                         int vectors)
2266 {
2267         int vector_threshold;
2268
2269         /* We'll want at least 2 (vector_threshold):
2270          * 1) TxQ[0] + RxQ[0] handler
2271          * 2) Other (Link Status Change, etc.)
2272          */
2273         vector_threshold = MIN_MSIX_COUNT;
2274
2275         /* The more we get, the more we will assign to Tx/Rx Cleanup
2276          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2277          * Right now, we simply care about how many we'll get; we'll
2278          * set them up later while requesting irq's.
2279          */
2280         vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2281                                         vector_threshold, vectors);
2282
2283         if (vectors < 0) {
2284                 dev_err(&adapter->pdev->dev,
2285                         "Unable to allocate MSI-X interrupts\n");
2286                 kfree(adapter->msix_entries);
2287                 adapter->msix_entries = NULL;
2288                 return vectors;
2289         }
2290
2291         /* Adjust for only the vectors we'll use, which is minimum
2292          * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2293          * vectors we were allocated.
2294          */
2295         adapter->num_msix_vectors = vectors;
2296
2297         return 0;
2298 }
2299
2300 /**
2301  * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2302  * @adapter: board private structure to initialize
2303  *
2304  * This is the top level queue allocation routine.  The order here is very
2305  * important, starting with the largest set of features turned on at once,
2306  * and ending with the smallest set of features.  This way large combinations
2307  * can be allocated if they're turned on, and smaller combinations are the
2308  * fallthrough conditions.
2309  *
2310  **/
2311 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2312 {
2313         struct ixgbe_hw *hw = &adapter->hw;
2314         unsigned int def_q = 0;
2315         unsigned int num_tcs = 0;
2316         int err;
2317
2318         /* Start with base case */
2319         adapter->num_rx_queues = 1;
2320         adapter->num_tx_queues = 1;
2321
2322         spin_lock_bh(&adapter->mbx_lock);
2323
2324         /* fetch queue configuration from the PF */
2325         err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2326
2327         spin_unlock_bh(&adapter->mbx_lock);
2328
2329         if (err)
2330                 return;
2331
2332         /* we need as many queues as traffic classes */
2333         if (num_tcs > 1) {
2334                 adapter->num_rx_queues = num_tcs;
2335         } else {
2336                 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2337
2338                 switch (hw->api_version) {
2339                 case ixgbe_mbox_api_11:
2340                 case ixgbe_mbox_api_12:
2341                         adapter->num_rx_queues = rss;
2342                         adapter->num_tx_queues = rss;
2343                 default:
2344                         break;
2345                 }
2346         }
2347 }
2348
2349 /**
2350  * ixgbevf_alloc_queues - Allocate memory for all rings
2351  * @adapter: board private structure to initialize
2352  *
2353  * We allocate one ring per queue at run-time since we don't know the
2354  * number of queues at compile-time.  One ring structure is allocated for
2355  * each configured Tx and Rx queue.
2356  **/
2357 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2358 {
2359         struct ixgbevf_ring *ring;
2360         int rx = 0, tx = 0;
2361
2362         for (; tx < adapter->num_tx_queues; tx++) {
2363                 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2364                 if (!ring)
2365                         goto err_allocation;
2366
2367                 ring->dev = &adapter->pdev->dev;
2368                 ring->netdev = adapter->netdev;
2369                 ring->count = adapter->tx_ring_count;
2370                 ring->queue_index = tx;
2371                 ring->reg_idx = tx;
2372
2373                 adapter->tx_ring[tx] = ring;
2374         }
2375
2376         for (; rx < adapter->num_rx_queues; rx++) {
2377                 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2378                 if (!ring)
2379                         goto err_allocation;
2380
2381                 ring->dev = &adapter->pdev->dev;
2382                 ring->netdev = adapter->netdev;
2383
2384                 ring->count = adapter->rx_ring_count;
2385                 ring->queue_index = rx;
2386                 ring->reg_idx = rx;
2387
2388                 adapter->rx_ring[rx] = ring;
2389         }
2390
2391         return 0;
2392
2393 err_allocation:
2394         while (tx) {
2395                 kfree(adapter->tx_ring[--tx]);
2396                 adapter->tx_ring[tx] = NULL;
2397         }
2398
2399         while (rx) {
2400                 kfree(adapter->rx_ring[--rx]);
2401                 adapter->rx_ring[rx] = NULL;
2402         }
2403         return -ENOMEM;
2404 }
2405
2406 /**
2407  * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2408  * @adapter: board private structure to initialize
2409  *
2410  * Attempt to configure the interrupts using the best available
2411  * capabilities of the hardware and the kernel.
2412  **/
2413 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2414 {
2415         struct net_device *netdev = adapter->netdev;
2416         int err = 0;
2417         int vector, v_budget;
2418
2419         /* It's easy to be greedy for MSI-X vectors, but it really
2420          * doesn't do us much good if we have a lot more vectors
2421          * than CPUs.  So let's be conservative and only ask for
2422          * (roughly) the same number of vectors as there are CPUs.
2423          * The default is to use pairs of vectors.
2424          */
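             /* For example, a VF with two Tx and two Rx queue pairs on a host
              * with at least two online CPUs ends up asking for two queue
              * vectors plus NON_Q_VECTORS for the mailbox/other interrupt.
              */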
2425         v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2426         v_budget = min_t(int, v_budget, num_online_cpus());
2427         v_budget += NON_Q_VECTORS;
2428
2429         /* A failure in MSI-X entry allocation isn't fatal, but it does
2430          * mean we disable MSI-X capabilities of the adapter.
2431          */
2432         adapter->msix_entries = kcalloc(v_budget,
2433                                         sizeof(struct msix_entry), GFP_KERNEL);
2434         if (!adapter->msix_entries) {
2435                 err = -ENOMEM;
2436                 goto out;
2437         }
2438
2439         for (vector = 0; vector < v_budget; vector++)
2440                 adapter->msix_entries[vector].entry = vector;
2441
2442         err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2443         if (err)
2444                 goto out;
2445
2446         err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2447         if (err)
2448                 goto out;
2449
2450         err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
2451
2452 out:
2453         return err;
2454 }
2455
2456 /**
2457  * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2458  * @adapter: board private structure to initialize
2459  *
2460  * We allocate one q_vector per queue interrupt.  If allocation fails we
2461  * return -ENOMEM.
2462  **/
2463 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2464 {
2465         int q_idx, num_q_vectors;
2466         struct ixgbevf_q_vector *q_vector;
2467
2468         num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2469
2470         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2471                 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2472                 if (!q_vector)
2473                         goto err_out;
2474                 q_vector->adapter = adapter;
2475                 q_vector->v_idx = q_idx;
2476                 netif_napi_add(adapter->netdev, &q_vector->napi,
2477                                ixgbevf_poll, 64);
2478 #ifdef CONFIG_NET_RX_BUSY_POLL
2479                 napi_hash_add(&q_vector->napi);
2480 #endif
2481                 adapter->q_vector[q_idx] = q_vector;
2482         }
2483
2484         return 0;
2485
2486 err_out:
2487         while (q_idx) {
2488                 q_idx--;
2489                 q_vector = adapter->q_vector[q_idx];
2490 #ifdef CONFIG_NET_RX_BUSY_POLL
2491                 napi_hash_del(&q_vector->napi);
2492 #endif
2493                 netif_napi_del(&q_vector->napi);
2494                 kfree(q_vector);
2495                 adapter->q_vector[q_idx] = NULL;
2496         }
2497         return -ENOMEM;
2498 }
2499
2500 /**
2501  * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2502  * @adapter: board private structure to initialize
2503  *
2504  * This function frees the memory allocated to the q_vectors.  In addition if
2505  * NAPI is enabled it will delete any references to the NAPI struct prior
2506  * to freeing the q_vector.
2507  **/
2508 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2509 {
2510         int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2511
2512         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2513                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2514
2515                 adapter->q_vector[q_idx] = NULL;
2516 #ifdef CONFIG_NET_RX_BUSY_POLL
2517                 napi_hash_del(&q_vector->napi);
2518 #endif
2519                 netif_napi_del(&q_vector->napi);
2520                 kfree(q_vector);
2521         }
2522 }
2523
2524 /**
2525  * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2526  * @adapter: board private structure
2527  *
2528  **/
2529 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2530 {
2531         pci_disable_msix(adapter->pdev);
2532         kfree(adapter->msix_entries);
2533         adapter->msix_entries = NULL;
2534 }
2535
2536 /**
2537  * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2538  * @adapter: board private structure to initialize
2539  *
2540  **/
2541 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2542 {
2543         int err;
2544
2545         /* Number of supported queues */
2546         ixgbevf_set_num_queues(adapter);
2547
2548         err = ixgbevf_set_interrupt_capability(adapter);
2549         if (err) {
2550                 hw_dbg(&adapter->hw,
2551                        "Unable to setup interrupt capabilities\n");
2552                 goto err_set_interrupt;
2553         }
2554
2555         err = ixgbevf_alloc_q_vectors(adapter);
2556         if (err) {
2557                 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2558                 goto err_alloc_q_vectors;
2559         }
2560
2561         err = ixgbevf_alloc_queues(adapter);
2562         if (err) {
2563                 pr_err("Unable to allocate memory for queues\n");
2564                 goto err_alloc_queues;
2565         }
2566
2567         hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
2568                (adapter->num_rx_queues > 1) ? "Enabled" :
2569                "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2570
2571         set_bit(__IXGBEVF_DOWN, &adapter->state);
2572
2573         return 0;
2574 err_alloc_queues:
2575         ixgbevf_free_q_vectors(adapter);
2576 err_alloc_q_vectors:
2577         ixgbevf_reset_interrupt_capability(adapter);
2578 err_set_interrupt:
2579         return err;
2580 }
2581
2582 /**
2583  * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2584  * @adapter: board private structure to clear interrupt scheme on
2585  *
2586  * We go through and clear interrupt specific resources and reset the structure
2587  * to pre-load conditions
2588  **/
2589 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2590 {
2591         int i;
2592
2593         for (i = 0; i < adapter->num_tx_queues; i++) {
2594                 kfree(adapter->tx_ring[i]);
2595                 adapter->tx_ring[i] = NULL;
2596         }
2597         for (i = 0; i < adapter->num_rx_queues; i++) {
2598                 kfree(adapter->rx_ring[i]);
2599                 adapter->rx_ring[i] = NULL;
2600         }
2601
2602         adapter->num_tx_queues = 0;
2603         adapter->num_rx_queues = 0;
2604
2605         ixgbevf_free_q_vectors(adapter);
2606         ixgbevf_reset_interrupt_capability(adapter);
2607 }
2608
2609 /**
2610  * ixgbevf_sw_init - Initialize general software structures
2611  * @adapter: board private structure to initialize
2612  *
2613  * ixgbevf_sw_init initializes the Adapter private data structure.
2614  * Fields are initialized based on PCI device information and
2615  * OS network device settings (MTU size).
2616  **/
2617 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2618 {
2619         struct ixgbe_hw *hw = &adapter->hw;
2620         struct pci_dev *pdev = adapter->pdev;
2621         struct net_device *netdev = adapter->netdev;
2622         int err;
2623
2624         /* PCI config space info */
2625         hw->vendor_id = pdev->vendor;
2626         hw->device_id = pdev->device;
2627         hw->revision_id = pdev->revision;
2628         hw->subsystem_vendor_id = pdev->subsystem_vendor;
2629         hw->subsystem_device_id = pdev->subsystem_device;
2630
2631         hw->mbx.ops.init_params(hw);
2632
2633         /* assume legacy case in which PF would only give VF 2 queues */
2634         hw->mac.max_tx_queues = 2;
2635         hw->mac.max_rx_queues = 2;
2636
2637         /* lock to protect mailbox accesses */
2638         spin_lock_init(&adapter->mbx_lock);
2639
2640         err = hw->mac.ops.reset_hw(hw);
2641         if (err) {
2642                 dev_info(&pdev->dev,
2643                          "PF still in reset state.  Is the PF interface up?\n");
2644         } else {
2645                 err = hw->mac.ops.init_hw(hw);
2646                 if (err) {
2647                         pr_err("init_hw failed: %d\n", err);
2648                         goto out;
2649                 }
2650                 ixgbevf_negotiate_api(adapter);
2651                 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2652                 if (err)
2653                         dev_info(&pdev->dev, "Error reading MAC address\n");
2654                 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2655                         dev_info(&pdev->dev,
2656                                  "MAC address not assigned by administrator.\n");
2657                 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2658         }
2659
2660         if (!is_valid_ether_addr(netdev->dev_addr)) {
2661                 dev_info(&pdev->dev, "Assigning random MAC address\n");
2662                 eth_hw_addr_random(netdev);
2663                 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2664         }
2665
2666         /* Enable dynamic interrupt throttling rates */
2667         adapter->rx_itr_setting = 1;
2668         adapter->tx_itr_setting = 1;
2669
2670         /* set default ring sizes */
2671         adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2672         adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2673
2674         set_bit(__IXGBEVF_DOWN, &adapter->state);
2675         return 0;
2676
2677 out:
2678         return err;
2679 }
2680
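     /* Accumulate the free-running 32-bit (or 36-bit) hardware counters into
      * 64-bit software counters.  A wrap is detected when the current reading
      * is smaller than the previous one, in which case another full counter
      * period is added before the low-order bits are replaced.
      */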
2681 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)     \
2682         {                                                       \
2683                 u32 current_counter = IXGBE_READ_REG(hw, reg);  \
2684                 if (current_counter < last_counter)             \
2685                         counter += 0x100000000LL;               \
2686                 last_counter = current_counter;                 \
2687                 counter &= 0xFFFFFFFF00000000LL;                \
2688                 counter |= current_counter;                     \
2689         }
2690
2691 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2692         {                                                                \
2693                 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);   \
2694                 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);   \
2695                 u64 current_counter = (current_counter_msb << 32) |      \
2696                         current_counter_lsb;                             \
2697                 if (current_counter < last_counter)                      \
2698                         counter += 0x1000000000LL;                       \
2699                 last_counter = current_counter;                          \
2700                 counter &= 0xFFFFFFF000000000LL;                         \
2701                 counter |= current_counter;                              \
2702         }
2703 /**
2704  * ixgbevf_update_stats - Update the board statistics counters.
2705  * @adapter: board private structure
2706  **/
2707 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2708 {
2709         struct ixgbe_hw *hw = &adapter->hw;
2710         int i;
2711
2712         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2713             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2714                 return;
2715
2716         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2717                                 adapter->stats.vfgprc);
2718         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2719                                 adapter->stats.vfgptc);
2720         UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2721                                 adapter->stats.last_vfgorc,
2722                                 adapter->stats.vfgorc);
2723         UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2724                                 adapter->stats.last_vfgotc,
2725                                 adapter->stats.vfgotc);
2726         UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2727                                 adapter->stats.vfmprc);
2728
2729         for (i = 0; i < adapter->num_rx_queues; i++) {
2730                 adapter->hw_csum_rx_error +=
2731                         adapter->rx_ring[i]->hw_csum_rx_error;
2732                 adapter->rx_ring[i]->hw_csum_rx_error = 0;
2733         }
2734 }
2735
2736 /**
2737  * ixgbevf_service_timer - Timer Call-back
2738  * @data: pointer to adapter cast into an unsigned long
2739  **/
2740 static void ixgbevf_service_timer(unsigned long data)
2741 {
2742         struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2743
2744         /* Reset the timer */
2745         mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
2746
2747         ixgbevf_service_event_schedule(adapter);
2748 }
2749
2750 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
2751 {
2752         if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
2753                 return;
2754
2755         adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
2756
2757         /* If we're already down or resetting, just bail */
2758         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2759             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2760                 return;
2761
2762         adapter->tx_timeout_count++;
2763
2764         ixgbevf_reinit_locked(adapter);
2765 }
2766
2767 /**
2768  * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
2769  * @adapter: pointer to the device adapter structure
2770  *
2771  * This function serves two purposes.  First, it strobes the interrupt lines
2772  * in order to make certain interrupts are occurring.  Second, it sets the
2773  * bits needed to check for TX hangs.  As a result we should immediately
2774  * determine if a hang has occurred.
2775  **/
2776 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
2777 {
2778         struct ixgbe_hw *hw = &adapter->hw;
2779         u32 eics = 0;
2780         int i;
2781
2782         /* If we're down or resetting, just bail */
2783         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2784             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2785                 return;
2786
2787         /* Force detection of hung controller */
2788         if (netif_carrier_ok(adapter->netdev)) {
2789                 for (i = 0; i < adapter->num_tx_queues; i++)
2790                         set_check_for_tx_hang(adapter->tx_ring[i]);
2791         }
2792
2793         /* get one bit for every active Tx/Rx interrupt vector */
2794         for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2795                 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2796
2797                 if (qv->rx.ring || qv->tx.ring)
2798                         eics |= 1 << i;
2799         }
2800
2801         /* Cause software interrupt to ensure rings are cleaned */
2802         IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2803 }
2804
2805 /**
2806  * ixgbevf_watchdog_update_link - update the link status
2807  * @adapter: pointer to the device adapter structure
2808  **/
2809 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
2810 {
2811         struct ixgbe_hw *hw = &adapter->hw;
2812         u32 link_speed = adapter->link_speed;
2813         bool link_up = adapter->link_up;
2814         s32 err;
2815
2816         spin_lock_bh(&adapter->mbx_lock);
2817
2818         err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2819
2820         spin_unlock_bh(&adapter->mbx_lock);
2821
2822         /* if check for link returns error we will need to reset */
2823         if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
2824                 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
2825                 link_up = false;
2826         }
2827
2828         adapter->link_up = link_up;
2829         adapter->link_speed = link_speed;
2830 }
2831
2832 /**
2833  * ixgbevf_watchdog_link_is_up - update netif_carrier status and
2834  *                               print link up message
2835  * @adapter: pointer to the device adapter structure
2836  **/
2837 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
2838 {
2839         struct net_device *netdev = adapter->netdev;
2840
2841         /* only continue if link was previously down */
2842         if (netif_carrier_ok(netdev))
2843                 return;
2844
2845         dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
2846                  (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2847                  "10 Gbps" :
2848                  (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
2849                  "1 Gbps" :
2850                  (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
2851                  "100 Mbps" :
2852                  "unknown speed");
2853
2854         netif_carrier_on(netdev);
2855 }
2856
2857 /**
2858  * ixgbevf_watchdog_link_is_down - update netif_carrier status and
2859  *                                 print link down message
2860  * @adapter: pointer to the adapter structure
2861  **/
2862 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
2863 {
2864         struct net_device *netdev = adapter->netdev;
2865
2866         adapter->link_speed = 0;
2867
2868         /* only continue if link was up previously */
2869         if (!netif_carrier_ok(netdev))
2870                 return;
2871
2872         dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2873
2874         netif_carrier_off(netdev);
2875 }
2876
2877 /**
2878  * ixgbevf_watchdog_subtask - update the link state and statistics
2879  * @adapter: pointer to the device adapter structure
2880  **/
2881 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
2882 {
2883         /* if interface is down do nothing */
2884         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2885             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2886                 return;
2887
2888         ixgbevf_watchdog_update_link(adapter);
2889
2890         if (adapter->link_up)
2891                 ixgbevf_watchdog_link_is_up(adapter);
2892         else
2893                 ixgbevf_watchdog_link_is_down(adapter);
2894
2895         ixgbevf_update_stats(adapter);
2896 }
2897
2898 /**
2899  * ixgbevf_service_task - manages and runs subtasks
2900  * @work: pointer to work_struct containing our data
2901  **/
2902 static void ixgbevf_service_task(struct work_struct *work)
2903 {
2904         struct ixgbevf_adapter *adapter = container_of(work,
2905                                                        struct ixgbevf_adapter,
2906                                                        service_task);
2907         struct ixgbe_hw *hw = &adapter->hw;
2908
2909         if (IXGBE_REMOVED(hw->hw_addr)) {
2910                 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2911                         rtnl_lock();
2912                         ixgbevf_down(adapter);
2913                         rtnl_unlock();
2914                 }
2915                 return;
2916         }
2917
2918         ixgbevf_queue_reset_subtask(adapter);
2919         ixgbevf_reset_subtask(adapter);
2920         ixgbevf_watchdog_subtask(adapter);
2921         ixgbevf_check_hang_subtask(adapter);
2922
2923         ixgbevf_service_event_complete(adapter);
2924 }
2925
2926 /**
2927  * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2928  * @tx_ring: Tx descriptor ring for a specific queue
2929  *
2930  * Free all transmit software resources
2931  **/
2932 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2933 {
2934         ixgbevf_clean_tx_ring(tx_ring);
2935
2936         vfree(tx_ring->tx_buffer_info);
2937         tx_ring->tx_buffer_info = NULL;
2938
2939         /* if not set, then don't free */
2940         if (!tx_ring->desc)
2941                 return;
2942
2943         dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2944                           tx_ring->dma);
2945
2946         tx_ring->desc = NULL;
2947 }
2948
2949 /**
2950  * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2951  * @adapter: board private structure
2952  *
2953  * Free all transmit software resources
2954  **/
2955 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2956 {
2957         int i;
2958
2959         for (i = 0; i < adapter->num_tx_queues; i++)
2960                 if (adapter->tx_ring[i]->desc)
2961                         ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2962 }
2963
2964 /**
2965  * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2966  * @tx_ring: Tx descriptor ring (for a specific queue) to setup
2967  *
2968  * Return 0 on success, negative on failure
2969  **/
2970 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2971 {
2972         int size;
2973
2974         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2975         tx_ring->tx_buffer_info = vzalloc(size);
2976         if (!tx_ring->tx_buffer_info)
2977                 goto err;
2978
2979         /* round up to nearest 4K */
2980         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2981         tx_ring->size = ALIGN(tx_ring->size, 4096);
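             /* Illustrative arithmetic only (descriptor size assumed to be
              * the usual 16 bytes for an advanced Tx descriptor): a ring
              * with count = 512 needs 512 * 16 = 8192 bytes, already 4K
              * aligned, while count = 100 needs 1600 bytes, rounded up to
              * 4096 by the ALIGN() above.
              */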
2982
2983         tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2984                                            &tx_ring->dma, GFP_KERNEL);
2985         if (!tx_ring->desc)
2986                 goto err;
2987
2988         return 0;
2989
2990 err:
2991         vfree(tx_ring->tx_buffer_info);
2992         tx_ring->tx_buffer_info = NULL;
2993         dev_err(tx_ring->dev, "Unable to allocate memory for the Tx descriptor ring\n");
2994         return -ENOMEM;
2995 }
2996
2997 /**
2998  * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2999  * @adapter: board private structure
3000  *
3001  * If this function returns with an error, then it's possible one or
3002  * more of the rings is populated (while the rest are not).  It is the
3003  * caller's duty to clean up those orphaned rings.
3004  *
3005  * Return 0 on success, negative on failure
3006  **/
3007 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3008 {
3009         int i, err = 0;
3010
3011         for (i = 0; i < adapter->num_tx_queues; i++) {
3012                 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3013                 if (!err)
3014                         continue;
3015                 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3016                 break;
3017         }
3018
3019         return err;
3020 }
3021
3022 /**
3023  * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3024  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3025  *
3026  * Returns 0 on success, negative on failure
3027  **/
3028 int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
3029 {
3030         int size;
3031
3032         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3033         rx_ring->rx_buffer_info = vzalloc(size);
3034         if (!rx_ring->rx_buffer_info)
3035                 goto err;
3036
3037         /* Round up to nearest 4K */
3038         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3039         rx_ring->size = ALIGN(rx_ring->size, 4096);
3040
3041         rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3042                                            &rx_ring->dma, GFP_KERNEL);
3043
3044         if (!rx_ring->desc)
3045                 goto err;
3046
3047         return 0;
3048 err:
3049         vfree(rx_ring->rx_buffer_info);
3050         rx_ring->rx_buffer_info = NULL;
3051         dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3052         return -ENOMEM;
3053 }
3054
3055 /**
3056  * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3057  * @adapter: board private structure
3058  *
3059  * If this function returns with an error, then it's possible one or
3060  * more of the rings is populated (while the rest are not).  It is the
3061  * caller's duty to clean up those orphaned rings.
3062  *
3063  * Return 0 on success, negative on failure
3064  **/
3065 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3066 {
3067         int i, err = 0;
3068
3069         for (i = 0; i < adapter->num_rx_queues; i++) {
3070                 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
3071                 if (!err)
3072                         continue;
3073                 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3074                 break;
3075         }
3076         return err;
3077 }
3078
3079 /**
3080  * ixgbevf_free_rx_resources - Free Rx Resources
3081  * @rx_ring: ring to clean the resources from
3082  *
3083  * Free all receive software resources
3084  **/
3085 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3086 {
3087         ixgbevf_clean_rx_ring(rx_ring);
3088
3089         vfree(rx_ring->rx_buffer_info);
3090         rx_ring->rx_buffer_info = NULL;
3091
3092         dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3093                           rx_ring->dma);
3094
3095         rx_ring->desc = NULL;
3096 }
3097
3098 /**
3099  * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3100  * @adapter: board private structure
3101  *
3102  * Free all receive software resources
3103  **/
3104 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3105 {
3106         int i;
3107
3108         for (i = 0; i < adapter->num_rx_queues; i++)
3109                 if (adapter->rx_ring[i]->desc)
3110                         ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3111 }
3112
3113 /**
3114  * ixgbevf_open - Called when a network interface is made active
3115  * @netdev: network interface device structure
3116  *
3117  * Returns 0 on success, negative value on failure
3118  *
3119  * The open entry point is called when a network interface is made
3120  * active by the system (IFF_UP).  At this point all resources needed
3121  * for transmit and receive operations are allocated, the interrupt
3122  * handler is registered with the OS, the watchdog timer is started,
3123  * and the stack is notified that the interface is ready.
3124  **/
3125 static int ixgbevf_open(struct net_device *netdev)
3126 {
3127         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3128         struct ixgbe_hw *hw = &adapter->hw;
3129         int err;
3130
3131         /* A previous failure to open the device because of a lack of
3132          * available MSIX vector resources may have reset the number
3133          * of msix vectors variable to zero.  The only way to recover
3134          * is to unload/reload the driver and hope that the system has
3135          * been able to recover some MSIX vector resources.
3136          */
3137         if (!adapter->num_msix_vectors)
3138                 return -ENOMEM;
3139
3140         if (hw->adapter_stopped) {
3141                 ixgbevf_reset(adapter);
3142                 /* if adapter is still stopped then PF isn't up and
3143                  * the VF can't start.
3144                  */
3145                 if (hw->adapter_stopped) {
3146                         err = IXGBE_ERR_MBX;
3147                         pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3148                         goto err_setup_reset;
3149                 }
3150         }
3151
3152         /* disallow open during test */
3153         if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3154                 return -EBUSY;
3155
3156         netif_carrier_off(netdev);
3157
3158         /* allocate transmit descriptors */
3159         err = ixgbevf_setup_all_tx_resources(adapter);
3160         if (err)
3161                 goto err_setup_tx;
3162
3163         /* allocate receive descriptors */
3164         err = ixgbevf_setup_all_rx_resources(adapter);
3165         if (err)
3166                 goto err_setup_rx;
3167
3168         ixgbevf_configure(adapter);
3169
3170         /* Map the Tx/Rx rings to the vectors we were allotted.
3171          * Because request_irq() is called in this function, the rings
3172          * must be mapped to vectors *before* up_complete().
3173          */
3174         ixgbevf_map_rings_to_vectors(adapter);
3175
3176         err = ixgbevf_request_irq(adapter);
3177         if (err)
3178                 goto err_req_irq;
3179
3180         ixgbevf_up_complete(adapter);
3181
3182         return 0;
3183
3184 err_req_irq:
3185         ixgbevf_down(adapter);
3186 err_setup_rx:
3187         ixgbevf_free_all_rx_resources(adapter);
3188 err_setup_tx:
3189         ixgbevf_free_all_tx_resources(adapter);
3190         ixgbevf_reset(adapter);
3191
3192 err_setup_reset:
3193
3194         return err;
3195 }
3196
3197 /**
3198  * ixgbevf_close - Disables a network interface
3199  * @netdev: network interface device structure
3200  *
3201  * Returns 0, this is not allowed to fail
3202  *
3203  * The close entry point is called when an interface is de-activated
3204  * by the OS.  The hardware is still under the driver's control, but
3205  * needs to be disabled.  A global MAC reset is issued to stop the
3206  * hardware, and all transmit and receive resources are freed.
3207  **/
3208 static int ixgbevf_close(struct net_device *netdev)
3209 {
3210         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3211
3212         ixgbevf_down(adapter);
3213         ixgbevf_free_irq(adapter);
3214
3215         ixgbevf_free_all_tx_resources(adapter);
3216         ixgbevf_free_all_rx_resources(adapter);
3217
3218         return 0;
3219 }
3220
3221 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3222 {
3223         struct net_device *dev = adapter->netdev;
3224
3225         if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
3226                 return;
3227
3228         adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
3229
3230         /* if interface is down do nothing */
3231         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3232             test_bit(__IXGBEVF_RESETTING, &adapter->state))
3233                 return;
3234
3235         /* Hardware has to reinitialize queues and interrupts to
3236          * match packet buffer alignment. Unfortunately, the
3237          * hardware is not flexible enough to do this dynamically.
3238          */
3239         if (netif_running(dev))
3240                 ixgbevf_close(dev);
3241
3242         ixgbevf_clear_interrupt_scheme(adapter);
3243         ixgbevf_init_interrupt_scheme(adapter);
3244
3245         if (netif_running(dev))
3246                 ixgbevf_open(dev);
3247 }
3248
3249 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3250                                 u32 vlan_macip_lens, u32 type_tucmd,
3251                                 u32 mss_l4len_idx)
3252 {
3253         struct ixgbe_adv_tx_context_desc *context_desc;
3254         u16 i = tx_ring->next_to_use;
3255
3256         context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3257
3258         i++;
3259         tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3260
3261         /* set bits to identify this as an advanced context descriptor */
3262         type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3263
3264         context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
3265         context_desc->seqnum_seed       = 0;
3266         context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
3267         context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
3268 }
3269
3270 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3271                        struct ixgbevf_tx_buffer *first,
3272                        u8 *hdr_len)
3273 {
3274         struct sk_buff *skb = first->skb;
3275         u32 vlan_macip_lens, type_tucmd;
3276         u32 mss_l4len_idx, l4len;
3277         int err;
3278
3279         if (skb->ip_summed != CHECKSUM_PARTIAL)
3280                 return 0;
3281
3282         if (!skb_is_gso(skb))
3283                 return 0;
3284
3285         err = skb_cow_head(skb, 0);
3286         if (err < 0)
3287                 return err;
3288
3289         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3290         type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3291
3292         if (first->protocol == htons(ETH_P_IP)) {
3293                 struct iphdr *iph = ip_hdr(skb);
3294
3295                 iph->tot_len = 0;
3296                 iph->check = 0;
3297                 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3298                                                          iph->daddr, 0,
3299                                                          IPPROTO_TCP,
3300                                                          0);
3301                 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3302                 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3303                                    IXGBE_TX_FLAGS_CSUM |
3304                                    IXGBE_TX_FLAGS_IPV4;
3305         } else if (skb_is_gso_v6(skb)) {
3306                 ipv6_hdr(skb)->payload_len = 0;
3307                 tcp_hdr(skb)->check =
3308                     ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3309                                      &ipv6_hdr(skb)->daddr,
3310                                      0, IPPROTO_TCP, 0);
3311                 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3312                                    IXGBE_TX_FLAGS_CSUM;
3313         }
3314
3315         /* compute header lengths */
3316         l4len = tcp_hdrlen(skb);
3318         *hdr_len = skb_transport_offset(skb) + l4len;
3319
3320         /* update GSO size and bytecount with header size */
3321         first->gso_segs = skb_shinfo(skb)->gso_segs;
3322         first->bytecount += (first->gso_segs - 1) * *hdr_len;
3323
3324         /* mss_l4len_id: use 1 as index for TSO */
3325         mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
3326         mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3327         mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
3328
3329         /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3330         vlan_macip_lens = skb_network_header_len(skb);
3331         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3332         vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
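             /* A worked example (sketch only; shift values assumed from the
              * usual 82599 advanced context descriptor layout: MSS in bits
              * 31:16, L4LEN in 15:8, IDX in 7:4, MACLEN shifted by 9): an
              * untagged TCP/IPv4 frame with 14 byte MAC, 20 byte IP and
              * 20 byte TCP headers and MSS 1460 would give roughly
              *   mss_l4len_idx   = (1460 << 16) | (20 << 8) | (1 << 4)
              *   vlan_macip_lens = 20 | (14 << 9)
              */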
3333
3334         ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3335                             type_tucmd, mss_l4len_idx);
3336
3337         return 1;
3338 }
3339
3340 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3341                             struct ixgbevf_tx_buffer *first)
3342 {
3343         struct sk_buff *skb = first->skb;
3344         u32 vlan_macip_lens = 0;
3345         u32 mss_l4len_idx = 0;
3346         u32 type_tucmd = 0;
3347
3348         if (skb->ip_summed == CHECKSUM_PARTIAL) {
3349                 u8 l4_hdr = 0;
3350
3351                 switch (first->protocol) {
3352                 case htons(ETH_P_IP):
3353                         vlan_macip_lens |= skb_network_header_len(skb);
3354                         type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3355                         l4_hdr = ip_hdr(skb)->protocol;
3356                         break;
3357                 case htons(ETH_P_IPV6):
3358                         vlan_macip_lens |= skb_network_header_len(skb);
3359                         l4_hdr = ipv6_hdr(skb)->nexthdr;
3360                         break;
3361                 default:
3362                         if (unlikely(net_ratelimit())) {
3363                                 dev_warn(tx_ring->dev,
3364                                          "partial checksum but proto=%x!\n",
3365                                          first->protocol);
3366                         }
3367                         break;
3368                 }
3369
3370                 switch (l4_hdr) {
3371                 case IPPROTO_TCP:
3372                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3373                         mss_l4len_idx = tcp_hdrlen(skb) <<
3374                                         IXGBE_ADVTXD_L4LEN_SHIFT;
3375                         break;
3376                 case IPPROTO_SCTP:
3377                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3378                         mss_l4len_idx = sizeof(struct sctphdr) <<
3379                                         IXGBE_ADVTXD_L4LEN_SHIFT;
3380                         break;
3381                 case IPPROTO_UDP:
3382                         mss_l4len_idx = sizeof(struct udphdr) <<
3383                                         IXGBE_ADVTXD_L4LEN_SHIFT;
3384                         break;
3385                 default:
3386                         if (unlikely(net_ratelimit())) {
3387                                 dev_warn(tx_ring->dev,
3388                                          "partial checksum but l4 proto=%x!\n",
3389                                          l4_hdr);
3390                         }
3391                         break;
3392                 }
3393
3394                 /* update TX checksum flag */
3395                 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3396         }
3397
3398         /* vlan_macip_lens: MACLEN, VLAN tag */
3399         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3400         vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3401
3402         ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3403                             type_tucmd, mss_l4len_idx);
3404 }
3405
3406 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3407 {
3408         /* set type for advanced descriptor with frame checksum insertion */
3409         __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3410                                       IXGBE_ADVTXD_DCMD_IFCS |
3411                                       IXGBE_ADVTXD_DCMD_DEXT);
3412
3413         /* set HW VLAN bit if VLAN is present */
3414         if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3415                 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3416
3417         /* set segmentation enable bits for TSO/FSO */
3418         if (tx_flags & IXGBE_TX_FLAGS_TSO)
3419                 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3420
3421         return cmd_type;
3422 }
3423
3424 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3425                                      u32 tx_flags, unsigned int paylen)
3426 {
3427         __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3428
3429         /* enable L4 checksum for TSO and TX checksum offload */
3430         if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3431                 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3432
3433         /* enable IPv4 checksum for TSO */
3434         if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3435                 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3436
3437         /* use index 1 context for TSO/FSO/FCOE */
3438         if (tx_flags & IXGBE_TX_FLAGS_TSO)
3439                 olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
3440
3441         /* The Check Context bit must be set if the Tx switch is enabled,
3442          * which it always is when virtual functions are running.
3443          */
3444         olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3445
3446         tx_desc->read.olinfo_status = olinfo_status;
3447 }
3448
3449 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3450                            struct ixgbevf_tx_buffer *first,
3451                            const u8 hdr_len)
3452 {
3453         dma_addr_t dma;
3454         struct sk_buff *skb = first->skb;
3455         struct ixgbevf_tx_buffer *tx_buffer;
3456         union ixgbe_adv_tx_desc *tx_desc;
3457         struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
3458         unsigned int data_len = skb->data_len;
3459         unsigned int size = skb_headlen(skb);
3460         unsigned int paylen = skb->len - hdr_len;
3461         u32 tx_flags = first->tx_flags;
3462         __le32 cmd_type;
3463         u16 i = tx_ring->next_to_use;
3464
3465         tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3466
3467         ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
3468         cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3469
3470         dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3471         if (dma_mapping_error(tx_ring->dev, dma))
3472                 goto dma_error;
3473
3474         /* record length, and DMA address */
3475         dma_unmap_len_set(first, len, size);
3476         dma_unmap_addr_set(first, dma, dma);
3477
3478         tx_desc->read.buffer_addr = cpu_to_le64(dma);
3479
3480         for (;;) {
3481                 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3482                         tx_desc->read.cmd_type_len =
3483                                 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3484
3485                         i++;
3486                         tx_desc++;
3487                         if (i == tx_ring->count) {
3488                                 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3489                                 i = 0;
3490                         }
3491
3492                         dma += IXGBE_MAX_DATA_PER_TXD;
3493                         size -= IXGBE_MAX_DATA_PER_TXD;
3494
3495                         tx_desc->read.buffer_addr = cpu_to_le64(dma);
3496                         tx_desc->read.olinfo_status = 0;
3497                 }
3498
3499                 if (likely(!data_len))
3500                         break;
3501
3502                 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3503
3504                 i++;
3505                 tx_desc++;
3506                 if (i == tx_ring->count) {
3507                         tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3508                         i = 0;
3509                 }
3510
3511                 size = skb_frag_size(frag);
3512                 data_len -= size;
3513
3514                 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3515                                        DMA_TO_DEVICE);
3516                 if (dma_mapping_error(tx_ring->dev, dma))
3517                         goto dma_error;
3518
3519                 tx_buffer = &tx_ring->tx_buffer_info[i];
3520                 dma_unmap_len_set(tx_buffer, len, size);
3521                 dma_unmap_addr_set(tx_buffer, dma, dma);
3522
3523                 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3524                 tx_desc->read.olinfo_status = 0;
3525
3526                 frag++;
3527         }
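             /* Sketch of the splitting above (IXGBE_MAX_DATA_PER_TXD assumed
              * to be 16K): a 32K fragment is written as two 16K data
              * descriptors, the DMA address advancing by 16K between them;
              * only the last descriptor of the packet gets the EOP/RS bits
              * below.
              */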
3528
3529         /* write last descriptor with RS and EOP bits */
3530         cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3531         tx_desc->read.cmd_type_len = cmd_type;
3532
3533         /* set the timestamp */
3534         first->time_stamp = jiffies;
3535
3536         /* Force memory writes to complete before letting h/w know there
3537          * are new descriptors to fetch.  (Only applicable for weak-ordered
3538          * memory model archs, such as IA-64).
3539          *
3540          * We also need this memory barrier (wmb) to make certain all of the
3541          * status bits have been updated before next_to_watch is written.
3542          */
3543         wmb();
3544
3545         /* set next_to_watch value indicating a packet is present */
3546         first->next_to_watch = tx_desc;
3547
3548         i++;
3549         if (i == tx_ring->count)
3550                 i = 0;
3551
3552         tx_ring->next_to_use = i;
3553
3554         /* notify HW of packet */
3555         ixgbevf_write_tail(tx_ring, i);
3556
3557         return;
3558 dma_error:
3559         dev_err(tx_ring->dev, "TX DMA map failed\n");
3560
3561         /* clear dma mappings for failed tx_buffer_info map */
3562         for (;;) {
3563                 tx_buffer = &tx_ring->tx_buffer_info[i];
3564                 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
3565                 if (tx_buffer == first)
3566                         break;
3567                 if (i == 0)
3568                         i = tx_ring->count;
3569                 i--;
3570         }
3571
3572         tx_ring->next_to_use = i;
3573 }
3574
3575 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3576 {
3577         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3578         /* Herbert's original patch had:
3579          *  smp_mb__after_netif_stop_queue();
3580          * but since that doesn't exist yet, just open code it.
3581          */
3582         smp_mb();
3583
3584         /* We need to check again in a case another CPU has just
3585          * made room available.
3586          */
3587         if (likely(ixgbevf_desc_unused(tx_ring) < size))
3588                 return -EBUSY;
3589
3590         /* A reprieve! - use start_queue because it doesn't call schedule */
3591         netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3592         ++tx_ring->tx_stats.restart_queue;
3593
3594         return 0;
3595 }
3596
3597 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3598 {
3599         if (likely(ixgbevf_desc_unused(tx_ring) >= size))
3600                 return 0;
3601         return __ixgbevf_maybe_stop_tx(tx_ring, size);
3602 }
3603
3604 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3605 {
3606         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3607         struct ixgbevf_tx_buffer *first;
3608         struct ixgbevf_ring *tx_ring;
3609         int tso;
3610         u32 tx_flags = 0;
3611         u16 count = TXD_USE_COUNT(skb_headlen(skb));
3612 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3613         unsigned short f;
3614 #endif
3615         u8 hdr_len = 0;
3616         u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3617
3618         if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3619                 dev_kfree_skb_any(skb);
3620                 return NETDEV_TX_OK;
3621         }
3622
3623         tx_ring = adapter->tx_ring[skb->queue_mapping];
3624
3625         /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3626          *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3627          *       + 2 desc gap to keep tail from touching head,
3628          *       + 1 desc for context descriptor,
3629          * otherwise try next time
3630          */
3631 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3632         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3633                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3634 #else
3635         count += skb_shinfo(skb)->nr_frags;
3636 #endif
3637         if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3638                 tx_ring->tx_stats.tx_busy++;
3639                 return NETDEV_TX_BUSY;
3640         }
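             /* Illustrative accounting (assuming 4K pages, i.e. one
              * descriptor per fragment): a 242 byte linear area plus three
              * page-sized frags gives count = 1 + 3 = 4, so the ring must
              * have count + 3 = 7 free descriptors (context descriptor plus
              * the two-descriptor gap) before we commit to the transmit.
              */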
3641
3642         /* record the location of the first descriptor for this packet */
3643         first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
3644         first->skb = skb;
3645         first->bytecount = skb->len;
3646         first->gso_segs = 1;
3647
3648         if (skb_vlan_tag_present(skb)) {
3649                 tx_flags |= skb_vlan_tag_get(skb);
3650                 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3651                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3652         }
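             /* For example (IXGBE_TX_FLAGS_VLAN_SHIFT assumed to be 16): a
              * tag with VLAN ID 100 and priority 0 becomes 100 << 16 in
              * tx_flags, and the IXGBE_TX_FLAGS_VLAN marker later makes
              * ixgbevf_tx_cmd_type() set the VLE bit in the descriptor.
              */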
3653
3654         /* record initial flags and protocol */
3655         first->tx_flags = tx_flags;
3656         first->protocol = vlan_get_protocol(skb);
3657
3658         tso = ixgbevf_tso(tx_ring, first, &hdr_len);
3659         if (tso < 0)
3660                 goto out_drop;
3661         else if (!tso)
3662                 ixgbevf_tx_csum(tx_ring, first);
3663
3664         ixgbevf_tx_map(tx_ring, first, hdr_len);
3665
3666         ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3667
3668         return NETDEV_TX_OK;
3669
3670 out_drop:
3671         dev_kfree_skb_any(first->skb);
3672         first->skb = NULL;
3673
3674         return NETDEV_TX_OK;
3675 }
3676
3677 /**
3678  * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3679  * @netdev: network interface device structure
3680  * @p: pointer to an address structure
3681  *
3682  * Returns 0 on success, negative on failure
3683  **/
3684 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3685 {
3686         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3687         struct ixgbe_hw *hw = &adapter->hw;
3688         struct sockaddr *addr = p;
3689
3690         if (!is_valid_ether_addr(addr->sa_data))
3691                 return -EADDRNOTAVAIL;
3692
3693         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3694         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3695
3696         spin_lock_bh(&adapter->mbx_lock);
3697
3698         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3699
3700         spin_unlock_bh(&adapter->mbx_lock);
3701
3702         return 0;
3703 }
3704
3705 /**
3706  * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3707  * @netdev: network interface device structure
3708  * @new_mtu: new value for maximum frame size
3709  *
3710  * Returns 0 on success, negative on failure
3711  **/
3712 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3713 {
3714         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3715         struct ixgbe_hw *hw = &adapter->hw;
3716         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3717         int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3718
3719         switch (adapter->hw.api_version) {
3720         case ixgbe_mbox_api_11:
3721         case ixgbe_mbox_api_12:
3722                 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3723                 break;
3724         default:
3725                 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
3726                         max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3727                 break;
3728         }
3729
3730         /* MTU < 68 is an error and causes problems on some kernels */
3731         if ((new_mtu < 68) || (max_frame > max_possible_frame))
3732                 return -EINVAL;
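             /* For instance, the default MTU of 1500 gives max_frame =
              * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518 bytes, which is
              * always accepted; an MTU of 9000 (max_frame 9018) is only
              * accepted when max_possible_frame was raised to
              * IXGBE_MAX_JUMBO_FRAME_SIZE above.
              */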
3733
3734         hw_dbg(hw, "changing MTU from %d to %d\n",
3735                netdev->mtu, new_mtu);
3736         /* must set new MTU before calling down or up */
3737         netdev->mtu = new_mtu;
3738
3739         /* notify the PF of our intent to use this size of frame */
3740         ixgbevf_rlpml_set_vf(hw, max_frame);
3741
3742         return 0;
3743 }
3744
3745 #ifdef CONFIG_NET_POLL_CONTROLLER
3746 /* Polling 'interrupt' - used by things like netconsole to send skbs
3747  * without having to re-enable interrupts. It's not called while
3748  * the interrupt routine is executing.
3749  */
3750 static void ixgbevf_netpoll(struct net_device *netdev)
3751 {
3752         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3753         int i;
3754
3755         /* if interface is down do nothing */
3756         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
3757                 return;
3758         for (i = 0; i < adapter->num_rx_queues; i++)
3759                 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
3760 }
3761 #endif /* CONFIG_NET_POLL_CONTROLLER */
3762
3763 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3764 {
3765         struct net_device *netdev = pci_get_drvdata(pdev);
3766         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3767 #ifdef CONFIG_PM
3768         int retval = 0;
3769 #endif
3770
3771         netif_device_detach(netdev);
3772
3773         if (netif_running(netdev)) {
3774                 rtnl_lock();
3775                 ixgbevf_down(adapter);
3776                 ixgbevf_free_irq(adapter);
3777                 ixgbevf_free_all_tx_resources(adapter);
3778                 ixgbevf_free_all_rx_resources(adapter);
3779                 rtnl_unlock();
3780         }
3781
3782         ixgbevf_clear_interrupt_scheme(adapter);
3783
3784 #ifdef CONFIG_PM
3785         retval = pci_save_state(pdev);
3786         if (retval)
3787                 return retval;
3788
3789 #endif
3790         if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3791                 pci_disable_device(pdev);
3792
3793         return 0;
3794 }
3795
3796 #ifdef CONFIG_PM
3797 static int ixgbevf_resume(struct pci_dev *pdev)
3798 {
3799         struct net_device *netdev = pci_get_drvdata(pdev);
3800         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3801         int err;
3802
3803         pci_restore_state(pdev);
3804         /* pci_restore_state clears dev->state_saved so call
3805          * pci_save_state to restore it.
3806          */
3807         pci_save_state(pdev);
3808
3809         err = pci_enable_device_mem(pdev);
3810         if (err) {
3811                 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3812                 return err;
3813         }
3814         smp_mb__before_atomic();
3815         clear_bit(__IXGBEVF_DISABLED, &adapter->state);
3816         pci_set_master(pdev);
3817
3818         ixgbevf_reset(adapter);
3819
3820         rtnl_lock();
3821         err = ixgbevf_init_interrupt_scheme(adapter);
3822         rtnl_unlock();
3823         if (err) {
3824                 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3825                 return err;
3826         }
3827
3828         if (netif_running(netdev)) {
3829                 err = ixgbevf_open(netdev);
3830                 if (err)
3831                         return err;
3832         }
3833
3834         netif_device_attach(netdev);
3835
3836         return err;
3837 }
3838
3839 #endif /* CONFIG_PM */
3840 static void ixgbevf_shutdown(struct pci_dev *pdev)
3841 {
3842         ixgbevf_suspend(pdev, PMSG_SUSPEND);
3843 }
3844
3845 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3846                                                 struct rtnl_link_stats64 *stats)
3847 {
3848         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3849         unsigned int start;
3850         u64 bytes, packets;
3851         const struct ixgbevf_ring *ring;
3852         int i;
3853
3854         ixgbevf_update_stats(adapter);
3855
3856         stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3857
3858         for (i = 0; i < adapter->num_rx_queues; i++) {
3859                 ring = adapter->rx_ring[i];
3860                 do {
3861                         start = u64_stats_fetch_begin_irq(&ring->syncp);
3862                         bytes = ring->stats.bytes;
3863                         packets = ring->stats.packets;
3864                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3865                 stats->rx_bytes += bytes;
3866                 stats->rx_packets += packets;
3867         }
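             /* Each per-ring loop here is the standard u64_stats seqcount
              * read pattern: if the Tx/Rx clean paths updated the counters
              * while we were reading them, retry so bytes and packets come
              * from a consistent snapshot.
              */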
3868
3869         for (i = 0; i < adapter->num_tx_queues; i++) {
3870                 ring = adapter->tx_ring[i];
3871                 do {
3872                         start = u64_stats_fetch_begin_irq(&ring->syncp);
3873                         bytes = ring->stats.bytes;
3874                         packets = ring->stats.packets;
3875                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3876                 stats->tx_bytes += bytes;
3877                 stats->tx_packets += packets;
3878         }
3879
3880         return stats;
3881 }
3882
3883 static const struct net_device_ops ixgbevf_netdev_ops = {
3884         .ndo_open               = ixgbevf_open,
3885         .ndo_stop               = ixgbevf_close,
3886         .ndo_start_xmit         = ixgbevf_xmit_frame,
3887         .ndo_set_rx_mode        = ixgbevf_set_rx_mode,
3888         .ndo_get_stats64        = ixgbevf_get_stats,
3889         .ndo_validate_addr      = eth_validate_addr,
3890         .ndo_set_mac_address    = ixgbevf_set_mac,
3891         .ndo_change_mtu         = ixgbevf_change_mtu,
3892         .ndo_tx_timeout         = ixgbevf_tx_timeout,
3893         .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
3894         .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
3895 #ifdef CONFIG_NET_RX_BUSY_POLL
3896         .ndo_busy_poll          = ixgbevf_busy_poll_recv,
3897 #endif
3898 #ifdef CONFIG_NET_POLL_CONTROLLER
3899         .ndo_poll_controller    = ixgbevf_netpoll,
3900 #endif
3901         .ndo_features_check     = passthru_features_check,
3902 };
3903
3904 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3905 {
3906         dev->netdev_ops = &ixgbevf_netdev_ops;
3907         ixgbevf_set_ethtool_ops(dev);
3908         dev->watchdog_timeo = 5 * HZ;
3909 }
3910
3911 /**
3912  * ixgbevf_probe - Device Initialization Routine
3913  * @pdev: PCI device information struct
3914  * @ent: entry in ixgbevf_pci_tbl
3915  *
3916  * Returns 0 on success, negative on failure
3917  *
3918  * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3919  * The OS initialization, configuring of the adapter private structure,
3920  * and a hardware reset occur.
3921  **/
3922 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3923 {
3924         struct net_device *netdev;
3925         struct ixgbevf_adapter *adapter = NULL;
3926         struct ixgbe_hw *hw = NULL;
3927         const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3928         int err, pci_using_dac;
3929         bool disable_dev = false;
3930
3931         err = pci_enable_device(pdev);
3932         if (err)
3933                 return err;
3934
3935         if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3936                 pci_using_dac = 1;
3937         } else {
3938                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3939                 if (err) {
3940                         dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
3941                         goto err_dma;
3942                 }
3943                 pci_using_dac = 0;
3944         }
3945
3946         err = pci_request_regions(pdev, ixgbevf_driver_name);
3947         if (err) {
3948                 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3949                 goto err_pci_reg;
3950         }
3951
3952         pci_set_master(pdev);
3953
3954         netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3955                                    MAX_TX_QUEUES);
3956         if (!netdev) {
3957                 err = -ENOMEM;
3958                 goto err_alloc_etherdev;
3959         }
3960
3961         SET_NETDEV_DEV(netdev, &pdev->dev);
3962
3963         adapter = netdev_priv(netdev);
3964
3965         adapter->netdev = netdev;
3966         adapter->pdev = pdev;
3967         hw = &adapter->hw;
3968         hw->back = adapter;
3969         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3970
3971         /* call save state here in standalone driver because it relies on
3972          * adapter struct to exist, and needs to call netdev_priv
3973          */
3974         pci_save_state(pdev);
3975
3976         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3977                               pci_resource_len(pdev, 0));
3978         adapter->io_addr = hw->hw_addr;
3979         if (!hw->hw_addr) {
3980                 err = -EIO;
3981                 goto err_ioremap;
3982         }
3983
3984         ixgbevf_assign_netdev_ops(netdev);
3985
3986         /* Setup HW API */
3987         memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3988         hw->mac.type  = ii->mac;
3989
3990         memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3991                sizeof(struct ixgbe_mbx_operations));
3992
3993         /* setup the private structure */
3994         err = ixgbevf_sw_init(adapter);
3995         if (err)
3996                 goto err_sw_init;
3997
3998         /* The HW MAC address was set and/or determined in sw_init */
3999         if (!is_valid_ether_addr(netdev->dev_addr)) {
4000                 pr_err("invalid MAC address\n");
4001                 err = -EIO;
4002                 goto err_sw_init;
4003         }
4004
4005         netdev->hw_features = NETIF_F_SG |
4006                               NETIF_F_IP_CSUM |
4007                               NETIF_F_IPV6_CSUM |
4008                               NETIF_F_TSO |
4009                               NETIF_F_TSO6 |
4010                               NETIF_F_RXCSUM;
4011
4012         netdev->features = netdev->hw_features |
4013                            NETIF_F_HW_VLAN_CTAG_TX |
4014                            NETIF_F_HW_VLAN_CTAG_RX |
4015                            NETIF_F_HW_VLAN_CTAG_FILTER;
4016
4017         netdev->vlan_features |= NETIF_F_TSO |
4018                                  NETIF_F_TSO6 |
4019                                  NETIF_F_IP_CSUM |
4020                                  NETIF_F_IPV6_CSUM |
4021                                  NETIF_F_SG;
4022
4023         if (pci_using_dac)
4024                 netdev->features |= NETIF_F_HIGHDMA;
4025
4026         netdev->priv_flags |= IFF_UNICAST_FLT;
4027
4028         if (IXGBE_REMOVED(hw->hw_addr)) {
4029                 err = -EIO;
4030                 goto err_sw_init;
4031         }
4032
4033         setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
4034                     (unsigned long)adapter);
4035
4036         INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4037         set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4038         clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4039
4040         err = ixgbevf_init_interrupt_scheme(adapter);
4041         if (err)
4042                 goto err_sw_init;
4043
4044         strcpy(netdev->name, "eth%d");
4045
4046         err = register_netdev(netdev);
4047         if (err)
4048                 goto err_register;
4049
4050         pci_set_drvdata(pdev, netdev);
4051         netif_carrier_off(netdev);
4052
4053         ixgbevf_init_last_counter_stats(adapter);
4054
4055         /* print the VF info */
4056         dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4057         dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4058
4059         switch (hw->mac.type) {
4060         case ixgbe_mac_X550_vf:
4061                 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4062                 break;
4063         case ixgbe_mac_X540_vf:
4064                 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4065                 break;
4066         case ixgbe_mac_82599_vf:
4067         default:
4068                 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4069                 break;
4070         }
4071
4072         return 0;
4073
4074 err_register:
4075         ixgbevf_clear_interrupt_scheme(adapter);
4076 err_sw_init:
4077         ixgbevf_reset_interrupt_capability(adapter);
4078         iounmap(adapter->io_addr);
4079 err_ioremap:
4080         disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4081         free_netdev(netdev);
4082 err_alloc_etherdev:
4083         pci_release_regions(pdev);
4084 err_pci_reg:
4085 err_dma:
4086         if (!adapter || disable_dev)
4087                 pci_disable_device(pdev);
4088         return err;
4089 }
4090
4091 /**
4092  * ixgbevf_remove - Device Removal Routine
4093  * @pdev: PCI device information struct
4094  *
4095  * ixgbevf_remove is called by the PCI subsystem to alert the driver
4096  * that it should release a PCI device.  This could be caused by a
4097  * Hot-Plug event, or because the driver is going to be removed from
4098  * memory.
4099  **/
4100 static void ixgbevf_remove(struct pci_dev *pdev)
4101 {
4102         struct net_device *netdev = pci_get_drvdata(pdev);
4103         struct ixgbevf_adapter *adapter;
4104         bool disable_dev;
4105
4106         if (!netdev)
4107                 return;
4108
4109         adapter = netdev_priv(netdev);
4110
4111         set_bit(__IXGBEVF_REMOVING, &adapter->state);
4112         cancel_work_sync(&adapter->service_task);
4113
4114         if (netdev->reg_state == NETREG_REGISTERED)
4115                 unregister_netdev(netdev);
4116
4117         ixgbevf_clear_interrupt_scheme(adapter);
4118         ixgbevf_reset_interrupt_capability(adapter);
4119
4120         iounmap(adapter->io_addr);
4121         pci_release_regions(pdev);
4122
4123         hw_dbg(&adapter->hw, "Remove complete\n");
4124
4125         disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4126         free_netdev(netdev);
4127
4128         if (disable_dev)
4129                 pci_disable_device(pdev);
4130 }
4131
4132 /**
4133  * ixgbevf_io_error_detected - called when PCI error is detected
4134  * @pdev: Pointer to PCI device
4135  * @state: The current pci connection state
4136  *
4137  * This function is called after a PCI bus error affecting
4138  * this device has been detected.
4139  **/
4140 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4141                                                   pci_channel_state_t state)
4142 {
4143         struct net_device *netdev = pci_get_drvdata(pdev);
4144         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4145
4146         if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4147                 return PCI_ERS_RESULT_DISCONNECT;
4148
4149         rtnl_lock();
4150         netif_device_detach(netdev);
4151
4152         if (state == pci_channel_io_perm_failure) {
4153                 rtnl_unlock();
4154                 return PCI_ERS_RESULT_DISCONNECT;
4155         }
4156
4157         if (netif_running(netdev))
4158                 ixgbevf_down(adapter);
4159
4160         if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4161                 pci_disable_device(pdev);
4162         rtnl_unlock();
4163
4164         /* Request a slot reset. */
4165         return PCI_ERS_RESULT_NEED_RESET;
4166 }
4167
4168 /**
4169  * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4170  * @pdev: Pointer to PCI device
4171  *
4172  * Restart the card from scratch, as if from a cold-boot. Implementation
4173  * resembles the first-half of the ixgbevf_resume routine.
4174  **/
4175 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4176 {
4177         struct net_device *netdev = pci_get_drvdata(pdev);
4178         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4179
4180         if (pci_enable_device_mem(pdev)) {
4181                 dev_err(&pdev->dev,
4182                         "Cannot re-enable PCI device after reset.\n");
4183                 return PCI_ERS_RESULT_DISCONNECT;
4184         }
4185
4186         smp_mb__before_atomic();
4187         clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4188         pci_set_master(pdev);
4189
4190         ixgbevf_reset(adapter);
4191
4192         return PCI_ERS_RESULT_RECOVERED;
4193 }
4194
4195 /**
4196  * ixgbevf_io_resume - called when traffic can start flowing again.
4197  * @pdev: Pointer to PCI device
4198  *
4199  * This callback is called when the error recovery driver tells us that
4200  * it's OK to resume normal operation. Implementation resembles the
4201  * second-half of the ixgbevf_resume routine.
4202  **/
4203 static void ixgbevf_io_resume(struct pci_dev *pdev)
4204 {
4205         struct net_device *netdev = pci_get_drvdata(pdev);
4206         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4207
4208         if (netif_running(netdev))
4209                 ixgbevf_up(adapter);
4210
4211         netif_device_attach(netdev);
4212 }
4213
4214 /* PCI Error Recovery (ERS) */
4215 static const struct pci_error_handlers ixgbevf_err_handler = {
4216         .error_detected = ixgbevf_io_error_detected,
4217         .slot_reset = ixgbevf_io_slot_reset,
4218         .resume = ixgbevf_io_resume,
4219 };
4220
4221 static struct pci_driver ixgbevf_driver = {
4222         .name           = ixgbevf_driver_name,
4223         .id_table       = ixgbevf_pci_tbl,
4224         .probe          = ixgbevf_probe,
4225         .remove         = ixgbevf_remove,
4226 #ifdef CONFIG_PM
4227         /* Power Management Hooks */
4228         .suspend        = ixgbevf_suspend,
4229         .resume         = ixgbevf_resume,
4230 #endif
4231         .shutdown       = ixgbevf_shutdown,
4232         .err_handler    = &ixgbevf_err_handler
4233 };
4234
4235 /**
4236  * ixgbevf_init_module - Driver Registration Routine
4237  *
4238  * ixgbevf_init_module is the first routine called when the driver is
4239  * loaded. All it does is register with the PCI subsystem.
4240  **/
4241 static int __init ixgbevf_init_module(void)
4242 {
4243         int ret;
4244
4245         pr_info("%s - version %s\n", ixgbevf_driver_string,
4246                 ixgbevf_driver_version);
4247
4248         pr_info("%s\n", ixgbevf_copyright);
4249
4250         ret = pci_register_driver(&ixgbevf_driver);
4251         return ret;
4252 }
4253
4254 module_init(ixgbevf_init_module);
4255
4256 /**
4257  * ixgbevf_exit_module - Driver Exit Cleanup Routine
4258  *
4259  * ixgbevf_exit_module is called just before the driver is removed
4260  * from memory.
4261  **/
4262 static void __exit ixgbevf_exit_module(void)
4263 {
4264         pci_unregister_driver(&ixgbevf_driver);
4265 }
4266
4267 #ifdef DEBUG
4268 /**
4269  * ixgbevf_get_hw_dev_name - return device name string
4270  * used by hardware layer to print debugging information
4271  **/
4272 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4273 {
4274         struct ixgbevf_adapter *adapter = hw->back;
4275
4276         return adapter->netdev->name;
4277 }
4278
4279 #endif
4280 module_exit(ixgbevf_exit_module);
4281
4282 /* ixgbevf_main.c */