/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>

#ifdef CONFIG_OF
#include <linux/of_net.h>
#endif

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#ifdef CONFIG_IXGBE_VXLAN
#include <net/vxlan.h>
#endif

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "4.2.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2015 Intel Corporation.";

static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598]           = &ixgbe_82598_info,
        [board_82599]           = &ixgbe_82599_info,
        [board_X540]            = &ixgbe_X540_info,
        [board_X550]            = &ixgbe_X550_info,
        [board_X550EM_x]        = &ixgbe_X550EM_x_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
                 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
                 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
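/* debug < 0 (the default) selects DEFAULT_MSG_ENABLE above via
 * netif_msg_init(); non-negative values are treated as a message level
 */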
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
                                          u32 reg, u16 *value)
{
        struct pci_dev *parent_dev;
        struct pci_bus *parent_bus;

        parent_bus = adapter->pdev->bus->parent;
        if (!parent_bus)
                return -1;

        parent_dev = parent_bus->self;
        if (!parent_dev)
                return -1;

        if (!pci_is_pcie(parent_dev))
                return -1;

        pcie_capability_read_word(parent_dev, reg, value);
        if (*value == IXGBE_FAILED_READ_CFG_WORD &&
            ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
                return -1;
        return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u16 link_status = 0;
        int err;

        hw->bus.type = ixgbe_bus_type_pci_express;

        /* Get the negotiated link width and speed from PCI config space of the
         * parent, as this device is behind a switch
         */
        err = ixgbe_read_pci_cfg_word_parent(adapter, PCI_EXP_LNKSTA,
                                             &link_status);

        /* assume caller will handle error case */
        if (err)
                return err;

        hw->bus.width = ixgbe_convert_bus_width(link_status);
        hw->bus.speed = ixgbe_convert_bus_speed(link_status);

        return 0;
}

/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from the
 * device. Used to ensure that various locations all have the correct device ID
 * checks.
 */
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
        switch (hw->device_id) {
        case IXGBE_DEV_ID_82599_SFP_SF_QP:
        case IXGBE_DEV_ID_82599_QSFP_SF_QP:
                return true;
        default:
                return false;
        }
}

static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
                                     int expected_gts)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int max_gts = 0;
        enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
        enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
        struct pci_dev *pdev;

        /* Some devices are not connected over PCIe and thus do not negotiate
         * speed. These devices do not have valid bus info, and thus any report
         * we generate may not be correct.
         */
        if (hw->bus.type == ixgbe_bus_type_internal)
                return;

        /* determine whether to use the parent device */
        if (ixgbe_pcie_from_parent(&adapter->hw))
                pdev = adapter->pdev->bus->parent->self;
        else
                pdev = adapter->pdev;

        if (pcie_get_minimum_link(pdev, &speed, &width) ||
            speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
                e_dev_warn("Unable to determine PCI Express bandwidth.\n");
                return;
        }

        switch (speed) {
        case PCIE_SPEED_2_5GT:
                /* 8b/10b encoding reduces max throughput by 20% */
                max_gts = 2 * width;
                break;
        case PCIE_SPEED_5_0GT:
                /* 8b/10b encoding reduces max throughput by 20% */
                max_gts = 4 * width;
                break;
        case PCIE_SPEED_8_0GT:
                /* 128b/130b encoding reduces throughput by less than 2% */
                max_gts = 8 * width;
                break;
        default:
                e_dev_warn("Unable to determine PCI Express bandwidth.\n");
                return;
        }
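        /* e.g. a 5.0GT/s x8 link: 5 * 0.8 (8b/10b overhead) * 8 lanes
         * yields max_gts = 4 * 8 = 32, reported below as 32GT/s
         */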

        e_dev_info("PCI Express bandwidth of %dGT/s available\n",
                   max_gts);
        e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
                   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
                    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
                    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
                    "Unknown"),
                   width,
                   (speed == PCIE_SPEED_2_5GT ? "20%" :
                    speed == PCIE_SPEED_5_0GT ? "20%" :
                    speed == PCIE_SPEED_8_0GT ? "<2%" :
                    "Unknown"));

        if (max_gts < expected_gts) {
                e_dev_warn("This is not sufficient for optimal performance of this card.\n");
                e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
                        expected_gts);
                e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
        }
}

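/* Schedule the service task unless the adapter is already going down or
 * being removed; __IXGBE_SERVICE_SCHED prevents queuing the work twice.
 */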
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
        if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
            !test_bit(__IXGBE_REMOVING, &adapter->state) &&
            !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
                schedule_work(&adapter->service_task);
}

static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
        struct ixgbe_adapter *adapter = hw->back;

        if (!hw->hw_addr)
                return;
        hw->hw_addr = NULL;
        e_dev_err("Adapter removed\n");
        if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
                ixgbe_service_event_schedule(adapter);
}

static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
        u32 value;

        /* The following check not only optimizes a bit by not
         * performing a read on the status register when the
         * register just read was a status register read that
         * returned IXGBE_FAILED_READ_REG. It also blocks any
         * potential recursion.
         */
        if (reg == IXGBE_STATUS) {
                ixgbe_remove_adapter(hw);
                return;
        }
        value = ixgbe_read_reg(hw, IXGBE_STATUS);
        if (value == IXGBE_FAILED_READ_REG)
                ixgbe_remove_adapter(hw);
}

/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Return: value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones. This function avoids reading from
 * the hardware if a removal was previously detected in which case it
 * returns IXGBE_FAILED_READ_REG (all ones).
 */
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
        u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value;

        if (ixgbe_removed(reg_addr))
                return IXGBE_FAILED_READ_REG;
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbe_check_remove(hw, reg);
        return value;
}

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
        u16 value;

        pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
        if (value == IXGBE_FAILED_READ_CFG_WORD) {
                ixgbe_remove_adapter(hw);
                return true;
        }
        return false;
}

u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
        struct ixgbe_adapter *adapter = hw->back;
        u16 value;

        if (ixgbe_removed(hw->hw_addr))
                return IXGBE_FAILED_READ_CFG_WORD;
        pci_read_config_word(adapter->pdev, reg, &value);
        if (value == IXGBE_FAILED_READ_CFG_WORD &&
            ixgbe_check_cfg_remove(hw, adapter->pdev))
                return IXGBE_FAILED_READ_CFG_WORD;
        return value;
}

#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
        struct ixgbe_adapter *adapter = hw->back;
        u32 value;

        if (ixgbe_removed(hw->hw_addr))
                return IXGBE_FAILED_READ_CFG_DWORD;
        pci_read_config_dword(adapter->pdev, reg, &value);
        if (value == IXGBE_FAILED_READ_CFG_DWORD &&
            ixgbe_check_cfg_remove(hw, adapter->pdev))
                return IXGBE_FAILED_READ_CFG_DWORD;
        return value;
}
#endif /* CONFIG_PCI_IOV */

void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
        struct ixgbe_adapter *adapter = hw->back;

        if (ixgbe_removed(hw->hw_addr))
                return;
        pci_write_config_word(adapter->pdev, reg, value);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
        BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

        /* flush memory to make sure state is correct before next watchdog */
        smp_mb__before_atomic();
        clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
        u32 ofs;
        const char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

        /* General Registers */
        {IXGBE_CTRL, "CTRL"},
        {IXGBE_STATUS, "STATUS"},
        {IXGBE_CTRL_EXT, "CTRL_EXT"},

        /* Interrupt Registers */
        {IXGBE_EICR, "EICR"},

        /* RX Registers */
        {IXGBE_SRRCTL(0), "SRRCTL"},
        {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
        {IXGBE_RDLEN(0), "RDLEN"},
        {IXGBE_RDH(0), "RDH"},
        {IXGBE_RDT(0), "RDT"},
        {IXGBE_RXDCTL(0), "RXDCTL"},
        {IXGBE_RDBAL(0), "RDBAL"},
        {IXGBE_RDBAH(0), "RDBAH"},

        /* TX Registers */
        {IXGBE_TDBAL(0), "TDBAL"},
        {IXGBE_TDBAH(0), "TDBAH"},
        {IXGBE_TDLEN(0), "TDLEN"},
        {IXGBE_TDH(0), "TDH"},
        {IXGBE_TDT(0), "TDT"},
        {IXGBE_TXDCTL(0), "TXDCTL"},

        /* List Terminator */
        { .name = NULL }
};

/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw,
                          const struct ixgbe_reg_info *reginfo)
{
        int i = 0, j = 0;
        char rname[16];
        u32 regs[64];

        switch (reginfo->ofs) {
        case IXGBE_SRRCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
                break;
        case IXGBE_DCA_RXCTRL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
                break;
        case IXGBE_RDLEN(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
                break;
        case IXGBE_RDH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
                break;
        case IXGBE_RDT(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
                break;
        case IXGBE_RXDCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                break;
        case IXGBE_RDBAL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
                break;
        case IXGBE_RDBAH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
                break;
        case IXGBE_TDBAL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
                break;
        case IXGBE_TDBAH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
                break;
        case IXGBE_TDLEN(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
                break;
        case IXGBE_TDH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
                break;
        case IXGBE_TDT(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
                break;
        case IXGBE_TXDCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
                break;
        default:
                pr_info("%-15s %08x\n", reginfo->name,
                        IXGBE_READ_REG(hw, reginfo->ofs));
                return;
        }

        for (i = 0; i < 8; i++) {
                snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
                pr_err("%-15s", rname);
                for (j = 0; j < 8; j++)
                        pr_cont(" %08x", regs[i*8+j]);
                pr_cont("\n");
        }
}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        const struct ixgbe_reg_info *reginfo;
        int n = 0;
        struct ixgbe_ring *tx_ring;
        struct ixgbe_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
        struct my_u0 { u64 a; u64 b; } *u0;
        struct ixgbe_ring *rx_ring;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *rx_buffer_info;
        u32 staterr;
        int i = 0;

        if (!netif_msg_hw(adapter))
                return;

        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
                pr_info("Device Name     state            "
                        "trans_start      last_rx\n");
                pr_info("%-15s %016lX %016lX %016lX\n",
                        netdev->name,
                        netdev->state,
                        netdev->trans_start,
                        netdev->last_rx);
        }

        /* Print Registers */
        dev_info(&adapter->pdev->dev, "Register Dump\n");
        pr_info(" Register Name   Value\n");
        for (reginfo = ixgbe_reg_info_tbl;
             reginfo->name; reginfo++) {
                ixgbe_regdump(hw, reginfo);
        }

        /* Print TX Ring Summary */
        if (!netdev || !netif_running(netdev))
                return;

        dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
        pr_info(" %s     %s              %s        %s\n",
                "Queue [NTU] [NTC] [bi(ntc)->dma  ]",
                "leng", "ntw", "timestamp");
        for (n = 0; n < adapter->num_tx_queues; n++) {
                tx_ring = adapter->tx_ring[n];
                tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
                pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
                           n, tx_ring->next_to_use, tx_ring->next_to_clean,
                           (u64)dma_unmap_addr(tx_buffer, dma),
                           dma_unmap_len(tx_buffer, len),
                           tx_buffer->next_to_watch,
                           (u64)tx_buffer->time_stamp);
        }

        /* Print TX Rings */
        if (!netif_msg_tx_done(adapter))
                goto rx_ring_summary;

        dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

        /* Transmit Descriptor Formats
         *
         * 82598 Advanced Transmit Descriptor
         *   +--------------------------------------------------------------+
         * 0 |         Buffer Address [63:0]                                |
         *   +--------------------------------------------------------------+
         * 8 |  PAYLEN  | POPTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
         *   +--------------------------------------------------------------+
         *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
         *
         * 82598 Advanced Transmit Descriptor (Write-Back Format)
         *   +--------------------------------------------------------------+
         * 0 |                          RSV [63:0]                          |
         *   +--------------------------------------------------------------+
         * 8 |            RSV           |  STA  |          NXTSEQ           |
         *   +--------------------------------------------------------------+
         *   63                       36 35   32 31                         0
         *
         * 82599+ Advanced Transmit Descriptor
         *   +--------------------------------------------------------------+
         * 0 |         Buffer Address [63:0]                                |
         *   +--------------------------------------------------------------+
         * 8 |PAYLEN  |POPTS|CC|IDX  |STA  |DCMD  |DTYP |MAC  |RSV  |DTALEN |
         *   +--------------------------------------------------------------+
         *   63     46 45 40 39 38 36 35 32 31  24 23 20 19 18 17 16 15     0
         *
         * 82599+ Advanced Transmit Descriptor (Write-Back Format)
         *   +--------------------------------------------------------------+
         * 0 |                          RSV [63:0]                          |
         *   +--------------------------------------------------------------+
         * 8 |            RSV           |  STA  |           RSV             |
         *   +--------------------------------------------------------------+
         *   63                       36 35   32 31                         0
         */

        for (n = 0; n < adapter->num_tx_queues; n++) {
                tx_ring = adapter->tx_ring[n];
                pr_info("------------------------------------\n");
                pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
                pr_info("------------------------------------\n");
                pr_info("%s%s    %s              %s        %s          %s\n",
                        "T [desc]     [address 63:0  ] ",
                        "[PlPOIdStDDt Ln] [bi->dma       ] ",
                        "leng", "ntw", "timestamp", "bi->skb");

                for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
                        tx_desc = IXGBE_TX_DESC(tx_ring, i);
                        tx_buffer = &tx_ring->tx_buffer_info[i];
                        u0 = (struct my_u0 *)tx_desc;
                        if (dma_unmap_len(tx_buffer, len) > 0) {
                                pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p",
                                        i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        (u64)dma_unmap_addr(tx_buffer, dma),
                                        dma_unmap_len(tx_buffer, len),
                                        tx_buffer->next_to_watch,
                                        (u64)tx_buffer->time_stamp,
                                        tx_buffer->skb);
                                if (i == tx_ring->next_to_use &&
                                        i == tx_ring->next_to_clean)
                                        pr_cont(" NTC/U\n");
                                else if (i == tx_ring->next_to_use)
                                        pr_cont(" NTU\n");
                                else if (i == tx_ring->next_to_clean)
                                        pr_cont(" NTC\n");
                                else
                                        pr_cont("\n");

                                if (netif_msg_pktdata(adapter) &&
                                    tx_buffer->skb)
                                        print_hex_dump(KERN_INFO, "",
                                                DUMP_PREFIX_ADDRESS, 16, 1,
                                                tx_buffer->skb->data,
                                                dma_unmap_len(tx_buffer, len),
                                                true);
                        }
                }
        }

        /* Print RX Rings Summary */
rx_ring_summary:
        dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
        pr_info("Queue [NTU] [NTC]\n");
        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
                pr_info("%5d %5X %5X\n",
                        n, rx_ring->next_to_use, rx_ring->next_to_clean);
        }

        /* Print RX Rings */
        if (!netif_msg_rx_status(adapter))
                return;

        dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

        /* Receive Descriptor Formats
         *
         * 82598 Advanced Receive Descriptor (Read) Format
         *    63                                           1        0
         *    +-----------------------------------------------------+
         *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
         *    +----------------------------------------------+------+
         *  8 |       Header Buffer Address [63:1]           |  DD  |
         *    +-----------------------------------------------------+
         *
         *
         * 82598 Advanced Receive Descriptor (Write-Back) Format
         *
         *   63       48 47    32 31  30      21 20 16 15   4 3     0
         *   +------------------------------------------------------+
         * 0 |       RSS Hash /  |SPH| HDR_LEN  | RSV |Packet|  RSS |
         *   | Packet   | IP     |   |          |     | Type | Type |
         *   | Checksum | Ident  |   |          |     |      |      |
         *   +------------------------------------------------------+
         * 8 | VLAN Tag | Length | Extended Error | Extended Status |
         *   +------------------------------------------------------+
         *   63       48 47    32 31            20 19               0
         *
         * 82599+ Advanced Receive Descriptor (Read) Format
         *    63                                           1        0
         *    +-----------------------------------------------------+
         *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
         *    +----------------------------------------------+------+
         *  8 |       Header Buffer Address [63:1]           |  DD  |
         *    +-----------------------------------------------------+
         *
         *
         * 82599+ Advanced Receive Descriptor (Write-Back) Format
         *
         *   63       48 47    32 31  30      21 20 17 16   4 3     0
         *   +------------------------------------------------------+
         * 0 |RSS / Frag Checksum|SPH| HDR_LEN  |RSC- |Packet|  RSS |
         *   |/ RTT / PCoE_PARAM |   |          | CNT | Type | Type |
         *   |/ Flow Dir Flt ID  |   |          |     |      |      |
         *   +------------------------------------------------------+
         * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
         *   +------------------------------------------------------+
         *   63       48 47    32 31          20 19                 0
         */

        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
                pr_info("------------------------------------\n");
                pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
                pr_info("------------------------------------\n");
                pr_info("%s%s%s",
                        "R  [desc]      [ PktBuf     A0] ",
                        "[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
                        "<-- Adv Rx Read format\n");
                pr_info("%s%s%s",
                        "RWB[desc]      [PcsmIpSHl PtRs] ",
                        "[vl er S cks ln] ---------------- [bi->skb       ] ",
                        "<-- Adv Rx Write-Back format\n");

                for (i = 0; i < rx_ring->count; i++) {
                        rx_buffer_info = &rx_ring->rx_buffer_info[i];
                        rx_desc = IXGBE_RX_DESC(rx_ring, i);
                        u0 = (struct my_u0 *)rx_desc;
                        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
                        if (staterr & IXGBE_RXD_STAT_DD) {
                                /* Descriptor Done */
                                pr_info("RWB[0x%03X]     %016llX "
                                        "%016llX ---------------- %p", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        rx_buffer_info->skb);
                        } else {
                                pr_info("R  [0x%03X]     %016llX "
                                        "%016llX %016llX %p", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        (u64)rx_buffer_info->dma,
                                        rx_buffer_info->skb);

                                if (netif_msg_pktdata(adapter) &&
                                    rx_buffer_info->dma) {
                                        print_hex_dump(KERN_INFO, "",
                                           DUMP_PREFIX_ADDRESS, 16, 1,
                                           page_address(rx_buffer_info->page) +
                                                    rx_buffer_info->page_offset,
                                           ixgbe_rx_bufsz(rx_ring), true);
                                }
                        }

                        if (i == rx_ring->next_to_use)
                                pr_cont(" NTU\n");
                        else if (i == rx_ring->next_to_clean)
                                pr_cont(" NTC\n");
                        else
                                pr_cont("\n");
                }
        }
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                if (direction == -1)
                        direction = 0;
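                /* 82598: each IVAR register packs four 8-bit entries; Rx
                 * causes occupy entries 0-63 and Tx causes 64-127, so
                 * (direction * 64 + queue) selects the entry and the
                 * shift below picks the byte within the register.
                 */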
                index = (((direction * 64) + queue) >> 2) & 0x1F;
                ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
                ivar &= ~(0xFF << (8 * (queue & 0x3)));
                ivar |= (msix_vector << (8 * (queue & 0x3)));
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((queue & 1) * 8);
                        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
                        break;
                } else {
                        /* tx or rx causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
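                        /* each IVAR register covers a pair of queues: the
                         * low 16 bits map the even queue and the high 16
                         * bits the odd one, with Rx in the low byte and Tx
                         * in the high byte of each half.
                         */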
                        index = ((16 * (queue & 1)) + (8 * direction));
                        ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
                        break;
                }
        default:
                break;
        }
}

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
        u32 mask;

        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
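                /* these MACs expose 64 queue interrupts through a pair of
                 * extended interrupt cause-set registers
                 */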
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
                break;
        default:
                break;
        }
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
                                      struct ixgbe_tx_buffer *tx_buffer)
{
        if (tx_buffer->skb) {
                dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
                                         dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
        } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
        /* tx_buffer must be completely set up in the transmit path */
}

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_hw_stats *hwstats = &adapter->stats;
        int i;
        u32 data;

        if ((hw->fc.current_mode != ixgbe_fc_full) &&
            (hw->fc.current_mode != ixgbe_fc_rx_pause))
                return;

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
                break;
        default:
                data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        }
        hwstats->lxoffrxc += data;

        /* refill credits (no tx hang) if we received xoff */
        if (!data)
                return;

        for (i = 0; i < adapter->num_tx_queues; i++)
                clear_bit(__IXGBE_HANG_CHECK_ARMED,
                          &adapter->tx_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_hw_stats *hwstats = &adapter->stats;
        u32 xoff[8] = {0};
        u8 tc;
        int i;
        bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

        if (adapter->ixgbe_ieee_pfc)
                pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

        if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
                ixgbe_update_xoff_rx_lfc(adapter);
                return;
        }

        /* update stats for each tc, only valid with PFC enabled */
        for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
                u32 pxoffrxc;

                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
                        break;
                default:
                        pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
                }
                hwstats->pxoffrxc[i] += pxoffrxc;
                /* Get the TC for given UP */
                tc = netdev_get_prio_tc_map(adapter->netdev, i);
                xoff[tc] += pxoffrxc;
        }

        /* disarm tx queues that have received xoff frames */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

                tc = tx_ring->dcb_tc;
                if (xoff[tc])
                        clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
        }
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
        return ring->stats.packets;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
        struct ixgbe_adapter *adapter;
        struct ixgbe_hw *hw;
        u32 head, tail;

        if (ring->l2_accel_priv)
                adapter = ring->l2_accel_priv->real_adapter;
        else
                adapter = netdev_priv(ring->netdev);

        hw = &adapter->hw;
        head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
        tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

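        /* pending is the circular distance from the hardware head (the
         * consumer) to the software tail (the producer), allowing for wrap
         */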
        if (head != tail)
                return (head < tail) ?
                        tail - head : (tail + ring->count - head);

        return 0;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
        u32 tx_done = ixgbe_get_tx_completed(tx_ring);
        u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
        u32 tx_pending = ixgbe_get_tx_pending(tx_ring);

        clear_check_for_tx_hang(tx_ring);

        /*
         * Check for a hung queue, but be thorough. This verifies
         * that a transmit has been completed since the previous
         * check AND there is at least one packet pending. The
         * ARMED bit is set to indicate a potential hang. The
         * bit is cleared if a pause frame is received to remove
         * false hang detection due to PFC or 802.3x frames. By
         * requiring this to fail twice we avoid races with
         * pfc clearing the ARMED bit and conditions where we
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
        if (tx_done_old == tx_done && tx_pending)
                /* make sure it is true for two checks in a row */
                return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
                                        &tx_ring->state);
        /* update completed stats and continue */
        tx_ring->tx_stats.tx_done_old = tx_done;
        /* reset the countdown */
        clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);

        return false;
}

/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{
        /* Do the reset outside of interrupt context */
        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
                e_warn(drv, "initiating reset due to tx timeout\n");
                ixgbe_service_event_schedule(adapter);
        }
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = q_vector->tx.work_limit;
        unsigned int i = tx_ring->next_to_clean;

        if (test_bit(__IXGBE_DOWN, &adapter->state))
                return true;

        tx_buffer = &tx_ring->tx_buffer_info[i];
        tx_desc = IXGBE_TX_DESC(tx_ring, i);
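        /* bias i by the ring size so the wrap check in the loop below is
         * simply a test for i reaching zero
         */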
        i -= tx_ring->count;

        do {
                union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                read_barrier_depends();

                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                tx_buffer->next_to_watch = NULL;

                /* update the statistics for this packet */
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;

                /* free the skb */
                dev_consume_skb_any(tx_buffer->skb);

                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buffer, dma),
                                 dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);

                /* clear tx_buffer data */
                tx_buffer->skb = NULL;
                dma_unmap_len_set(tx_buffer, len, 0);

                /* unmap remaining buffers */
                while (tx_desc != eop_desc) {
                        tx_buffer++;
                        tx_desc++;
                        i++;
                        if (unlikely(!i)) {
                                i -= tx_ring->count;
                                tx_buffer = tx_ring->tx_buffer_info;
                                tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                        }

                        /* unmap any remaining paged data */
                        if (dma_unmap_len(tx_buffer, len)) {
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(tx_buffer, dma),
                                               dma_unmap_len(tx_buffer, len),
                                               DMA_TO_DEVICE);
                                dma_unmap_len_set(tx_buffer, len, 0);
                        }
                }

                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buffer = tx_ring->tx_buffer_info;
                        tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                }

                /* issue prefetch for next Tx descriptor */
                prefetch(tx_desc);

                /* update budget accounting */
                budget--;
        } while (likely(budget));

        i += tx_ring->count;
        tx_ring->next_to_clean = i;
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;

        if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                struct ixgbe_hw *hw = &adapter->hw;

                e_err(drv, "Detected Tx Unit Hang\n"
                        "  Tx Queue             <%d>\n"
                        "  TDH, TDT             <%x>, <%x>\n"
                        "  next_to_use          <%x>\n"
                        "  next_to_clean        <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp           <%lx>\n"
                        "  jiffies              <%lx>\n",
                        tx_ring->queue_index,
                        IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
                        IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
                        tx_ring->next_to_use, i,
                        tx_ring->tx_buffer_info[i].time_stamp, jiffies);

                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

                e_info(probe,
                       "tx hang %d detected on queue %d, resetting adapter\n",
                        adapter->tx_timeout_count + 1, tx_ring->queue_index);

                ixgbe_tx_timeout_reset(adapter);

                /* the adapter is about to reset, no point in enabling stuff */
                return true;
        }

        netdev_tx_completed_queue(txring_txq(tx_ring),
                                  total_packets, total_bytes);

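/* wake the queue once enough descriptors for two worst-case frames
 * (DESC_NEEDED each) are free again
 */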
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index)
                    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }

        return !!budget;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring,
                                int cpu)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 txctrl = 0;
        u16 reg_offset;

        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                txctrl = dca3_get_tag(tx_ring->dev, cpu);

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
                txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
                break;
        default:
                /* for unknown hardware do not write register */
                return;
        }

        /*
         * We can enable relaxed ordering for reads, but not writes when
         * DCA is enabled.  This is due to a known issue in some chipsets
         * which will cause the DCA tag to be cleared.
         */
        txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
                  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
                  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

        IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring,
                                int cpu)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rxctrl = 0;
        u8 reg_idx = rx_ring->reg_idx;

        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                rxctrl = dca3_get_tag(rx_ring->dev, cpu);

        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
                break;
        default:
                break;
        }

        /*
         * We can enable relaxed ordering for reads, but not writes when
         * DCA is enabled.  This is due to a known issue in some chipsets
         * which will cause the DCA tag to be cleared.
         */
        rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
                  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
                  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

        IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *ring;
        int cpu = get_cpu();

        if (q_vector->cpu == cpu)
                goto out_no_update;

        ixgbe_for_each_ring(ring, q_vector->tx)
                ixgbe_update_tx_dca(adapter, ring, cpu);

        ixgbe_for_each_ring(ring, q_vector->rx)
                ixgbe_update_rx_dca(adapter, ring, cpu);

        q_vector->cpu = cpu;
out_no_update:
        put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
        int i;

        /* always use CB2 mode, difference is masked in the CB driver */
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
                                IXGBE_DCA_CTRL_DCA_MODE_CB2);
        else
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
                                IXGBE_DCA_CTRL_DCA_DISABLE);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                adapter->q_vector[i]->cpu = -1;
                ixgbe_update_dca(adapter->q_vector[i]);
        }
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
        struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
        unsigned long event = *(unsigned long *)data;

        if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
                return 0;

        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if we're already enabled, don't do it again */
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        break;
                if (dca_add_requester(dev) == 0) {
                        adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
                                        IXGBE_DCA_CTRL_DCA_MODE_CB2);
                        break;
                }
                /* Fall Through since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                        dca_remove_requester(dev);
                        adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
                                        IXGBE_DCA_CTRL_DCA_DISABLE);
                }
                break;
        }

        return 0;
}

#endif /* CONFIG_IXGBE_DCA */

1385 #define IXGBE_RSS_L4_TYPES_MASK \
1386         ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1387          (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1388          (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1389          (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1390
1391 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1392                                  union ixgbe_adv_rx_desc *rx_desc,
1393                                  struct sk_buff *skb)
1394 {
1395         u16 rss_type;
1396
1397         if (!(ring->netdev->features & NETIF_F_RXHASH))
1398                 return;
1399
1400         rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1401                    IXGBE_RXDADV_RSSTYPE_MASK;
1402
1403         if (!rss_type)
1404                 return;
1405
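             /* only the TCP/UDP RSS types carry an L4 hash; every other
              * type is reported as an L3 hash
              */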
1406         skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1407                      (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1408                      PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1409 }
1410
1411 #ifdef IXGBE_FCOE
1412 /**
1413  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
1414  * @ring: structure containing ring specific data
1415  * @rx_desc: advanced rx descriptor
1416  *
1417  * Returns: true if it is an FCoE packet
1418  */
1419 static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1420                                     union ixgbe_adv_rx_desc *rx_desc)
1421 {
1422         __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1423
1424         return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1425                ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1426                 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1427                              IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1428 }
1429
1430 #endif /* IXGBE_FCOE */
1431 /**
1432  * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
1433  * @ring: structure containing ring specific data
1434  * @rx_desc: current Rx descriptor being processed
1435  * @skb: skb currently being received and modified
1436  **/
1437 static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1438                                      union ixgbe_adv_rx_desc *rx_desc,
1439                                      struct sk_buff *skb)
1440 {
1441         __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1442         __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
1443         bool encap_pkt = false;
1444
1445         skb_checksum_none_assert(skb);
1446
1447         /* Rx csum disabled */
1448         if (!(ring->netdev->features & NETIF_F_RXCSUM))
1449                 return;
1450
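             /* a VXLAN packet type combined with the tunnel bit in hdr_info
              * marks an encapsulated frame; the tunnel bit is defined
              * against the full 32-bit dword, hence the shift down by 16
              */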
1451         if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
1452             (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
1453                 encap_pkt = true;
1454                 skb->encapsulation = 1;
1455         }
1456
1457         /* if IP and error */
1458         if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1459             ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1460                 ring->rx_stats.csum_err++;
1461                 return;
1462         }
1463
1464         if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1465                 return;
1466
1467         if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1468                 /*
1469                  * 82599 errata, UDP frames with a 0 checksum can be marked as
1470                  * checksum errors.
1471                  */
1472                 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1473                     test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1474                         return;
1475
1476                 ring->rx_stats.csum_err++;
1477                 return;
1478         }
1479
1480         /* It must be a TCP or UDP packet with a valid checksum */
1481         skb->ip_summed = CHECKSUM_UNNECESSARY;
1482         if (encap_pkt) {
1483                 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1484                         return;
1485
1486                 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1487                         ring->rx_stats.csum_err++;
1488                         return;
1489                 }
1490                 /* If we checked the outer header let the stack know */
1491                 skb->csum_level = 1;
1492         }
1493 }
1494
1495 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1496                                     struct ixgbe_rx_buffer *bi)
1497 {
1498         struct page *page = bi->page;
1499         dma_addr_t dma;
1500
1501         /* since we are recycling buffers we should seldom need to alloc */
1502         if (likely(page))
1503                 return true;
1504
1505         /* alloc new page for storage */
1506         page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1507         if (unlikely(!page)) {
1508                 rx_ring->rx_stats.alloc_rx_page_failed++;
1509                 return false;
1510         }
1511
1512         /* map page for use */
1513         dma = dma_map_page(rx_ring->dev, page, 0,
1514                            ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1515
1516         /*
1517          * if mapping failed free memory back to system since
1518          * there isn't much point in holding memory we can't use
1519          */
1520         if (dma_mapping_error(rx_ring->dev, dma)) {
1521                 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1522
1523                 rx_ring->rx_stats.alloc_rx_page_failed++;
1524                 return false;
1525         }
1526
1527         bi->dma = dma;
1528         bi->page = page;
1529         bi->page_offset = 0;
1530
1531         return true;
1532 }
1533
1534 /**
1535  * ixgbe_alloc_rx_buffers - Replace used receive buffers
1536  * @rx_ring: ring to place buffers on
1537  * @cleaned_count: number of buffers to replace
1538  **/
1539 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1540 {
1541         union ixgbe_adv_rx_desc *rx_desc;
1542         struct ixgbe_rx_buffer *bi;
1543         u16 i = rx_ring->next_to_use;
1544
1545         /* nothing to do */
1546         if (!cleaned_count)
1547                 return;
1548
1549         rx_desc = IXGBE_RX_DESC(rx_ring, i);
1550         bi = &rx_ring->rx_buffer_info[i];
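             /* bias the index by -count (the u16 wraparound is intentional)
              * so the wrap check inside the loop is simply !i; the bias is
              * removed again after the loop
              */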
1551         i -= rx_ring->count;
1552
1553         do {
1554                 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1555                         break;
1556
1557                 /*
1558                  * Refresh the desc even if buffer_addrs didn't change
1559                  * because each write-back erases this info.
1560                  */
1561                 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1562
1563                 rx_desc++;
1564                 bi++;
1565                 i++;
1566                 if (unlikely(!i)) {
1567                         rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1568                         bi = rx_ring->rx_buffer_info;
1569                         i -= rx_ring->count;
1570                 }
1571
1572                 /* clear the status bits for the next_to_use descriptor */
1573                 rx_desc->wb.upper.status_error = 0;
1574
1575                 cleaned_count--;
1576         } while (cleaned_count);
1577
1578         i += rx_ring->count;
1579
1580         if (rx_ring->next_to_use != i) {
1581                 rx_ring->next_to_use = i;
1582
1583                 /* update next to alloc since we have filled the ring */
1584                 rx_ring->next_to_alloc = i;
1585
1586                 /* Force memory writes to complete before letting h/w
1587                  * know there are new descriptors to fetch.  (Only
1588                  * applicable for weak-ordered memory model archs,
1589                  * such as IA-64).
1590                  */
1591                 wmb();
1592                 writel(i, rx_ring->tail);
1593         }
1594 }
1595
1596 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1597                                    struct sk_buff *skb)
1598 {
1599         u16 hdr_len = skb_headlen(skb);
1600
1601         /* set gso_size to avoid messing up TCP MSS */
1602         skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1603                                                  IXGBE_CB(skb)->append_cnt);
1604         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1605 }
1606
1607 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1608                                    struct sk_buff *skb)
1609 {
1610         /* if append_cnt is 0 then frame is not RSC */
1611         if (!IXGBE_CB(skb)->append_cnt)
1612                 return;
1613
1614         rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1615         rx_ring->rx_stats.rsc_flush++;
1616
1617         ixgbe_set_rsc_gso_size(rx_ring, skb);
1618
1619         /* gso_size is computed using append_cnt so always clear it last */
1620         IXGBE_CB(skb)->append_cnt = 0;
1621 }
1622
1623 /**
1624  * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
1625  * @rx_ring: rx descriptor ring packet is being transacted on
1626  * @rx_desc: pointer to the EOP Rx descriptor
1627  * @skb: pointer to current skb being populated
1628  *
1629  * This function checks the ring, descriptor, and packet information in
1630  * order to populate the hash, checksum, VLAN, timestamp, protocol, and
1631  * other fields within the skb.
1632  **/
1633 static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1634                                      union ixgbe_adv_rx_desc *rx_desc,
1635                                      struct sk_buff *skb)
1636 {
1637         struct net_device *dev = rx_ring->netdev;
1638
1639         ixgbe_update_rsc_stats(rx_ring, skb);
1640
1641         ixgbe_rx_hash(rx_ring, rx_desc, skb);
1642
1643         ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1644
1645         if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
1646                 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);
1647
1648         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1649             ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1650                 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1651                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1652         }
1653
1654         skb_record_rx_queue(skb, rx_ring->queue_index);
1655
1656         skb->protocol = eth_type_trans(skb, dev);
1657 }
1658
1659 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1660                          struct sk_buff *skb)
1661 {
1662         if (ixgbe_qv_busy_polling(q_vector))
1663                 netif_receive_skb(skb);
1664         else
1665                 napi_gro_receive(&q_vector->napi, skb);
1666 }
1667
1668 /**
1669  * ixgbe_is_non_eop - process handling of non-EOP buffers
1670  * @rx_ring: Rx ring being processed
1671  * @rx_desc: Rx descriptor for current buffer
1672  * @skb: Current socket buffer containing buffer in progress
1673  *
1674  * This function updates next to clean.  If the buffer is an EOP buffer
1675  * this function exits returning false, otherwise it will place the
1676  * sk_buff in the next buffer to be chained and return true indicating
1677  * that this is in fact a non-EOP buffer.
1678  **/
1679 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1680                              union ixgbe_adv_rx_desc *rx_desc,
1681                              struct sk_buff *skb)
1682 {
1683         u32 ntc = rx_ring->next_to_clean + 1;
1684
1685         /* fetch, update, and store next to clean */
1686         ntc = (ntc < rx_ring->count) ? ntc : 0;
1687         rx_ring->next_to_clean = ntc;
1688
1689         prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1690
1691         /* update RSC append count if present */
1692         if (ring_is_rsc_enabled(rx_ring)) {
1693                 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1694                                      cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1695
1696                 if (unlikely(rsc_enabled)) {
1697                         u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1698
1699                         rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1700                         IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1701
1702                         /* update ntc from the NEXTP field: with RSC the
                              * next buffer in the chain need not be the
                              * adjacent ring entry
                              */
1703                         ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1704                         ntc &= IXGBE_RXDADV_NEXTP_MASK;
1705                         ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1706                 }
1707         }
1708
1709         /* if we are the last buffer then there is nothing else to do */
1710         if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1711                 return false;
1712
1713         /* place skb in next buffer to be received */
1714         rx_ring->rx_buffer_info[ntc].skb = skb;
1715         rx_ring->rx_stats.non_eop_descs++;
1716
1717         return true;
1718 }
1719
1720 /**
1721  * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
1722  * @rx_ring: rx descriptor ring packet is being transacted on
1723  * @skb: pointer to current skb being adjusted
1724  *
1725  * This function is an ixgbe specific version of __pskb_pull_tail.  The
1726  * main difference between this version and the original function is that
1727  * this function can make several assumptions about the state of things
1728  * that allow for significant optimizations versus the standard function.
1729  * As a result we can do things like drop a frag and maintain an accurate
1730  * truesize for the skb.
1731  */
1732 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1733                             struct sk_buff *skb)
1734 {
1735         struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1736         unsigned char *va;
1737         unsigned int pull_len;
1738
1739         /*
1740          * it is valid to use page_address instead of kmap since we are
1741          * working with pages allocated out of the lowmem pool per
1742          * alloc_page(GFP_ATOMIC)
1743          */
1744         va = skb_frag_address(frag);
1745
1746         /*
1747          * the linear header needs to hold the greater of ETH_HLEN or,
1748          * for frames shorter than 60 bytes, the 60 bytes padding requires.
1749          */
1750         pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
1751
1752         /* align pull length to size of long to optimize memcpy performance */
1753         skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1754
1755         /* update all of the pointers */
1756         skb_frag_size_sub(frag, pull_len);
1757         frag->page_offset += pull_len;
1758         skb->data_len -= pull_len;
1759         skb->tail += pull_len;
1760 }
1761
1762 /**
1763  * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
1764  * @rx_ring: rx descriptor ring packet is being transacted on
1765  * @skb: pointer to current skb being updated
1766  *
1767  * This function provides a basic DMA sync up for the first fragment of an
1768  * skb.  The reason for doing this is that the first fragment cannot be
1769  * unmapped until we have reached the end of packet descriptor for a buffer
1770  * chain.
1771  */
1772 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1773                                 struct sk_buff *skb)
1774 {
1775         /* if the page was released unmap it, else just sync our portion */
1776         if (unlikely(IXGBE_CB(skb)->page_released)) {
1777                 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
1778                                ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1779                 IXGBE_CB(skb)->page_released = false;
1780         } else {
1781                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1782
1783                 dma_sync_single_range_for_cpu(rx_ring->dev,
1784                                               IXGBE_CB(skb)->dma,
1785                                               frag->page_offset,
1786                                               ixgbe_rx_bufsz(rx_ring),
1787                                               DMA_FROM_DEVICE);
1788         }
1789         IXGBE_CB(skb)->dma = 0;
1790 }
1791
1792 /**
1793  * ixgbe_cleanup_headers - Correct corrupted or empty headers
1794  * @rx_ring: rx descriptor ring packet is being transacted on
1795  * @rx_desc: pointer to the EOP Rx descriptor
1796  * @skb: pointer to current skb being fixed
1797  *
1798  * Check for corrupted packet headers caused by senders on the local L2
1799  * embedded NIC switch not setting up their Tx Descriptors right.  These
1800  * should be very rare.
1801  *
1802  * Also address the case where we are pulling data in on pages only
1803  * and as such no data is present in the skb header.
1804  *
1805  * In addition if skb is not at least 60 bytes we need to pad it so that
1806  * it is large enough to qualify as a valid Ethernet frame.
1807  *
1808  * Returns true if an error was encountered and skb was freed.
1809  **/
1810 static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1811                                   union ixgbe_adv_rx_desc *rx_desc,
1812                                   struct sk_buff *skb)
1813 {
1814         struct net_device *netdev = rx_ring->netdev;
1815
1816         /* verify that the packet does not have any known errors */
1817         if (unlikely(ixgbe_test_staterr(rx_desc,
1818                                         IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1819             !(netdev->features & NETIF_F_RXALL))) {
1820                 dev_kfree_skb_any(skb);
1821                 return true;
1822         }
1823
1824         /* place header in linear portion of buffer */
1825         if (skb_is_nonlinear(skb))
1826                 ixgbe_pull_tail(rx_ring, skb);
1827
1828 #ifdef IXGBE_FCOE
1829         /* do not attempt to pad FCoE Frames as this will disrupt DDP */
1830         if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1831                 return false;
1832
1833 #endif
1834         /* if eth_skb_pad returns an error the skb was freed */
1835         if (eth_skb_pad(skb))
1836                 return true;
1837
1838         return false;
1839 }
1840
1841 /**
1842  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
1843  * @rx_ring: rx descriptor ring to store buffers on
1844  * @old_buff: donor buffer to have page reused
1845  *
1846  * Synchronizes page for reuse by the adapter
1847  **/
1848 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1849                                 struct ixgbe_rx_buffer *old_buff)
1850 {
1851         struct ixgbe_rx_buffer *new_buff;
1852         u16 nta = rx_ring->next_to_alloc;
1853
1854         new_buff = &rx_ring->rx_buffer_info[nta];
1855
1856         /* update, and store next to alloc */
1857         nta++;
1858         rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1859
1860         /* transfer page from old buffer to new buffer */
1861         *new_buff = *old_buff;
1862
1863         /* sync the buffer for use by the device */
1864         dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
1865                                          new_buff->page_offset,
1866                                          ixgbe_rx_bufsz(rx_ring),
1867                                          DMA_FROM_DEVICE);
1868 }
1869
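     /* pages on a remote NUMA node or from the pfmemalloc emergency
      * reserves are not worth recycling back onto the ring
      */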
1870 static inline bool ixgbe_page_is_reserved(struct page *page)
1871 {
1872         return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1873 }
1874
1875 /**
1876  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
1877  * @rx_ring: rx descriptor ring to transact packets on
1878  * @rx_buffer: buffer containing page to add
1879  * @rx_desc: descriptor containing length of buffer written by hardware
1880  * @skb: sk_buff to place the data into
1881  *
1882  * This function will add the data contained in rx_buffer->page to the skb.
1883  * This is done either through a direct copy if the data in the buffer is
1884  * less than the skb header size, otherwise it will just attach the page as
1885  * a frag to the skb.
1886  *
1887  * The function will then update the page offset if necessary and return
1888  * true if the buffer can be reused by the adapter.
1889  **/
1890 static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
1891                               struct ixgbe_rx_buffer *rx_buffer,
1892                               union ixgbe_adv_rx_desc *rx_desc,
1893                               struct sk_buff *skb)
1894 {
1895         struct page *page = rx_buffer->page;
1896         unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
1897 #if (PAGE_SIZE < 8192)
1898         unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
1899 #else
1900         unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
1901         unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
1902                                    ixgbe_rx_bufsz(rx_ring);
1903 #endif
1904
1905         if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
1906                 unsigned char *va = page_address(page) + rx_buffer->page_offset;
1907
1908                 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1909
1910                 /* page is not reserved, we can reuse buffer as-is */
1911                 if (likely(!ixgbe_page_is_reserved(page)))
1912                         return true;
1913
1914                 /* this page cannot be reused so discard it */
1915                 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1916                 return false;
1917         }
1918
1919         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1920                         rx_buffer->page_offset, size, truesize);
1921
1922         /* avoid re-using remote pages */
1923         if (unlikely(ixgbe_page_is_reserved(page)))
1924                 return false;
1925
1926 #if (PAGE_SIZE < 8192)
1927         /* if we are only owner of page we can reuse it */
1928         if (unlikely(page_count(page) != 1))
1929                 return false;
1930
1931         /* flip page offset to the other buffer: XOR with truesize
             * toggles between the two buffers sharing the page
             */
1932         rx_buffer->page_offset ^= truesize;
1933 #else
1934         /* move offset up to the next cache line */
1935         rx_buffer->page_offset += truesize;
1936
1937         if (rx_buffer->page_offset > last_offset)
1938                 return false;
1939 #endif
1940
1941         /* Even if we own the page, we are not allowed to use atomic_set()
1942          * This would break get_page_unless_zero() users.
1943          */
1944         atomic_inc(&page->_count);
1945
1946         return true;
1947 }
1948
1949 static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
1950                                              union ixgbe_adv_rx_desc *rx_desc)
1951 {
1952         struct ixgbe_rx_buffer *rx_buffer;
1953         struct sk_buff *skb;
1954         struct page *page;
1955
1956         rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1957         page = rx_buffer->page;
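             /* prefetch the struct page for write: we may bump its refcount
              * below when the buffer is recycled
              */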
1958         prefetchw(page);
1959
1960         skb = rx_buffer->skb;
1961
1962         if (likely(!skb)) {
1963                 void *page_addr = page_address(page) +
1964                                   rx_buffer->page_offset;
1965
1966                 /* prefetch first cache line of first page */
1967                 prefetch(page_addr);
1968 #if L1_CACHE_BYTES < 128
1969                 prefetch(page_addr + L1_CACHE_BYTES);
1970 #endif
1971
1972                 /* allocate a skb to store the frags */
1973                 skb = napi_alloc_skb(&rx_ring->q_vector->napi,
1974                                      IXGBE_RX_HDR_SIZE);
1975                 if (unlikely(!skb)) {
1976                         rx_ring->rx_stats.alloc_rx_buff_failed++;
1977                         return NULL;
1978                 }
1979
1980                 /*
1981                  * we will be copying header into skb->data in
1982                  * pskb_may_pull so it is in our interest to prefetch
1983                  * it now to avoid a possible cache miss
1984                  */
1985                 prefetchw(skb->data);
1986
1987                 /*
1988                  * Delay unmapping of the first packet. It carries the
1989                  * header information, HW may still access the header
1990                  * after the writeback.  Only unmap it when EOP is
1991                  * reached
1992                  */
1993                 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1994                         goto dma_sync;
1995
1996                 IXGBE_CB(skb)->dma = rx_buffer->dma;
1997         } else {
1998                 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
1999                         ixgbe_dma_sync_frag(rx_ring, skb);
2000
2001 dma_sync:
2002                 /* we are reusing so sync this buffer for CPU use */
2003                 dma_sync_single_range_for_cpu(rx_ring->dev,
2004                                               rx_buffer->dma,
2005                                               rx_buffer->page_offset,
2006                                               ixgbe_rx_bufsz(rx_ring),
2007                                               DMA_FROM_DEVICE);
2008
2009                 rx_buffer->skb = NULL;
2010         }
2011
2012         /* pull page into skb */
2013         if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
2014                 /* hand second half of page back to the ring */
2015                 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2016         } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
2017                 /* the page has been released from the ring */
2018                 IXGBE_CB(skb)->page_released = true;
2019         } else {
2020                 /* we are not reusing the buffer so unmap it */
2021                 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
2022                                ixgbe_rx_pg_size(rx_ring),
2023                                DMA_FROM_DEVICE);
2024         }
2025
2026         /* clear contents of buffer_info */
2027         rx_buffer->page = NULL;
2028
2029         return skb;
2030 }
2031
2032 /**
2033  * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2034  * @q_vector: structure containing interrupt and ring information
2035  * @rx_ring: rx descriptor ring to transact packets on
2036  * @budget: Total limit on number of packets to process
2037  *
2038  * This function provides a "bounce buffer" approach to Rx interrupt
2039  * processing.  The advantage to this is that on systems that have
2040  * expensive overhead for IOMMU access this provides a means of avoiding
2041  * it by maintaining the mapping of the page to the system.
2042  *
2043  * Returns amount of work completed
2044  **/
2045 static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2046                                struct ixgbe_ring *rx_ring,
2047                                const int budget)
2048 {
2049         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2050 #ifdef IXGBE_FCOE
2051         struct ixgbe_adapter *adapter = q_vector->adapter;
2052         int ddp_bytes;
2053         unsigned int mss = 0;
2054 #endif /* IXGBE_FCOE */
2055         u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2056
2057         while (likely(total_rx_packets < budget)) {
2058                 union ixgbe_adv_rx_desc *rx_desc;
2059                 struct sk_buff *skb;
2060
2061                 /* return some buffers to hardware, one at a time is too slow */
2062                 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2063                         ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2064                         cleaned_count = 0;
2065                 }
2066
2067                 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2068
2069                 if (!rx_desc->wb.upper.status_error)
2070                         break;
2071
2072                 /* This memory barrier is needed to keep us from reading
2073                  * any other fields out of the rx_desc until we know the
2074                  * descriptor has been written back
2075                  */
2076                 dma_rmb();
2077
2078                 /* retrieve a buffer from the ring */
2079                 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
2080
2081                 /* exit if we failed to retrieve a buffer */
2082                 if (!skb)
2083                         break;
2084
2085                 cleaned_count++;
2086
2087                 /* place incomplete frames back on ring for completion */
2088                 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2089                         continue;
2090
2091                 /* verify the packet layout is correct */
2092                 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2093                         continue;
2094
2095                 /* probably a little skewed due to removing CRC */
2096                 total_rx_bytes += skb->len;
2097
2098                 /* populate checksum, timestamp, VLAN, and protocol */
2099                 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2100
2101 #ifdef IXGBE_FCOE
2102                 /* if ddp, not passing to ULD unless for FCP_RSP or error */
2103                 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2104                         ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2105                         /* include DDPed FCoE data */
2106                         if (ddp_bytes > 0) {
2107                                 if (!mss) {
2108                                         mss = rx_ring->netdev->mtu -
2109                                                 sizeof(struct fcoe_hdr) -
2110                                                 sizeof(struct fc_frame_header) -
2111                                                 sizeof(struct fcoe_crc_eof);
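                                             /* mss only approximates the
                                              * FCoE payload per frame so the
                                              * DDPed byte count can be turned
                                              * into a packet count for the
                                              * stats below
                                              */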
2112                                         if (mss > 512)
2113                                                 mss &= ~511;
2114                                 }
2115                                 total_rx_bytes += ddp_bytes;
2116                                 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2117                                                                  mss);
2118                         }
2119                         if (!ddp_bytes) {
2120                                 dev_kfree_skb_any(skb);
2121                                 continue;
2122                         }
2123                 }
2124
2125 #endif /* IXGBE_FCOE */
2126                 skb_mark_napi_id(skb, &q_vector->napi);
2127                 ixgbe_rx_skb(q_vector, skb);
2128
2129                 /* update budget accounting */
2130                 total_rx_packets++;
2131         }
2132
2133         u64_stats_update_begin(&rx_ring->syncp);
2134         rx_ring->stats.packets += total_rx_packets;
2135         rx_ring->stats.bytes += total_rx_bytes;
2136         u64_stats_update_end(&rx_ring->syncp);
2137         q_vector->rx.total_packets += total_rx_packets;
2138         q_vector->rx.total_bytes += total_rx_bytes;
2139
2140         return total_rx_packets;
2141 }
2142
2143 #ifdef CONFIG_NET_RX_BUSY_POLL
2144 /* must be called with local_bh_disable()d */
2145 static int ixgbe_low_latency_recv(struct napi_struct *napi)
2146 {
2147         struct ixgbe_q_vector *q_vector =
2148                         container_of(napi, struct ixgbe_q_vector, napi);
2149         struct ixgbe_adapter *adapter = q_vector->adapter;
2150         struct ixgbe_ring  *ring;
2151         int found = 0;
2152
2153         if (test_bit(__IXGBE_DOWN, &adapter->state))
2154                 return LL_FLUSH_FAILED;
2155
2156         if (!ixgbe_qv_lock_poll(q_vector))
2157                 return LL_FLUSH_BUSY;
2158
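             /* poll with a tiny fixed budget (4 packets) so a busy-polling
              * task never spends long inside the driver
              */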
2159         ixgbe_for_each_ring(ring, q_vector->rx) {
2160                 found = ixgbe_clean_rx_irq(q_vector, ring, 4);
2161 #ifdef BP_EXTENDED_STATS
2162                 if (found)
2163                         ring->stats.cleaned += found;
2164                 else
2165                         ring->stats.misses++;
2166 #endif
2167                 if (found)
2168                         break;
2169         }
2170
2171         ixgbe_qv_unlock_poll(q_vector);
2172
2173         return found;
2174 }
2175 #endif  /* CONFIG_NET_RX_BUSY_POLL */
2176
2177 /**
2178  * ixgbe_configure_msix - Configure MSI-X hardware
2179  * @adapter: board private structure
2180  *
2181  * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
2182  * interrupts.
2183  **/
2184 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2185 {
2186         struct ixgbe_q_vector *q_vector;
2187         int v_idx;
2188         u32 mask;
2189
2190         /* Populate MSIX to EITR Select */
2191         if (adapter->num_vfs > 32) {
2192                 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2193                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2194         }
2195
2196         /*
2197          * Populate the IVAR table and set the ITR values to the
2198          * corresponding register.
2199          */
2200         for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2201                 struct ixgbe_ring *ring;
2202                 q_vector = adapter->q_vector[v_idx];
2203
2204                 ixgbe_for_each_ring(ring, q_vector->rx)
2205                         ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2206
2207                 ixgbe_for_each_ring(ring, q_vector->tx)
2208                         ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2209
2210                 ixgbe_write_eitr(q_vector);
2211         }
2212
2213         switch (adapter->hw.mac.type) {
2214         case ixgbe_mac_82598EB:
2215                 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2216                                v_idx);
2217                 break;
2218         case ixgbe_mac_82599EB:
2219         case ixgbe_mac_X540:
2220         case ixgbe_mac_X550:
2221         case ixgbe_mac_X550EM_x:
2222                 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2223                 break;
2224         default:
2225                 break;
2226         }
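             /* v_idx now indexes the misc (other causes) vector mapped
              * above; cap its rate with a fixed EITR value.  1950 in EITR
              * units (~0.25 us each, per the itr >> 2 usec conversion in
              * ixgbe_update_itr) works out to roughly 2K ints/s.
              */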
2227         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2228
2229         /* set up to autoclear timer, and the vectors */
2230         mask = IXGBE_EIMS_ENABLE_MASK;
2231         mask &= ~(IXGBE_EIMS_OTHER |
2232                   IXGBE_EIMS_MAILBOX |
2233                   IXGBE_EIMS_LSC);
2234
2235         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2236 }
2237
2238 enum latency_range {
2239         lowest_latency = 0,
2240         low_latency = 1,
2241         bulk_latency = 2,
2242         latency_invalid = 255
2243 };
2244
2245 /**
2246  * ixgbe_update_itr - update the dynamic ITR value based on statistics
2247  * @q_vector: structure containing interrupt and ring information
2248  * @ring_container: structure containing ring performance data
2249  *
2250  *      Stores a new ITR value based on packets and byte
2251  *      counts during the last interrupt.  The advantage of per interrupt
2252  *      computation is faster updates and more accurate ITR for the current
2253  *      traffic pattern.  Constants in this function were computed
2254  *      based on theoretical maximum wire speed and thresholds were set based
2255  *      on testing data as well as attempting to minimize response time
2256  *      while increasing bulk throughput.
2257  *      This functionality is controlled by the InterruptThrottleRate module
2258  *      parameter (see ixgbe_param.c)
2259  **/
2260 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2261                              struct ixgbe_ring_container *ring_container)
2262 {
2263         int bytes = ring_container->total_bytes;
2264         int packets = ring_container->total_packets;
2265         u32 timepassed_us;
2266         u64 bytes_perint;
2267         u8 itr_setting = ring_container->itr;
2268
2269         if (packets == 0)
2270                 return;
2271
2272         /* simple throttlerate management
2273          *   0-10MB/s   lowest (100000 ints/s)
2274          *  10-20MB/s   low    (20000 ints/s)
2275          *  20-1249MB/s bulk   (12000 ints/s)
2276          */
2277         /* what was last interrupt timeslice? */
2278         timepassed_us = q_vector->itr >> 2;
2279         if (timepassed_us == 0)
2280                 return;
2281
2282         bytes_perint = bytes / timepassed_us; /* bytes/usec */
2283
2284         switch (itr_setting) {
2285         case lowest_latency:
2286                 if (bytes_perint > 10)
2287                         itr_setting = low_latency;
2288                 break;
2289         case low_latency:
2290                 if (bytes_perint > 20)
2291                         itr_setting = bulk_latency;
2292                 else if (bytes_perint <= 10)
2293                         itr_setting = lowest_latency;
2294                 break;
2295         case bulk_latency:
2296                 if (bytes_perint <= 20)
2297                         itr_setting = low_latency;
2298                 break;
2299         }
2300
2301         /* clear work counters since we have the values we need */
2302         ring_container->total_bytes = 0;
2303         ring_container->total_packets = 0;
2304
2305         /* write updated itr to ring container */
2306         ring_container->itr = itr_setting;
2307 }
2308
2309 /**
2310  * ixgbe_write_eitr - write EITR register in hardware specific way
2311  * @q_vector: structure containing interrupt and ring information
2312  *
2313  * This function is made to be called by ethtool and by the driver
2314  * when it needs to update EITR registers at runtime.  Hardware
2315  * specific quirks/differences are taken care of here.
2316  */
2317 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2318 {
2319         struct ixgbe_adapter *adapter = q_vector->adapter;
2320         struct ixgbe_hw *hw = &adapter->hw;
2321         int v_idx = q_vector->v_idx;
2322         u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2323
2324         switch (adapter->hw.mac.type) {
2325         case ixgbe_mac_82598EB:
2326                 /* must write high and low 16 bits to reset counter */
2327                 itr_reg |= (itr_reg << 16);
2328                 break;
2329         case ixgbe_mac_82599EB:
2330         case ixgbe_mac_X540:
2331         case ixgbe_mac_X550:
2332         case ixgbe_mac_X550EM_x:
2333                 /*
2334                  * set the WDIS bit to not clear the timer bits and cause an
2335                  * immediate assertion of the interrupt
2336                  */
2337                 itr_reg |= IXGBE_EITR_CNT_WDIS;
2338                 break;
2339         default:
2340                 break;
2341         }
2342         IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2343 }
2344
2345 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2346 {
2347         u32 new_itr = q_vector->itr;
2348         u8 current_itr;
2349
2350         ixgbe_update_itr(q_vector, &q_vector->tx);
2351         ixgbe_update_itr(q_vector, &q_vector->rx);
2352
2353         current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
2354
2355         switch (current_itr) {
2356         /* counts and packets in update_itr are dependent on these numbers */
2357         case lowest_latency:
2358                 new_itr = IXGBE_100K_ITR;
2359                 break;
2360         case low_latency:
2361                 new_itr = IXGBE_20K_ITR;
2362                 break;
2363         case bulk_latency:
2364                 new_itr = IXGBE_12K_ITR;
2365                 break;
2366         default:
2367                 break;
2368         }
2369
2370         if (new_itr != q_vector->itr) {
2371                 /* do an exponential smoothing: blend the old and new
                     * interrupt rates 9:1 (a weighted harmonic mean of the
                     * interval values) so the ITR moves only gradually
                     */
2372                 new_itr = (10 * new_itr * q_vector->itr) /
2373                           ((9 * new_itr) + q_vector->itr);
2374
2375                 /* save the algorithm value here */
2376                 q_vector->itr = new_itr;
2377
2378                 ixgbe_write_eitr(q_vector);
2379         }
2380 }
2381
2382 /**
2383  * ixgbe_check_overtemp_subtask - check for over temperature
2384  * @adapter: pointer to adapter
2385  **/
2386 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2387 {
2388         struct ixgbe_hw *hw = &adapter->hw;
2389         u32 eicr = adapter->interrupt_event;
2390
2391         if (test_bit(__IXGBE_DOWN, &adapter->state))
2392                 return;
2393
2394         if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2395             !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2396                 return;
2397
2398         adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2399
2400         switch (hw->device_id) {
2401         case IXGBE_DEV_ID_82599_T3_LOM:
2402                 /*
2403                  * Since the warning interrupt is for both ports, there
2404                  * is no need to check whether this interrupt was meant
2405                  * for our port.  And because we may have missed the
2406                  * interrupt entirely, we always have to check whether
2407                  * we got an LSC as well.
2408                  */
2409                 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2410                     !(eicr & IXGBE_EICR_LSC))
2411                         return;
2412
2413                 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2414                         u32 speed;
2415                         bool link_up = false;
2416
2417                         hw->mac.ops.check_link(hw, &speed, &link_up, false);
2418
2419                         if (link_up)
2420                                 return;
2421                 }
2422
2423                 /* bail out if this is not an overtemp condition */
2424                 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2425                         return;
2426
2427                 break;
2428         default:
2429                 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2430                         return;
2431                 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2432                         return;
2433                 break;
2434         }
2435         e_crit(drv, "%s\n", ixgbe_overheat_msg);
2436
2437         adapter->interrupt_event = 0;
2438 }
2439
2440 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2441 {
2442         struct ixgbe_hw *hw = &adapter->hw;
2443
2444         if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2445             (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2446                 e_crit(probe, "Fan has stopped, replace the adapter\n");
2447                 /* write to clear the interrupt */
2448                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2449         }
2450 }
2451
2452 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2453 {
2454         struct ixgbe_hw *hw = &adapter->hw;
2455
2456         if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2457                 return;
2458
2459         switch (adapter->hw.mac.type) {
2460         case ixgbe_mac_82599EB:
2461                 /*
2462                  * Need to check link state so complete overtemp check
2463                  * on service task
2464                  */
2465                 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2466                      (eicr & IXGBE_EICR_LSC)) &&
2467                     (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2468                         adapter->interrupt_event = eicr;
2469                         adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2470                         ixgbe_service_event_schedule(adapter);
2471                         return;
2472                 }
2473                 return;
2474         case ixgbe_mac_X540:
2475                 if (!(eicr & IXGBE_EICR_TS))
2476                         return;
2477                 break;
2478         default:
2479                 return;
2480         }
2481
2482         e_crit(drv, "%s\n", ixgbe_overheat_msg);
2483 }
2484
2485 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2486 {
2487         switch (hw->mac.type) {
2488         case ixgbe_mac_82598EB:
2489                 if (hw->phy.type == ixgbe_phy_nl)
2490                         return true;
2491                 return false;
2492         case ixgbe_mac_82599EB:
2493         case ixgbe_mac_X550EM_x:
2494                 switch (hw->mac.ops.get_media_type(hw)) {
2495                 case ixgbe_media_type_fiber:
2496                 case ixgbe_media_type_fiber_qsfp:
2497                         return true;
2498                 default:
2499                         return false;
2500                 }
2501         default:
2502                 return false;
2503         }
2504 }
2505
2506 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2507 {
2508         struct ixgbe_hw *hw = &adapter->hw;
2509         u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2510
2511         if (!ixgbe_is_sfp(hw))
2512                 return;
2513
2514         /* Later MACs use a different SDP */
2515         if (hw->mac.type >= ixgbe_mac_X540)
2516                 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2517
2518         if (eicr & eicr_mask) {
2519                 /* Clear the interrupt */
2520                 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2521                 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2522                         adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2523                         adapter->sfp_poll_time = 0;
2524                         ixgbe_service_event_schedule(adapter);
2525                 }
2526         }
2527
2528         if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2529             (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2530                 /* Clear the interrupt */
2531                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2532                 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2533                         adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2534                         ixgbe_service_event_schedule(adapter);
2535                 }
2536         }
2537 }
2538
2539 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2540 {
2541         struct ixgbe_hw *hw = &adapter->hw;
2542
2543         adapter->lsc_int++;
2544         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2545         adapter->link_check_timeout = jiffies;
2546         if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2547                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2548                 IXGBE_WRITE_FLUSH(hw);
2549                 ixgbe_service_event_schedule(adapter);
2550         }
2551 }
2552
2553 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2554                                            u64 qmask)
2555 {
2556         u32 mask;
2557         struct ixgbe_hw *hw = &adapter->hw;
2558
2559         switch (hw->mac.type) {
2560         case ixgbe_mac_82598EB:
2561                 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2562                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2563                 break;
2564         case ixgbe_mac_82599EB:
2565         case ixgbe_mac_X540:
2566         case ixgbe_mac_X550:
2567         case ixgbe_mac_X550EM_x:
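                 /* queue bits span two 32-bit EIMS_EX registers on these
                  * MACs; write each half only when some bit in it is set
                  */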
2568                 mask = (qmask & 0xFFFFFFFF);
2569                 if (mask)
2570                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2571                 mask = (qmask >> 32);
2572                 if (mask)
2573                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2574                 break;
2575         default:
2576                 break;
2577         }
2578         /* skip the flush */
2579 }
2580
2581 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2582                                             u64 qmask)
2583 {
2584         u32 mask;
2585         struct ixgbe_hw *hw = &adapter->hw;
2586
2587         switch (hw->mac.type) {
2588         case ixgbe_mac_82598EB:
2589                 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2590                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2591                 break;
2592         case ixgbe_mac_82599EB:
2593         case ixgbe_mac_X540:
2594         case ixgbe_mac_X550:
2595         case ixgbe_mac_X550EM_x:
2596                 mask = (qmask & 0xFFFFFFFF);
2597                 if (mask)
2598                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2599                 mask = (qmask >> 32);
2600                 if (mask)
2601                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2602                 break;
2603         default:
2604                 break;
2605         }
2606         /* skip the flush */
2607 }
2608
2609 /**
2610  * ixgbe_irq_enable - Enable default interrupt generation settings
2611  * @adapter: board private structure
2612  **/
2613 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2614                                     bool flush)
2615 {
2616         struct ixgbe_hw *hw = &adapter->hw;
2617         u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2618
2619         /* don't reenable LSC while waiting for link */
2620         if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2621                 mask &= ~IXGBE_EIMS_LSC;
2622
2623         if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2624                 switch (adapter->hw.mac.type) {
2625                 case ixgbe_mac_82599EB:
2626                         mask |= IXGBE_EIMS_GPI_SDP0(hw);
2627                         break;
2628                 case ixgbe_mac_X540:
2629                 case ixgbe_mac_X550:
2630                 case ixgbe_mac_X550EM_x:
2631                         mask |= IXGBE_EIMS_TS;
2632                         break;
2633                 default:
2634                         break;
2635                 }
2636         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2637                 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2638         switch (adapter->hw.mac.type) {
2639         case ixgbe_mac_82599EB:
2640                 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2641                 mask |= IXGBE_EIMS_GPI_SDP2(hw);
2642                 /* fall through */
2643         case ixgbe_mac_X540:
2644         case ixgbe_mac_X550:
2645         case ixgbe_mac_X550EM_x:
2646                 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2647                         mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
2648                 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
2649                         mask |= IXGBE_EICR_GPI_SDP0_X540;
2650                 mask |= IXGBE_EIMS_ECC;
2651                 mask |= IXGBE_EIMS_MAILBOX;
2652                 break;
2653         default:
2654                 break;
2655         }
2656
2657         if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2658             !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
2659                 mask |= IXGBE_EIMS_FLOW_DIR;
2660
2661         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2662         if (queues)
2663                 ixgbe_irq_enable_queues(adapter, ~0);
2664         if (flush)
2665                 IXGBE_WRITE_FLUSH(&adapter->hw);
2666 }
2667
2668 static irqreturn_t ixgbe_msix_other(int irq, void *data)
2669 {
2670         struct ixgbe_adapter *adapter = data;
2671         struct ixgbe_hw *hw = &adapter->hw;
2672         u32 eicr;
2673
2674         /*
2675          * Workaround for Silicon errata.  Use clear-by-write instead
2676          * of clear-by-read.  Reading with EICS will return the
2677          * interrupt causes without clearing, which is done later
2678          * with the write to EICR.
2679          */
2680         eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2681
2682         /* The lower 16bits of the EICR register are for the queue interrupts
2683          * which should be masked here so they are not accidentally cleared if
2684          * the bits are high when ixgbe_msix_other is called. There is a race
2685          * condition otherwise which results in possible performance loss
2686          * especially if the ixgbe_msix_other interrupt is triggering
2687          * consistently (as it would when PPS is turned on for the X540 device)
2688          */
2689         eicr &= 0xFFFF0000;
2690
2691         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2692
2693         if (eicr & IXGBE_EICR_LSC)
2694                 ixgbe_check_lsc(adapter);
2695
2696         if (eicr & IXGBE_EICR_MAILBOX)
2697                 ixgbe_msg_task(adapter);
2698
2699         switch (hw->mac.type) {
2700         case ixgbe_mac_82599EB:
2701         case ixgbe_mac_X540:
2702         case ixgbe_mac_X550:
2703         case ixgbe_mac_X550EM_x:
2704                 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
2705                     (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2706                         adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
2707                         ixgbe_service_event_schedule(adapter);
2708                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
2709                                         IXGBE_EICR_GPI_SDP0_X540);
2710                 }
2711                 if (eicr & IXGBE_EICR_ECC) {
2712                         e_info(link, "Received ECC Err, initiating reset\n");
2713                         adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
2714                         ixgbe_service_event_schedule(adapter);
2715                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2716                 }
2717                 /* Handle Flow Director Full threshold interrupt */
2718                 if (eicr & IXGBE_EICR_FLOW_DIR) {
2719                         int reinit_count = 0;
2720                         int i;
2721                         for (i = 0; i < adapter->num_tx_queues; i++) {
2722                                 struct ixgbe_ring *ring = adapter->tx_ring[i];
2723                                 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
2724                                                        &ring->state))
2725                                         reinit_count++;
2726                         }
2727                         if (reinit_count) {
2728                                 /* no more flow director interrupts until after init */
2729                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2730                                 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
2731                                 ixgbe_service_event_schedule(adapter);
2732                         }
2733                 }
2734                 ixgbe_check_sfp_event(adapter, eicr);
2735                 ixgbe_check_overtemp_event(adapter, eicr);
2736                 break;
2737         default:
2738                 break;
2739         }
2740
2741         ixgbe_check_fan_failure(adapter, eicr);
2742
2743         if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2744                 ixgbe_ptp_check_pps_event(adapter, eicr);
2745
2746         /* re-enable the original interrupt state, no lsc, no queues */
2747         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2748                 ixgbe_irq_enable(adapter, false, false);
2749
2750         return IRQ_HANDLED;
2751 }
2752
2753 static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
2754 {
2755         struct ixgbe_q_vector *q_vector = data;
2756
2757         /* EIAM disabled interrupts (on this vector) for us */
2758
2759         if (q_vector->rx.ring || q_vector->tx.ring)
2760                 napi_schedule(&q_vector->napi);
2761
2762         return IRQ_HANDLED;
2763 }
2764
2765 /**
2766  * ixgbe_poll - NAPI Rx polling callback
2767  * @napi: structure for representing this polling device
2768  * @budget: how many packets driver is allowed to clean
2769  *
2770  * This function is used for legacy and MSI interrupts, in NAPI mode
2771  **/
2772 int ixgbe_poll(struct napi_struct *napi, int budget)
2773 {
2774         struct ixgbe_q_vector *q_vector =
2775                                 container_of(napi, struct ixgbe_q_vector, napi);
2776         struct ixgbe_adapter *adapter = q_vector->adapter;
2777         struct ixgbe_ring *ring;
2778         int per_ring_budget;
2779         bool clean_complete = true;
2780
2781 #ifdef CONFIG_IXGBE_DCA
2782         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2783                 ixgbe_update_dca(q_vector);
2784 #endif
2785
2786         ixgbe_for_each_ring(ring, q_vector->tx)
2787                 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
2788
2789         if (!ixgbe_qv_lock_napi(q_vector))
2790                 return budget;
2791
2792         /* attempt to distribute budget to each queue fairly, but don't allow
2793          * the budget to go below 1 because we'll exit polling */
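             /* e.g. a budget of 64 shared by 3 Rx rings gives
              * max(64/3, 1) = 21 descriptors per ring
              */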
2794         if (q_vector->rx.count > 1)
2795                 per_ring_budget = max(budget/q_vector->rx.count, 1);
2796         else
2797                 per_ring_budget = budget;
2798
2799         ixgbe_for_each_ring(ring, q_vector->rx)
2800                 clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
2801                                    per_ring_budget) < per_ring_budget);
2802
2803         ixgbe_qv_unlock_napi(q_vector);
2804         /* If all work not completed, return budget and keep polling */
2805         if (!clean_complete)
2806                 return budget;
2807
2808         /* all work done, exit the polling mode */
2809         napi_complete(napi);
2810         if (adapter->rx_itr_setting & 1)
2811                 ixgbe_set_itr(q_vector);
2812         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2813                 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
2814
2815         return 0;
2816 }
2817
2818 /**
2819  * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2820  * @adapter: board private structure
2821  *
2822  * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2823  * interrupts from the kernel.
2824  **/
2825 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2826 {
2827         struct net_device *netdev = adapter->netdev;
2828         int vector, err;
2829         int ri = 0, ti = 0;
2830
2831         for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2832                 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2833                 struct msix_entry *entry = &adapter->msix_entries[vector];
2834
2835                 if (q_vector->tx.ring && q_vector->rx.ring) {
2836                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2837                                  "%s-%s-%d", netdev->name, "TxRx", ri++);
2838                         ti++;
2839                 } else if (q_vector->rx.ring) {
2840                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2841                                  "%s-%s-%d", netdev->name, "rx", ri++);
2842                 } else if (q_vector->tx.ring) {
2843                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2844                                  "%s-%s-%d", netdev->name, "tx", ti++);
2845                 } else {
2846                         /* skip this unused q_vector */
2847                         continue;
2848                 }
2849                 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2850                                   q_vector->name, q_vector);
2851                 if (err) {
2852                         e_err(probe, "request_irq failed for MSIX interrupt. "
2853                               "Error: %d\n", err);
2854                         goto free_queue_irqs;
2855                 }
2856                 /* If Flow Director is enabled, set interrupt affinity */
2857                 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2858                         /* assign the mask for this irq */
2859                         irq_set_affinity_hint(entry->vector,
2860                                               &q_vector->affinity_mask);
2861                 }
2862         }
2863
2864         err = request_irq(adapter->msix_entries[vector].vector,
2865                           ixgbe_msix_other, 0, netdev->name, adapter);
2866         if (err) {
2867                 e_err(probe, "request_irq for msix_other failed: %d\n", err);
2868                 goto free_queue_irqs;
2869         }
2870
2871         return 0;
2872
2873 free_queue_irqs:
2874         while (vector) {
2875                 vector--;
2876                 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
2877                                       NULL);
2878                 free_irq(adapter->msix_entries[vector].vector,
2879                          adapter->q_vector[vector]);
2880         }
2881         adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2882         pci_disable_msix(adapter->pdev);
2883         kfree(adapter->msix_entries);
2884         adapter->msix_entries = NULL;
2885         return err;
2886 }
2887
2888 /**
2889  * ixgbe_intr - legacy mode Interrupt Handler
2890  * @irq: interrupt number
2891  * @data: pointer to a network interface device structure
2892  **/
2893 static irqreturn_t ixgbe_intr(int irq, void *data)
2894 {
2895         struct ixgbe_adapter *adapter = data;
2896         struct ixgbe_hw *hw = &adapter->hw;
2897         struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2898         u32 eicr;
2899
2900         /*
2901          * Workaround for silicon errata #26 on 82598.  Mask the interrupt
2902          * before the read of EICR.
2903          */
2904         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2905
2906         /* for NAPI, EIAM auto-masks the tx/rx interrupt bits on read,
2907          * therefore no explicit interrupt disable is necessary */
2908         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2909         if (!eicr) {
2910                 /*
2911                  * shared interrupt alert!
2912                  * make sure interrupts are enabled because the read will
2913                  * have disabled interrupts due to EIAM.  Finish the
2914                  * workaround for the silicon errata on 82598: unmask
2915                  * the interrupt that we masked before the EICR read.
2916                  */
2917                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2918                         ixgbe_irq_enable(adapter, true, true);
2919                 return IRQ_NONE;        /* Not our interrupt */
2920         }
2921
2922         if (eicr & IXGBE_EICR_LSC)
2923                 ixgbe_check_lsc(adapter);
2924
2925         switch (hw->mac.type) {
2926         case ixgbe_mac_82599EB:
2927                 ixgbe_check_sfp_event(adapter, eicr);
2928                 /* Fall through */
2929         case ixgbe_mac_X540:
2930         case ixgbe_mac_X550:
2931         case ixgbe_mac_X550EM_x:
2932                 if (eicr & IXGBE_EICR_ECC) {
2933                         e_info(link, "Received ECC Err, initiating reset\n");
2934                         adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
2935                         ixgbe_service_event_schedule(adapter);
2936                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2937                 }
2938                 ixgbe_check_overtemp_event(adapter, eicr);
2939                 break;
2940         default:
2941                 break;
2942         }
2943
2944         ixgbe_check_fan_failure(adapter, eicr);
2945         if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2946                 ixgbe_ptp_check_pps_event(adapter, eicr);
2947
2948         /* would disable interrupts here but EIAM disabled it */
2949         napi_schedule(&q_vector->napi);
2950
2951         /*
2952          * re-enable link(maybe) and non-queue interrupts, no flush.
2953          * ixgbe_poll will re-enable the queue interrupts
2954          */
2955         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2956                 ixgbe_irq_enable(adapter, false, false);
2957
2958         return IRQ_HANDLED;
2959 }
2960
2961 /**
2962  * ixgbe_request_irq - initialize interrupts
2963  * @adapter: board private structure
2964  *
2965  * Attempts to configure interrupts using the best available
2966  * capabilities of the hardware and kernel.
2967  **/
2968 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2969 {
2970         struct net_device *netdev = adapter->netdev;
2971         int err;
2972
2973         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2974                 err = ixgbe_request_msix_irqs(adapter);
2975         else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
2976                 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2977                                   netdev->name, adapter);
2978         else
2979                 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2980                                   netdev->name, adapter);
2981
2982         if (err)
2983                 e_err(probe, "request_irq failed, Error %d\n", err);
2984
2985         return err;
2986 }
2987
2988 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2989 {
2990         int vector;
2991
2992         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2993                 free_irq(adapter->pdev->irq, adapter);
2994                 return;
2995         }
2996
2997         for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2998                 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2999                 struct msix_entry *entry = &adapter->msix_entries[vector];
3000
3001                 /* free only the irqs that were actually requested */
3002                 if (!q_vector->rx.ring && !q_vector->tx.ring)
3003                         continue;
3004
3005                 /* clear the affinity_mask in the IRQ descriptor */
3006                 irq_set_affinity_hint(entry->vector, NULL);
3007
3008                 free_irq(entry->vector, q_vector);
3009         }
3010
3011         free_irq(adapter->msix_entries[vector++].vector, adapter);
3012 }
3013
3014 /**
3015  * ixgbe_irq_disable - Mask off interrupt generation on the NIC
3016  * @adapter: board private structure
3017  **/
3018 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3019 {
3020         switch (adapter->hw.mac.type) {
3021         case ixgbe_mac_82598EB:
3022                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3023                 break;
3024         case ixgbe_mac_82599EB:
3025         case ixgbe_mac_X540:
3026         case ixgbe_mac_X550:
3027         case ixgbe_mac_X550EM_x:
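                     /* the queue vectors are masked via the extended
                      * EIMC_EX registers below; 0xFFFF0000 masks only the
                      * non-queue cause bits in the base EIMC
                      */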
3028                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3029                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3030                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3031                 break;
3032         default:
3033                 break;
3034         }
3035         IXGBE_WRITE_FLUSH(&adapter->hw);
3036         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3037                 int vector;
3038
3039                 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3040                         synchronize_irq(adapter->msix_entries[vector].vector);
3041
3042                 synchronize_irq(adapter->msix_entries[vector++].vector);
3043         } else {
3044                 synchronize_irq(adapter->pdev->irq);
3045         }
3046 }
3047
3048 /**
3049  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
3050  * @adapter: board private structure
3051  **/
3052 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3053 {
3054         struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3055
3056         ixgbe_write_eitr(q_vector);
3057
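             /* route Rx queue 0 (direction 0) and Tx queue 0 (direction 1)
              * to interrupt vector 0
              */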
3058         ixgbe_set_ivar(adapter, 0, 0, 0);
3059         ixgbe_set_ivar(adapter, 1, 0, 0);
3060
3061         e_info(hw, "Legacy interrupt IVAR setup done\n");
3062 }
3063
3064 /**
3065  * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
3066  * @adapter: board private structure
3067  * @ring: structure containing ring specific data
3068  *
3069  * Configure the Tx descriptor ring after a reset.
3070  **/
3071 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3072                              struct ixgbe_ring *ring)
3073 {
3074         struct ixgbe_hw *hw = &adapter->hw;
3075         u64 tdba = ring->dma;
3076         int wait_loop = 10;
3077         u32 txdctl = IXGBE_TXDCTL_ENABLE;
3078         u8 reg_idx = ring->reg_idx;
3079
3080         /* disable queue to avoid issues while updating state */
3081         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3082         IXGBE_WRITE_FLUSH(hw);
3083
3084         IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3085                         (tdba & DMA_BIT_MASK(32)));
3086         IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3087         IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3088                         ring->count * sizeof(union ixgbe_adv_tx_desc));
3089         IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3090         IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3091         ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3092
3093         /*
3094          * set WTHRESH to encourage burst writeback; it should not be set
3095          * higher than 1 when:
3096          * - ITR is 0 as it could cause false TX hangs
3097          * - ITR is set to > 100k int/sec and BQL is enabled
3098          *
3099          * In order to avoid issues, WTHRESH + PTHRESH should always be equal
3100          * to or less than the number of on-chip descriptors, which is
3101          * currently 40.
3102          */
3103         if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3104                 txdctl |= (1 << 16);    /* WTHRESH = 1 */
3105         else
3106                 txdctl |= (8 << 16);    /* WTHRESH = 8 */
3107
3108         /*
3109          * Setting PTHRESH to 32 both improves performance
3110          * and avoids a TX hang with DFP enabled
3111          */
3112         txdctl |= (1 << 8) |    /* HTHRESH = 1 */
3113                    32;          /* PTHRESH = 32 */
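             /* per the shifts above, TXDCTL packs PTHRESH in bits 6:0,
              * HTHRESH in bits 14:8 and WTHRESH in bits 22:16
              */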
3114
3115         /* reinitialize flowdirector state */
3116         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3117                 ring->atr_sample_rate = adapter->atr_sample_rate;
3118                 ring->atr_count = 0;
3119                 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3120         } else {
3121                 ring->atr_sample_rate = 0;
3122         }
3123
3124         /* initialize XPS */
3125         if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3126                 struct ixgbe_q_vector *q_vector = ring->q_vector;
3127
3128                 if (q_vector)
3129                         netif_set_xps_queue(ring->netdev,
3130                                             &q_vector->affinity_mask,
3131                                             ring->queue_index);
3132         }
3133
3134         clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3135
3136         /* enable queue */
3137         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3138
3139         /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3140         if (hw->mac.type == ixgbe_mac_82598EB &&
3141             !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3142                 return;
3143
3144         /* poll to verify queue is enabled */
3145         do {
3146                 usleep_range(1000, 2000);
3147                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3148         } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3149         if (!wait_loop)
3150                 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
3151 }
3152
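     /**
      * ixgbe_setup_mtqc - program the Tx queue and pool layout (MTQC)
      * @adapter: board private structure
      *
      * Selects the transmit pool and traffic class layout based on the
      * SR-IOV and DCB configuration.  The 82598 has no MTQC register,
      * hence the early return below.
      */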
3153 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3154 {
3155         struct ixgbe_hw *hw = &adapter->hw;
3156         u32 rttdcs, mtqc;
3157         u8 tcs = netdev_get_num_tc(adapter->netdev);
3158
3159         if (hw->mac.type == ixgbe_mac_82598EB)
3160                 return;
3161
3162         /* disable the arbiter while setting MTQC */
3163         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3164         rttdcs |= IXGBE_RTTDCS_ARBDIS;
3165         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3166
3167         /* set transmit pool layout */
3168         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3169                 mtqc = IXGBE_MTQC_VT_ENA;
3170                 if (tcs > 4)
3171                         mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3172                 else if (tcs > 1)
3173                         mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3174                 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3175                         mtqc |= IXGBE_MTQC_32VF;
3176                 else
3177                         mtqc |= IXGBE_MTQC_64VF;
3178         } else {
3179                 if (tcs > 4)
3180                         mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3181                 else if (tcs > 1)
3182                         mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3183                 else
3184                         mtqc = IXGBE_MTQC_64Q_1PB;
3185         }
3186
3187         IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3188
3189         /* Enable Security TX Buffer IFG for multiple packet buffers */
3190         if (tcs) {
3191                 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3192                 sectx |= IXGBE_SECTX_DCB;
3193                 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3194         }
3195
3196         /* re-enable the arbiter */
3197         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3198         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3199 }
3200
3201 /**
3202  * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
3203  * @adapter: board private structure
3204  *
3205  * Configure the Tx unit of the MAC after a reset.
3206  **/
3207 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3208 {
3209         struct ixgbe_hw *hw = &adapter->hw;
3210         u32 dmatxctl;
3211         u32 i;
3212
3213         ixgbe_setup_mtqc(adapter);
3214
3215         if (hw->mac.type != ixgbe_mac_82598EB) {
3216                 /* DMATXCTL.EN must be before Tx queues are enabled */
3217                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3218                 dmatxctl |= IXGBE_DMATXCTL_TE;
3219                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3220         }
3221
3222         /* Setup the HW Tx Head and Tail descriptor pointers */
3223         for (i = 0; i < adapter->num_tx_queues; i++)
3224                 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3225 }
3226
3227 static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3228                                  struct ixgbe_ring *ring)
3229 {
3230         struct ixgbe_hw *hw = &adapter->hw;
3231         u8 reg_idx = ring->reg_idx;
3232         u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3233
3234         srrctl |= IXGBE_SRRCTL_DROP_EN;
3235
3236         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3237 }
3238
3239 static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3240                                   struct ixgbe_ring *ring)
3241 {
3242         struct ixgbe_hw *hw = &adapter->hw;
3243         u8 reg_idx = ring->reg_idx;
3244         u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3245
3246         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3247
3248         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3249 }
3250
3251 #ifdef CONFIG_IXGBE_DCB
3252 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3253 #else
3254 static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3255 #endif
3256 {
3257         int i;
3258         bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3259
3260         if (adapter->ixgbe_ieee_pfc)
3261                 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3262
3263         /*
3264          * We should set the drop enable bit if:
3265          *  SR-IOV is enabled
3266          *   or
3267          *  Number of Rx queues > 1 and flow control is disabled
3268          *
3269          *  This allows us to avoid head of line blocking for security
3270          *  and performance reasons.
3271          */
3272         if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3273             !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3274                 for (i = 0; i < adapter->num_rx_queues; i++)
3275                         ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3276         } else {
3277                 for (i = 0; i < adapter->num_rx_queues; i++)
3278                         ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3279         }
3280 }
3281
3282 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3283
3284 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3285                                    struct ixgbe_ring *rx_ring)
3286 {
3287         struct ixgbe_hw *hw = &adapter->hw;
3288         u32 srrctl;
3289         u8 reg_idx = rx_ring->reg_idx;
3290
3291         if (hw->mac.type == ixgbe_mac_82598EB) {
3292                 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3293
3294                 /*
3295                  * if VMDq is not active we must program one srrctl register
3296                  * per RSS queue since we have enabled RDRXCTL.MVMEN
3297                  */
3298                 reg_idx &= mask;
3299         }
3300
3301         /* configure header buffer length, needed for RSC */
3302         srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3303
3304         /* configure the packet buffer length */
3305         srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3306
3307         /* configure descriptor type */
3308         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3309
3310         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3311 }
3312
3313 /**
3314  * Return the number of entries in the RSS indirection table
3315  *
3316  * @adapter: device handle
3317  *
3318  *  - 82598/82599/X540:     128
3319  *  - X550(non-SRIOV mode): 512
3320  *  - X550(SRIOV mode):     64
3321  */
3322 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3323 {
3324         if (adapter->hw.mac.type < ixgbe_mac_X550)
3325                 return 128;
3326         else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3327                 return 64;
3328         else
3329                 return 512;
3330 }
3331
3332 /**
3333  * Write the RETA table to HW
3334  *
3335  * @adapter: device handle
3336  *
3337  * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
3338  */
3339 void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3340 {
3341         u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3342         struct ixgbe_hw *hw = &adapter->hw;
3343         u32 reta = 0;
3344         u32 indices_multi;
3345         u8 *indir_tbl = adapter->rss_indir_tbl;
3346
3347         /* Fill out the redirection table as follows:
3348          *  - 82598:      8 bit wide entries containing pair of 4 bit RSS
3349          *    indices.
3350          *  - 82599/X540: 8 bit wide entries containing 4 bit RSS index
3351          *  - X550:       8 bit wide entries containing 6 bit RSS index
3352          */
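             /* multiplying a 4-bit index by 0x11 duplicates it into both
              * nibbles of an 8-bit 82598 entry; four 8-bit entries are then
              * packed into each 32-bit RETA/ERETA register in the loop below
              */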
3353         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3354                 indices_multi = 0x11;
3355         else
3356                 indices_multi = 0x1;
3357
3358         /* Write redirection table to HW */
3359         for (i = 0; i < reta_entries; i++) {
3360                 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3361                 if ((i & 3) == 3) {
3362                         if (i < 128)
3363                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3364                         else
3365                                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3366                                                 reta);
3367                         reta = 0;
3368                 }
3369         }
3370 }
3371
3372 /**
3373  * Write the RETA table to HW (for X550 devices in SR-IOV mode)
3374  *
3375  * @adapter: device handle
3376  *
3377  * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
3378  */
3379 static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3380 {
3381         u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3382         struct ixgbe_hw *hw = &adapter->hw;
3383         u32 vfreta = 0;
3384         unsigned int pf_pool = adapter->num_vfs;
3385
3386         /* Write redirection table to HW */
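             /* as in ixgbe_store_reta(), four 8-bit entries are packed into
              * each 32-bit PFVFRETA register
              */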
3387         for (i = 0; i < reta_entries; i++) {
3388                 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3389                 if ((i & 3) == 3) {
3390                         IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
3391                                         vfreta);
3392                         vfreta = 0;
3393                 }
3394         }
3395 }
3396
3397 static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3398 {
3399         struct ixgbe_hw *hw = &adapter->hw;
3400         u32 i, j;
3401         u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3402         u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3403
3404         /* Program table for at least 2 queues w/ SR-IOV so that VFs can
3405          * make full use of any rings they may have.  We will use the
3406          * PSRTYPE register to control how many rings we use within the PF.
3407          */
3408         if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
3409                 rss_i = 2;
3410
3411         /* Fill out hash function seeds */
3412         for (i = 0; i < 10; i++)
3413                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3414
3415         /* Fill out redirection table */
3416         memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3417
3418         for (i = 0, j = 0; i < reta_entries; i++, j++) {
3419                 if (j == rss_i)
3420                         j = 0;
3421
3422                 adapter->rss_indir_tbl[i] = j;
3423         }
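             /* e.g. rss_i = 4 yields the repeating pattern 0,1,2,3,0,1,2,3,... */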
3424
3425         ixgbe_store_reta(adapter);
3426 }
3427
3428 static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3429 {
3430         struct ixgbe_hw *hw = &adapter->hw;
3431         u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3432         unsigned int pf_pool = adapter->num_vfs;
3433         int i, j;
3434
3435         /* Fill out hash function seeds */
3436         for (i = 0; i < 10; i++)
3437                 IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
3438                                 adapter->rss_key[i]);
3439
3440         /* Fill out the redirection table */
3441         for (i = 0, j = 0; i < 64; i++, j++) {
3442                 if (j == rss_i)
3443                         j = 0;
3444
3445                 adapter->rss_indir_tbl[i] = j;
3446         }
3447
3448         ixgbe_store_vfreta(adapter);
3449 }
3450
3451 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3452 {
3453         struct ixgbe_hw *hw = &adapter->hw;
3454         u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3455         u32 rxcsum;
3456
3457         /* Disable indicating checksum in descriptor; this enables RSS hash */
3458         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3459         rxcsum |= IXGBE_RXCSUM_PCSD;
3460         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3461
3462         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3463                 if (adapter->ring_feature[RING_F_RSS].mask)
3464                         mrqc = IXGBE_MRQC_RSSEN;
3465         } else {
3466                 u8 tcs = netdev_get_num_tc(adapter->netdev);
3467
3468                 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3469                         if (tcs > 4)
3470                                 mrqc = IXGBE_MRQC_VMDQRT8TCEN;  /* 8 TCs */
3471                         else if (tcs > 1)
3472                                 mrqc = IXGBE_MRQC_VMDQRT4TCEN;  /* 4 TCs */
3473                         else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3474                                 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3475                         else
3476                                 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3477                 } else {
3478                         if (tcs > 4)
3479                                 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3480                         else if (tcs > 1)
3481                                 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3482                         else
3483                                 mrqc = IXGBE_MRQC_RSSEN;
3484                 }
3485         }
3486
3487         /* Perform hash on these packet types */
3488         rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3489                      IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3490                      IXGBE_MRQC_RSS_FIELD_IPV6 |
3491                      IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3492
3493         if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3494                 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3495         if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3496                 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3497
3498         netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
3499         if ((hw->mac.type >= ixgbe_mac_X550) &&
3500             (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3501                 unsigned int pf_pool = adapter->num_vfs;
3502
3503                 /* Enable VF RSS mode */
3504                 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3505                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3506
3507                 /* Setup RSS through the VF registers */
3508                 ixgbe_setup_vfreta(adapter);
3509                 vfmrqc = IXGBE_MRQC_RSSEN;
3510                 vfmrqc |= rss_field;
3511                 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
3512         } else {
3513                 ixgbe_setup_reta(adapter);
3514                 mrqc |= rss_field;
3515                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3516         }
3517 }
3518
3519 /**
3520  * ixgbe_configure_rscctl - enable RSC for the indicated ring
3521  * @adapter:    address of board private structure
3522  * @ring:       structure containing ring specific data
3523  **/
3524 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
3525                                    struct ixgbe_ring *ring)
3526 {
3527         struct ixgbe_hw *hw = &adapter->hw;
3528         u32 rscctrl;
3529         u8 reg_idx = ring->reg_idx;
3530
3531         if (!ring_is_rsc_enabled(ring))
3532                 return;
3533
3534         rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
3535         rscctrl |= IXGBE_RSCCTL_RSCEN;
3536         /*
3537          * we must limit the number of descriptors so that the
3538          * total size of max desc * buf_len is not greater
3539          * than 65536
3540          */
3541         rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3542         IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
3543 }
3544
3545 #define IXGBE_MAX_RX_DESC_POLL 10
3546 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3547                                        struct ixgbe_ring *ring)
3548 {
3549         struct ixgbe_hw *hw = &adapter->hw;
3550         int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3551         u32 rxdctl;
3552         u8 reg_idx = ring->reg_idx;
3553
3554         if (ixgbe_removed(hw->hw_addr))
3555                 return;
3556         /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3557         if (hw->mac.type == ixgbe_mac_82598EB &&
3558             !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3559                 return;
3560
3561         do {
3562                 usleep_range(1000, 2000);
3563                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3564         } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3565
3566         if (!wait_loop) {
3567                 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3568                       "the polling period\n", reg_idx);
3569         }
3570 }
3571
3572 void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3573                             struct ixgbe_ring *ring)
3574 {
3575         struct ixgbe_hw *hw = &adapter->hw;
3576         int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3577         u32 rxdctl;
3578         u8 reg_idx = ring->reg_idx;
3579
3580         if (ixgbe_removed(hw->hw_addr))
3581                 return;
3582         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3583         rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3584
3585         /* write value back with RXDCTL.ENABLE bit cleared */
3586         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3587
3588         if (hw->mac.type == ixgbe_mac_82598EB &&
3589             !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3590                 return;
3591
3592         /* the hardware may take up to 100us to really disable the rx queue */
3593         do {
3594                 udelay(10);
3595                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3596         } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3597
3598         if (!wait_loop) {
3599                 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3600                       "the polling period\n", reg_idx);
3601         }
3602 }
3603
3604 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3605                              struct ixgbe_ring *ring)
3606 {
3607         struct ixgbe_hw *hw = &adapter->hw;
3608         u64 rdba = ring->dma;
3609         u32 rxdctl;
3610         u8 reg_idx = ring->reg_idx;
3611
3612         /* disable queue to avoid issues while updating state */
3613         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3614         ixgbe_disable_rx_queue(adapter, ring);
3615
3616         IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3617         IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3618         IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3619                         ring->count * sizeof(union ixgbe_adv_rx_desc));
3620         IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3621         IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3622         ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
3623
3624         ixgbe_configure_srrctl(adapter, ring);
3625         ixgbe_configure_rscctl(adapter, ring);
3626
3627         if (hw->mac.type == ixgbe_mac_82598EB) {
3628                 /*
3629                  * enable cache line friendly hardware writes:
3630                  * PTHRESH=32 descriptors (half the internal cache);
3631                  * this also removes the ugly rx_no_buffer_count increment.
3632                  * HTHRESH=4 descriptors (to minimize latency on fetch).
3633                  * WTHRESH=8 to burst writeback up to two cache lines.
3634                  */
3635                 rxdctl &= ~0x3FFFFF;
3636                 rxdctl |=  0x080420;
3637         }
3638
3639         /* enable receive descriptor ring */
3640         rxdctl |= IXGBE_RXDCTL_ENABLE;
3641         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3642
3643         ixgbe_rx_desc_queue_enable(adapter, ring);
3644         ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
3645 }
3646
3647 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3648 {
3649         struct ixgbe_hw *hw = &adapter->hw;
3650         int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3651         u16 pool;
3652
3653         /* PSRTYPE must be initialized in non-82598 adapters */
3654         u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3655                       IXGBE_PSRTYPE_UDPHDR |
3656                       IXGBE_PSRTYPE_IPV4HDR |
3657                       IXGBE_PSRTYPE_L2HDR |
3658                       IXGBE_PSRTYPE_IPV6HDR;
3659
3660         if (hw->mac.type == ixgbe_mac_82598EB)
3661                 return;
3662
3663         if (rss_i > 3)
3664                 psrtype |= 2 << 29;
3665         else if (rss_i > 1)
3666                 psrtype |= 1 << 29;
3667
3668         for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
3669                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
3670 }
3671
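     /* Enable VMDq/VT mode for SR-IOV: program the default pool, the
      * per-pool Tx/Rx enables, the VF register offsets and the
      * anti-spoofing filters.
      */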
3672 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3673 {
3674         struct ixgbe_hw *hw = &adapter->hw;
3675         u32 reg_offset, vf_shift;
3676         u32 gcr_ext, vmdctl;
3677         int i;
3678
3679         if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3680                 return;
3681
3682         vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3683         vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3684         vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
3685         vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
3686         vmdctl |= IXGBE_VT_CTL_REPLEN;
3687         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
3688
3689         vf_shift = VMDQ_P(0) % 32;
3690         reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
3691
3692         /* Enable only the PF's pool for Tx/Rx */
3693         IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
3694         IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3695         IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3696         IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
3697         if (adapter->bridge_mode == BRIDGE_MODE_VEB)
3698                 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3699
3700         /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
3701         hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
3702
3703         /*
3704          * Set up VF register offsets for selected VT Mode,
3705          * i.e. 32 or 64 VFs for SR-IOV
3706          */
3707         switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3708         case IXGBE_82599_VMDQ_8Q_MASK:
3709                 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3710                 break;
3711         case IXGBE_82599_VMDQ_4Q_MASK:
3712                 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3713                 break;
3714         default:
3715                 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3716                 break;
3717         }
3718
3719         IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3720
3721
3722         /* Enable MAC Anti-Spoofing */
3723         hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3724                                           adapter->num_vfs);
3725
3726         /* Ensure LLDP and FC are set for Ethertype Antispoofing if we will be
3727          * calling set_ethertype_anti_spoofing for each VF in the loop below
3728          */
3729         if (hw->mac.ops.set_ethertype_anti_spoofing) {
3730                 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
3731                                 (IXGBE_ETQF_FILTER_EN    |
3732                                  IXGBE_ETQF_TX_ANTISPOOF |
3733                                  IXGBE_ETH_P_LLDP));
3734
3735                 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
3736                                 (IXGBE_ETQF_FILTER_EN |
3737                                  IXGBE_ETQF_TX_ANTISPOOF |
3738                                  ETH_P_PAUSE));
3739         }
3740
3741         /* Per-VF setup: spoof checking, ethertype anti-spoofing, RSS query */
3742         for (i = 0; i < adapter->num_vfs; i++) {
3743                 if (!adapter->vfinfo[i].spoofchk_enabled)
3744                         ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3745
3746                 /* enable ethertype anti spoofing if hw supports it */
3747                 if (hw->mac.ops.set_ethertype_anti_spoofing)
3748                         hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
3749
3750                 /* Enable/Disable RSS query feature  */
3751                 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
3752                                           adapter->vfinfo[i].rss_query_enabled);
3753         }
3754 }
3755
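     /* Program the hardware maximum frame size (MHADD.MFS) and the
      * per-ring RSC state to match the current MTU and feature flags.
      */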
3756 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3757 {
3758         struct ixgbe_hw *hw = &adapter->hw;
3759         struct net_device *netdev = adapter->netdev;
3760         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
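             /* e.g. the default 1500 byte MTU gives 1500 + 14 + 4 = 1518 */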
3761         struct ixgbe_ring *rx_ring;
3762         int i;
3763         u32 mhadd, hlreg0;
3764
3765 #ifdef IXGBE_FCOE
3766         /* adjust max frame to be able to do baby jumbo for FCoE */
3767         if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3768             (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3769                 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3770
3771 #endif /* IXGBE_FCOE */
3772
3773         /* adjust max frame to be at least the size of a standard frame */
3774         if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3775                 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
3776
3777         mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3778         if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3779                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3780                 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3781
3782                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3783         }
3784
3785         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3786         /* set jumbo enable since MHADD.MFS keeps the size locked at max_frame */
3787         hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3788         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3789
3790         /*
3791          * Setup the HW Rx Head and Tail Descriptor Pointers and
3792          * the Base and Length of the Rx Descriptor Ring
3793          */
3794         for (i = 0; i < adapter->num_rx_queues; i++) {
3795                 rx_ring = adapter->rx_ring[i];
3796                 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3797                         set_ring_rsc_enabled(rx_ring);
3798                 else
3799                         clear_ring_rsc_enabled(rx_ring);
3800         }
3801 }
3802
3803 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3804 {
3805         struct ixgbe_hw *hw = &adapter->hw;
3806         u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3807
3808         switch (hw->mac.type) {
3809         case ixgbe_mac_82598EB:
3810                 /*
3811                  * For VMDq support of different descriptor types or
3812                  * buffer sizes through the use of multiple SRRCTL
3813                  * registers, RDRXCTL.MVMEN must be set to 1
3814                  *
3815                  * also, the manual doesn't mention it clearly but DCA hints
3816                  * will only use queue 0's tags unless this bit is set.  Side
3817                  * effects of setting this bit are only that SRRCTL must be
3818                  * fully programmed [0..15]
3819                  */
3820                 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3821                 break;
3822         case ixgbe_mac_X550:
3823         case ixgbe_mac_X550EM_x:
3824                 if (adapter->num_vfs)
3825                         rdrxctl |= IXGBE_RDRXCTL_PSP;
3826                 /* fall through for older HW */
3827         case ixgbe_mac_82599EB:
3828         case ixgbe_mac_X540:
3829                 /* Disable RSC for ACK packets */
3830                 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3831                    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3832                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3833                 /* hardware requires some bits to be set by default */
3834                 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3835                 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3836                 break;
3837         default:
3838                 /* We should do nothing since we don't know this hardware */
3839                 return;
3840         }
3841
3842         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3843 }
3844
3845 /**
3846  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3847  * @adapter: board private structure
3848  *
3849  * Configure the Rx unit of the MAC after a reset.
3850  **/
3851 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3852 {
3853         struct ixgbe_hw *hw = &adapter->hw;
3854         int i;
3855         u32 rxctrl, rfctl;
3856
3857         /* disable receives while setting up the descriptors */
3858         hw->mac.ops.disable_rx(hw);
3859
3860         ixgbe_setup_psrtype(adapter);
3861         ixgbe_setup_rdrxctl(adapter);
3862
3863         /* RSC Setup */
3864         rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
3865         rfctl &= ~IXGBE_RFCTL_RSC_DIS;
3866         if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
3867                 rfctl |= IXGBE_RFCTL_RSC_DIS;
3868         IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
3869
3870         /* Program registers for the distribution of queues */
3871         ixgbe_setup_mrqc(adapter);
3872
3873         /* set_rx_buffer_len must be called before ring initialization */
3874         ixgbe_set_rx_buffer_len(adapter);
3875
3876         /*
3877          * Setup the HW Rx Head and Tail Descriptor Pointers and
3878          * the Base and Length of the Rx Descriptor Ring
3879          */
3880         for (i = 0; i < adapter->num_rx_queues; i++)
3881                 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
3882
3883         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3884         /* disable drop enable for 82598 parts */
3885         if (hw->mac.type == ixgbe_mac_82598EB)
3886                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3887
3888         /* enable all receives */
3889         rxctrl |= IXGBE_RXCTRL_RXEN;
3890         hw->mac.ops.enable_rx_dma(hw, rxctrl);
3891 }
3892
3893 static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
3894                                  __be16 proto, u16 vid)
3895 {
3896         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3897         struct ixgbe_hw *hw = &adapter->hw;
3898
3899         /* add VID to filter table */
3900         hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
3901         set_bit(vid, adapter->active_vlans);
3902
3903         return 0;
3904 }
3905
3906 static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
3907                                   __be16 proto, u16 vid)
3908 {
3909         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3910         struct ixgbe_hw *hw = &adapter->hw;
3911
3912         /* remove VID from filter table */
3913         hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
3914         clear_bit(vid, adapter->active_vlans);
3915
3916         return 0;
3917 }
3918
3919 /**
3920  * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3921  * @adapter: driver data
3922  */
3923 static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3924 {
3925         struct ixgbe_hw *hw = &adapter->hw;
3926         u32 vlnctrl;
3927         int i, j;
3928
3929         switch (hw->mac.type) {
3930         case ixgbe_mac_82598EB:
3931                 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3932                 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3933                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3934                 break;
3935         case ixgbe_mac_82599EB:
3936         case ixgbe_mac_X540:
3937         case ixgbe_mac_X550:
3938         case ixgbe_mac_X550EM_x:
3939                 for (i = 0; i < adapter->num_rx_queues; i++) {
3940                         struct ixgbe_ring *ring = adapter->rx_ring[i];
3941
3942                         if (ring->l2_accel_priv)
3943                                 continue;
3944                         j = ring->reg_idx;
3945                         vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3946                         vlnctrl &= ~IXGBE_RXDCTL_VME;
3947                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3948                 }
3949                 break;
3950         default:
3951                 break;
3952         }
3953 }
3954
3955 /**
3956  * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
3957  * @adapter: driver data
3958  */
3959 static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3960 {
3961         struct ixgbe_hw *hw = &adapter->hw;
3962         u32 vlnctrl;
3963         int i, j;
3964
3965         switch (hw->mac.type) {
3966         case ixgbe_mac_82598EB:
3967                 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3968                 vlnctrl |= IXGBE_VLNCTRL_VME;
3969                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3970                 break;
3971         case ixgbe_mac_82599EB:
3972         case ixgbe_mac_X540:
3973         case ixgbe_mac_X550:
3974         case ixgbe_mac_X550EM_x:
3975                 for (i = 0; i < adapter->num_rx_queues; i++) {
3976                         struct ixgbe_ring *ring = adapter->rx_ring[i];
3977
3978                         if (ring->l2_accel_priv)
3979                                 continue;
3980                         j = ring->reg_idx;
3981                         vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3982                         vlnctrl |= IXGBE_RXDCTL_VME;
3983                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3984                 }
3985                 break;
3986         default:
3987                 break;
3988         }
3989 }
3990
3991 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3992 {
3993         u16 vid;
3994
3995         ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
3996
3997         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3998                 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
3999 }
4000
4001 /**
4002  * ixgbe_write_mc_addr_list - write multicast addresses to MTA
4003  * @netdev: network interface device structure
4004  *
4005  * Writes multicast address list to the MTA hash table.
4006  * Returns: -ENOMEM on failure
4007  *                0 on no addresses written
4008  *                X on writing X addresses to MTA
4009  **/
4010 static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4011 {
4012         struct ixgbe_adapter *adapter = netdev_priv(netdev);
4013         struct ixgbe_hw *hw = &adapter->hw;
4014
4015         if (!netif_running(netdev))
4016                 return 0;
4017
4018         if (hw->mac.ops.update_mc_addr_list)
4019                 hw->mac.ops.update_mc_addr_list(hw, netdev);
4020         else
4021                 return -ENOMEM;
4022
4023 #ifdef CONFIG_PCI_IOV
4024         ixgbe_restore_vf_multicasts(adapter);
4025 #endif
4026
4027         return netdev_mc_count(netdev);
4028 }
4029
4030 #ifdef CONFIG_PCI_IOV
4031 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4032 {
4033         struct ixgbe_hw *hw = &adapter->hw;
4034         int i;
4035         for (i = 0; i < hw->mac.num_rar_entries; i++) {
4036                 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
4037                         hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
4038                                             adapter->mac_table[i].queue,
4039                                             IXGBE_RAH_AV);
4040                 else
4041                         hw->mac.ops.clear_rar(hw, i);
4042
4043                 adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
4044         }
4045 }
4046 #endif
4047
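     /* Write only the mac_table entries flagged MODIFIED out to the
      * hardware RAR registers, then clear the MODIFIED flag.
      */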
4048 static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4049 {
4050         struct ixgbe_hw *hw = &adapter->hw;
4051         int i;
4052         for (i = 0; i < hw->mac.num_rar_entries; i++) {
4053                 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
4054                         if (adapter->mac_table[i].state &
4055                             IXGBE_MAC_STATE_IN_USE)
4056                                 hw->mac.ops.set_rar(hw, i,
4057                                                 adapter->mac_table[i].addr,
4058                                                 adapter->mac_table[i].queue,
4059                                                 IXGBE_RAH_AV);
4060                         else
4061                                 hw->mac.ops.clear_rar(hw, i);
4062
4063                         adapter->mac_table[i].state &=
4064                                                 ~(IXGBE_MAC_STATE_MODIFIED);
4065                 }
4066         }
4067 }
4068
4069 static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4070 {
4071         int i;
4072         struct ixgbe_hw *hw = &adapter->hw;
4073
4074         for (i = 0; i < hw->mac.num_rar_entries; i++) {
4075                 adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
4076                 adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
4077                 eth_zero_addr(adapter->mac_table[i].addr);
4078                 adapter->mac_table[i].queue = 0;
4079         }
4080         ixgbe_sync_mac_table(adapter);
4081 }
4082
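     /* Count the RAR slots whose state is completely clear, i.e. neither
      * in use nor pending modification.
      */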
4083 static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
4084 {
4085         struct ixgbe_hw *hw = &adapter->hw;
4086         int i, count = 0;
4087
4088         for (i = 0; i < hw->mac.num_rar_entries; i++) {
4089                 if (adapter->mac_table[i].state == 0)
4090                         count++;
4091         }
4092         return count;
4093 }
4094
4095 /* this function destroys the first RAR entry */
4096 static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
4097                                          u8 *addr)
4098 {
4099         struct ixgbe_hw *hw = &adapter->hw;
4100
4101         memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
4102         adapter->mac_table[0].queue = VMDQ_P(0);
4103         adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
4104                                        IXGBE_MAC_STATE_IN_USE);
4105         hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
4106                             adapter->mac_table[0].queue,
4107                             IXGBE_RAH_AV);
4108 }
4109
4110 int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
4111 {
4112         struct ixgbe_hw *hw = &adapter->hw;
4113         int i;
4114
4115         if (is_zero_ether_addr(addr))
4116                 return -EINVAL;
4117
4118         for (i = 0; i < hw->mac.num_rar_entries; i++) {
4119                 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
4120                         continue;
4121                 adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
4122                                                 IXGBE_MAC_STATE_IN_USE);
4123                 ether_addr_copy(adapter->mac_table[i].addr, addr);
4124                 adapter->mac_table[i].queue = queue;
4125                 ixgbe_sync_mac_table(adapter);
4126                 return i;
4127         }
4128         return -ENOMEM;
4129 }
4130
4131 int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
4132 {
4133         /* search table for addr, if found, set to 0 and sync */
4134         int i;
4135         struct ixgbe_hw *hw = &adapter->hw;
4136
4137         if (is_zero_ether_addr(addr))
4138                 return -EINVAL;
4139
4140         for (i = 0; i < hw->mac.num_rar_entries; i++) {
4141                 if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
4142                     adapter->mac_table[i].queue == queue) {
4143                         adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
4144                         adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
4145                         eth_zero_addr(adapter->mac_table[i].addr);
4146                         adapter->mac_table[i].queue = 0;
4147                         ixgbe_sync_mac_table(adapter);
4148                         return 0;
4149                 }
4150         }
4151         return -ENOMEM;
4152 }
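
/* Editor's note (illustrative summary, not upstream code): the mac_table
 * entries above form a small state machine driven by two flags:
 *
 *   ixgbe_add_mac_filter(): state |= MODIFIED | IN_USE, then sync
 *                           programs RAR[i] with IXGBE_RAH_AV set
 *   ixgbe_del_mac_filter(): state |= MODIFIED, state &= ~IN_USE, then
 *                           sync clears RAR[i]
 *   ixgbe_sync_mac_table(): touches only MODIFIED entries and clears
 *                           MODIFIED afterwards, so a second sync is a
 *                           no-op
 */
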
4153 /**
4154  * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
4155  * @netdev: network interface device structure
 * @vfn: VMDQ pool (VF) index the addresses are assigned to
4156  *
4157  * Writes unicast address list to the RAR table.
4158  * Returns: -ENOMEM on failure/insufficient address space
4159  *                0 on no addresses written
4160  *                X on writing X addresses to the RAR table
4161  **/
4162 static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
4163 {
4164         struct ixgbe_adapter *adapter = netdev_priv(netdev);
4165         int count = 0;
4166
4167         /* return ENOMEM indicating insufficient memory for addresses */
4168         if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
4169                 return -ENOMEM;
4170
4171         if (!netdev_uc_empty(netdev)) {
4172                 struct netdev_hw_addr *ha;
4173                 netdev_for_each_uc_addr(ha, netdev) {
4174                         ixgbe_del_mac_filter(adapter, ha->addr, vfn);
4175                         ixgbe_add_mac_filter(adapter, ha->addr, vfn);
4176                         count++;
4177                 }
4178         }
4179         return count;
4180 }
4181
4182 /**
4183  * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
4184  * @netdev: network interface device structure
4185  *
4186  * The set_rx_mode entry point is called whenever the unicast/multicast
4187  * address list or the network interface flags are updated.  This routine is
4188  * responsible for configuring the hardware for proper unicast, multicast and
4189  * promiscuous mode.
4190  **/
4191 void ixgbe_set_rx_mode(struct net_device *netdev)
4192 {
4193         struct ixgbe_adapter *adapter = netdev_priv(netdev);
4194         struct ixgbe_hw *hw = &adapter->hw;
4195         u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4196         u32 vlnctrl;
4197         int count;
4198
4199         /* Check for Promiscuous and All Multicast modes */
4200         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4201         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4202
4203         /* set all bits that we expect to always be set */
4204         fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
4205         fctrl |= IXGBE_FCTRL_BAM;
4206         fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
4207         fctrl |= IXGBE_FCTRL_PMCF;
4208
4209         /* clear the bits we are changing the status of */
4210         fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4211         vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
4212         if (netdev->flags & IFF_PROMISC) {
4213                 hw->addr_ctrl.user_set_promisc = true;
4214                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4215                 vmolr |= IXGBE_VMOLR_MPE;
4216                 /* Only disable hardware filter vlans in promiscuous mode
4217                  * if SR-IOV and VMDQ are disabled - otherwise ensure
4218                  * that hardware VLAN filters remain enabled.
4219                  */
4220                 if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
4221                                       IXGBE_FLAG_SRIOV_ENABLED))
4222                         vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
4223         } else {
4224                 if (netdev->flags & IFF_ALLMULTI) {
4225                         fctrl |= IXGBE_FCTRL_MPE;
4226                         vmolr |= IXGBE_VMOLR_MPE;
4227                 }
4228                 vlnctrl |= IXGBE_VLNCTRL_VFE;
4229                 hw->addr_ctrl.user_set_promisc = false;
4230         }
4231
4232         /*
4233          * Write addresses to available RAR registers, if there is not
4234          * sufficient space to store all the addresses then enable
4235          * unicast promiscuous mode
4236          */
4237         count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
4238         if (count < 0) {
4239                 fctrl |= IXGBE_FCTRL_UPE;
4240                 vmolr |= IXGBE_VMOLR_ROPE;
4241         }
4242
4243         /* Write addresses to the MTA, if the attempt fails
4244          * then we should just turn on promiscuous mode so
4245          * that we can at least receive multicast traffic
4246          */
4247         count = ixgbe_write_mc_addr_list(netdev);
4248         if (count < 0) {
4249                 fctrl |= IXGBE_FCTRL_MPE;
4250                 vmolr |= IXGBE_VMOLR_MPE;
4251         } else if (count) {
4252                 vmolr |= IXGBE_VMOLR_ROMPE;
4253         }
4254
4255         if (hw->mac.type != ixgbe_mac_82598EB) {
4256                 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4257                          ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4258                            IXGBE_VMOLR_ROPE);
4259                 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4260         }
4261
4262         /* This is useful for sniffing bad packets. */
4263         if (adapter->netdev->features & NETIF_F_RXALL) {
4264                 /* UPE and MPE will be handled by normal PROMISC logic
4265                  * in ixgbe_set_rx_mode above */
4266                 fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
4267                           IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
4268                           IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
4269
4270                 fctrl &= ~(IXGBE_FCTRL_DPF);
4271                 /* NOTE:  VLAN filtering is disabled by setting PROMISC */
4272         }
4273
4274         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4275         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4276
4277         if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
4278                 ixgbe_vlan_strip_enable(adapter);
4279         else
4280                 ixgbe_vlan_strip_disable(adapter);
4281 }
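
/* Editor's note (illustrative, not upstream code): the rx-mode logic
 * above degrades toward promiscuous filtering instead of silently
 * dropping addresses it cannot program:
 *
 *   IFF_PROMISC                  -> FCTRL.UPE | FCTRL.MPE, VMOLR.MPE
 *   IFF_ALLMULTI                 -> FCTRL.MPE, VMOLR.MPE
 *   unicast list overflows RARs  -> FCTRL.UPE, VMOLR.ROPE
 *   multicast list overflows MTA -> FCTRL.MPE, VMOLR.MPE
 */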
4282
4283 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4284 {
4285         int q_idx;
4286
4287         for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
4288                 ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
4289                 napi_enable(&adapter->q_vector[q_idx]->napi);
4290         }
4291 }
4292
4293 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
4294 {
4295         int q_idx;
4296
4297         for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
4298                 napi_disable(&adapter->q_vector[q_idx]->napi);
4299                 while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
4300                         pr_info("QV %d locked\n", q_idx);
4301                         usleep_range(1000, 20000);
4302                 }
4303         }
4304 }
4305
4306 static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
4307 {
4308         switch (adapter->hw.mac.type) {
4309         case ixgbe_mac_X550:
4310         case ixgbe_mac_X550EM_x:
4311                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
4312 #ifdef CONFIG_IXGBE_VXLAN
4313                 adapter->vxlan_port = 0;
4314 #endif
4315                 break;
4316         default:
4317                 break;
4318         }
4319 }
4320
4321 #ifdef CONFIG_IXGBE_DCB
4322 /**
4323  * ixgbe_configure_dcb - Configure DCB hardware
4324  * @adapter: ixgbe adapter struct
4325  *
4326  * This is called by the driver on open to configure the DCB hardware.
4327  * This is also called by the gennetlink interface when reconfiguring
4328  * the DCB state.
4329  */
4330 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
4331 {
4332         struct ixgbe_hw *hw = &adapter->hw;
4333         int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4334
4335         if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
4336                 if (hw->mac.type == ixgbe_mac_82598EB)
4337                         netif_set_gso_max_size(adapter->netdev, 65536);
4338                 return;
4339         }
4340
4341         if (hw->mac.type == ixgbe_mac_82598EB)
4342                 netif_set_gso_max_size(adapter->netdev, 32768);
4343
4344 #ifdef IXGBE_FCOE
4345         if (adapter->netdev->features & NETIF_F_FCOE_MTU)
4346                 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
4347 #endif
4348
4349         /* reconfigure the hardware */
4350         if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
4351                 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
4352                                                 DCB_TX_CONFIG);
4353                 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
4354                                                 DCB_RX_CONFIG);
4355                 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
4356         } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
4357                 ixgbe_dcb_hw_ets(&adapter->hw,
4358                                  adapter->ixgbe_ieee_ets,
4359                                  max_frame);
4360                 ixgbe_dcb_hw_pfc_config(&adapter->hw,
4361                                         adapter->ixgbe_ieee_pfc->pfc_en,
4362                                         adapter->ixgbe_ieee_ets->prio_tc);
4363         }
4364
4365         /* Enable RSS Hash per TC */
4366         if (hw->mac.type != ixgbe_mac_82598EB) {
4367                 u32 msb = 0;
4368                 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
4369
4370                 while (rss_i) {
4371                         msb++;
4372                         rss_i >>= 1;
4373                 }
4374
4375                 /* write msb to all 8 TCs in one write */
4376                 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
4377         }
4378 }
4379 #endif
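
/* Editor's worked example for the RQTC write above (illustrative): with
 * 16 RSS indices, rss_i starts at 15 (0b1111) and the loop yields
 * msb = 4; multiplying by 0x11111111 replicates that value into all
 * eight 4-bit traffic-class fields, i.e. IXGBE_RQTC = 0x44444444.
 */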
4380
4381 /* Additional bittime to account for IXGBE framing */
4382 #define IXGBE_ETH_FRAMING 20
4383
4384 /**
4385  * ixgbe_hpbthresh - calculate high water mark for flow control
4386  *
4387  * @adapter: board private structure to calculate for
4388  * @pb: packet buffer to calculate
4389  */
4390 static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
4391 {
4392         struct ixgbe_hw *hw = &adapter->hw;
4393         struct net_device *dev = adapter->netdev;
4394         int link, tc, kb, marker;
4395         u32 dv_id, rx_pba;
4396
4397         /* Calculate max LAN frame size */
4398         tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
4399
4400 #ifdef IXGBE_FCOE
4401         /* FCoE traffic class uses FCOE jumbo frames */
4402         if ((dev->features & NETIF_F_FCOE_MTU) &&
4403             (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4404             (pb == ixgbe_fcoe_get_tc(adapter)))
4405                 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4406 #endif
4407
4408         /* Calculate delay value for device */
4409         switch (hw->mac.type) {
4410         case ixgbe_mac_X540:
4411         case ixgbe_mac_X550:
4412         case ixgbe_mac_X550EM_x:
4413                 dv_id = IXGBE_DV_X540(link, tc);
4414                 break;
4415         default:
4416                 dv_id = IXGBE_DV(link, tc);
4417                 break;
4418         }
4419
4420         /* Loopback switch introduces additional latency */
4421         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4422                 dv_id += IXGBE_B2BT(tc);
4423
4424         /* Delay value is calculated in bit times, convert to KB */
4425         kb = IXGBE_BT2KB(dv_id);
4426         rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
4427
4428         marker = rx_pba - kb;
4429
4430         /* It is possible that the packet buffer is not large enough
4431          * to provide the required headroom. In this case warn the
4432          * user and do the best we can.
4433          */
4434         if (marker < 0) {
4435                 e_warn(drv, "Packet Buffer(%i) can not provide enough "
4436                             "headroom to support flow control. "
4437                             "Decrease MTU or number of traffic classes\n", pb);
4438                 marker = tc + 1;
4439         }
4440
4441         return marker;
4442 }
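
/* Editor's worked example for ixgbe_hpbthresh() (illustrative; the
 * exact IXGBE_DV()/IXGBE_BT2KB() expansions live in ixgbe_type.h):
 * for MTU 1500, link = tc = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
 * 20 (IXGBE_ETH_FRAMING) = 1538 bytes.  dv_id is a worst-case delay in
 * bit times, converted to KB, and the high water mark is the headroom
 * left in the packet buffer: (RXPBSIZE >> 10) - kb.  A negative result
 * means the buffer cannot absorb one worst-case delay, hence the
 * warning above.
 */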
4443
4444 /**
4445  * ixgbe_lpbthresh - calculate low water mark for flow control
4446  *
4447  * @adapter: board private structure to calculate for
4448  * @pb: packet buffer to calculate
4449  */
4450 static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
4451 {
4452         struct ixgbe_hw *hw = &adapter->hw;
4453         struct net_device *dev = adapter->netdev;
4454         int tc;
4455         u32 dv_id;
4456
4457         /* Calculate max LAN frame size */
4458         tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4459
4460 #ifdef IXGBE_FCOE
4461         /* FCoE traffic class uses FCOE jumbo frames */
4462         if ((dev->features & NETIF_F_FCOE_MTU) &&
4463             (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4464             (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
4465                 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4466 #endif
4467
4468         /* Calculate delay value for device */
4469         switch (hw->mac.type) {
4470         case ixgbe_mac_X540:
4471         case ixgbe_mac_X550:
4472         case ixgbe_mac_X550EM_x:
4473                 dv_id = IXGBE_LOW_DV_X540(tc);
4474                 break;
4475         default:
4476                 dv_id = IXGBE_LOW_DV(tc);
4477                 break;
4478         }
4479
4480         /* Delay value is calculated in bit times, convert to KB */
4481         return IXGBE_BT2KB(dv_id);
4482 }
4483
4484 /*
4485  * ixgbe_pbthresh_setup - calculate and set up the high and low water marks
4486  */
4487 static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
4488 {
4489         struct ixgbe_hw *hw = &adapter->hw;
4490         int num_tc = netdev_get_num_tc(adapter->netdev);
4491         int i;
4492
4493         if (!num_tc)
4494                 num_tc = 1;
4495
4496         for (i = 0; i < num_tc; i++) {
4497                 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
4498                 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
4499
4500                 /* Low water marks must not be larger than high water marks */
4501                 if (hw->fc.low_water[i] > hw->fc.high_water[i])
4502                         hw->fc.low_water[i] = 0;
4503         }
4504
4505         for (; i < MAX_TRAFFIC_CLASS; i++)
4506                 hw->fc.high_water[i] = 0;
4507 }
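
/* Editor's note: the guard above keeps the XOFF/XON thresholds from
 * inverting - e.g. a small packet buffer yielding high = 3 KB with a
 * computed low = 5 KB forces low to 0 rather than leaving low > high.
 */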
4508
4509 static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
4510 {
4511         struct ixgbe_hw *hw = &adapter->hw;
4512         int hdrm;
4513         u8 tc = netdev_get_num_tc(adapter->netdev);
4514
4515         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4516             adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
4517                 hdrm = 32 << adapter->fdir_pballoc;
4518         else
4519                 hdrm = 0;
4520
4521         hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
4522         ixgbe_pbthresh_setup(adapter);
4523 }
4524
4525 static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
4526 {
4527         struct ixgbe_hw *hw = &adapter->hw;
4528         struct hlist_node *node2;
4529         struct ixgbe_fdir_filter *filter;
4530
4531         spin_lock(&adapter->fdir_perfect_lock);
4532
4533         if (!hlist_empty(&adapter->fdir_filter_list))
4534                 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
4535
4536         hlist_for_each_entry_safe(filter, node2,
4537                                   &adapter->fdir_filter_list, fdir_node) {
4538                 ixgbe_fdir_write_perfect_filter_82599(hw,
4539                                 &filter->filter,
4540                                 filter->sw_idx,
4541                                 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
4542                                 IXGBE_FDIR_DROP_QUEUE :
4543                                 adapter->rx_ring[filter->action]->reg_idx);
4544         }
4545
4546         spin_unlock(&adapter->fdir_perfect_lock);
4547 }
4548
4549 static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
4550                                       struct ixgbe_adapter *adapter)
4551 {
4552         struct ixgbe_hw *hw = &adapter->hw;
4553         u32 vmolr;
4554
4555         /* No unicast promiscuous support for VMDQ devices. */
4556         vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4557         vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
4558
4559         /* clear the affected bit */
4560         vmolr &= ~IXGBE_VMOLR_MPE;
4561
4562         if (dev->flags & IFF_ALLMULTI) {
4563                 vmolr |= IXGBE_VMOLR_MPE;
4564         } else {
4565                 vmolr |= IXGBE_VMOLR_ROMPE;
4566                 hw->mac.ops.update_mc_addr_list(hw, dev);
4567         }
4568         ixgbe_write_uc_addr_list(adapter->netdev, pool);
4569         IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4570 }
4571
4572 static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
4573 {
4574         struct ixgbe_adapter *adapter = vadapter->real_adapter;
4575         int rss_i = adapter->num_rx_queues_per_pool;
4576         struct ixgbe_hw *hw = &adapter->hw;
4577         u16 pool = vadapter->pool;
4578         u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4579                       IXGBE_PSRTYPE_UDPHDR |
4580                       IXGBE_PSRTYPE_IPV4HDR |
4581                       IXGBE_PSRTYPE_L2HDR |
4582                       IXGBE_PSRTYPE_IPV6HDR;
4583
4584         if (hw->mac.type == ixgbe_mac_82598EB)
4585                 return;
4586
4587         if (rss_i > 3)
4588                 psrtype |= 2 << 29;
4589         else if (rss_i > 1)
4590                 psrtype |= 1 << 29;
4591
4592         IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4593 }
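
/* Editor's note on the shifts above (per the 82599 datasheet, bits
 * 30:29 of PSRTYPE are the RQPL field, the per-pool RSS queue count
 * expressed as a power of two):
 *
 *   rss_i > 3  ->  2 << 29  ->  RQPL = 2  ->  4-way RSS
 *   rss_i > 1  ->  1 << 29  ->  RQPL = 1  ->  2-way RSS
 */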
4594
4595 /**
4596  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
4597  * @rx_ring: ring to free buffers from
4598  **/
4599 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4600 {
4601         struct device *dev = rx_ring->dev;
4602         unsigned long size;
4603         u16 i;
4604
4605         /* ring already cleared, nothing to do */
4606         if (!rx_ring->rx_buffer_info)
4607                 return;
4608
4609         /* Free all the Rx ring sk_buffs */
4610         for (i = 0; i < rx_ring->count; i++) {
4611                 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
4612
4613                 if (rx_buffer->skb) {
4614                         struct sk_buff *skb = rx_buffer->skb;
4615                         if (IXGBE_CB(skb)->page_released)
4616                                 dma_unmap_page(dev,
4617                                                IXGBE_CB(skb)->dma,
4618                                                ixgbe_rx_bufsz(rx_ring),
4619                                                DMA_FROM_DEVICE);
4620                         dev_kfree_skb(skb);
4621                         rx_buffer->skb = NULL;
4622                 }
4623
4624                 if (!rx_buffer->page)
4625                         continue;
4626
4627                 dma_unmap_page(dev, rx_buffer->dma,
4628                                ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
4629                 __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
4630
4631                 rx_buffer->page = NULL;
4632         }
4633
4634         size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4635         memset(rx_ring->rx_buffer_info, 0, size);
4636
4637         /* Zero out the descriptor ring */
4638         memset(rx_ring->desc, 0, rx_ring->size);
4639
4640         rx_ring->next_to_alloc = 0;
4641         rx_ring->next_to_clean = 0;
4642         rx_ring->next_to_use = 0;
4643 }
4644
4645 static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
4646                                    struct ixgbe_ring *rx_ring)
4647 {
4648         struct ixgbe_adapter *adapter = vadapter->real_adapter;
4649         int index = rx_ring->queue_index + vadapter->rx_base_queue;
4650
4651         /* shutdown specific queue receive and wait for dma to settle */
4652         ixgbe_disable_rx_queue(adapter, rx_ring);
4653         usleep_range(10000, 20000);
4654         ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
4655         ixgbe_clean_rx_ring(rx_ring);
4656         rx_ring->l2_accel_priv = NULL;
4657 }
4658
4659 static int ixgbe_fwd_ring_down(struct net_device *vdev,
4660                                struct ixgbe_fwd_adapter *accel)
4661 {
4662         struct ixgbe_adapter *adapter = accel->real_adapter;
4663         unsigned int rxbase = accel->rx_base_queue;
4664         unsigned int txbase = accel->tx_base_queue;
4665         int i;
4666
4667         netif_tx_stop_all_queues(vdev);
4668
4669         for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4670                 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
4671                 adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
4672         }
4673
4674         for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4675                 adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
4676                 adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
4677         }
4678
4680         return 0;
4681 }
4682
4683 static int ixgbe_fwd_ring_up(struct net_device *vdev,
4684                              struct ixgbe_fwd_adapter *accel)
4685 {
4686         struct ixgbe_adapter *adapter = accel->real_adapter;
4687         unsigned int rxbase, txbase, queues;
4688         int i, baseq, err = 0;
4689
4690         if (!test_bit(accel->pool, &adapter->fwd_bitmask))
4691                 return 0;
4692
4693         baseq = accel->pool * adapter->num_rx_queues_per_pool;
4694         netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
4695                    accel->pool, adapter->num_rx_pools,
4696                    baseq, baseq + adapter->num_rx_queues_per_pool,
4697                    adapter->fwd_bitmask);
4698
4699         accel->netdev = vdev;
4700         accel->rx_base_queue = rxbase = baseq;
4701         accel->tx_base_queue = txbase = baseq;
4702
4703         for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
4704                 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
4705
4706         for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4707                 adapter->rx_ring[rxbase + i]->netdev = vdev;
4708                 adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
4709                 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
4710         }
4711
4712         for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4713                 adapter->tx_ring[txbase + i]->netdev = vdev;
4714                 adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
4715         }
4716
4717         queues = min_t(unsigned int,
4718                        adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
4719         err = netif_set_real_num_tx_queues(vdev, queues);
4720         if (err)
4721                 goto fwd_queue_err;
4722
4723         err = netif_set_real_num_rx_queues(vdev, queues);
4724         if (err)
4725                 goto fwd_queue_err;
4726
4727         if (is_valid_ether_addr(vdev->dev_addr))
4728                 ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
4729
4730         ixgbe_fwd_psrtype(accel);
4731         ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
4732         return err;
4733 fwd_queue_err:
4734         ixgbe_fwd_ring_down(vdev, accel);
4735         return err;
4736 }
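
/* Editor's worked example for the ring mapping above (illustrative):
 * with 4 queues per pool, a macvlan bound to pool 2 gets baseq = 2 * 4
 * = 8, so hardware rings 8-11 (both Rx and Tx) are re-pointed at the
 * macvlan's net_device and a MAC filter for vdev->dev_addr is steered
 * to that pool.
 */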
4737
4738 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
4739 {
4740         struct net_device *upper;
4741         struct list_head *iter;
4742         int err;
4743
4744         netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
4745                 if (netif_is_macvlan(upper)) {
4746                         struct macvlan_dev *dfwd = netdev_priv(upper);
4747                         struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
4748
4749                         if (dfwd->fwd_priv) {
4750                                 err = ixgbe_fwd_ring_up(upper, vadapter);
4751                                 if (err)
4752                                         continue;
4753                         }
4754                 }
4755         }
4756 }
4757
4758 static void ixgbe_configure(struct ixgbe_adapter *adapter)
4759 {
4760         struct ixgbe_hw *hw = &adapter->hw;
4761
4762         ixgbe_configure_pb(adapter);
4763 #ifdef CONFIG_IXGBE_DCB
4764         ixgbe_configure_dcb(adapter);
4765 #endif
4766         /*
4767          * We must restore virtualization before VLANs or else
4768          * the VLVF registers will not be populated
4769          */
4770         ixgbe_configure_virtualization(adapter);
4771
4772         ixgbe_set_rx_mode(adapter->netdev);
4773         ixgbe_restore_vlan(adapter);
4774
4775         switch (hw->mac.type) {
4776         case ixgbe_mac_82599EB:
4777         case ixgbe_mac_X540:
4778                 hw->mac.ops.disable_rx_buff(hw);
4779                 break;
4780         default:
4781                 break;
4782         }
4783
4784         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
4785                 ixgbe_init_fdir_signature_82599(&adapter->hw,
4786                                                 adapter->fdir_pballoc);
4787         } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
4788                 ixgbe_init_fdir_perfect_82599(&adapter->hw,
4789                                               adapter->fdir_pballoc);
4790                 ixgbe_fdir_filter_restore(adapter);
4791         }
4792
4793         switch (hw->mac.type) {
4794         case ixgbe_mac_82599EB:
4795         case ixgbe_mac_X540:
4796                 hw->mac.ops.enable_rx_buff(hw);
4797                 break;
4798         default:
4799                 break;
4800         }
4801
4802 #ifdef CONFIG_IXGBE_DCA
4803         /* configure DCA */
4804         if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
4805                 ixgbe_setup_dca(adapter);
4806 #endif /* CONFIG_IXGBE_DCA */
4807
4808 #ifdef IXGBE_FCOE
4809         /* configure FCoE L2 filters, redirection table, and Rx control */
4810         ixgbe_configure_fcoe(adapter);
4811
4812 #endif /* IXGBE_FCOE */
4813         ixgbe_configure_tx(adapter);
4814         ixgbe_configure_rx(adapter);
4815         ixgbe_configure_dfwd(adapter);
4816 }
4817
4818 /**
4819  * ixgbe_sfp_link_config - set up SFP+ link
4820  * @adapter: pointer to private adapter struct
4821  **/
4822 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
4823 {
4824         /*
4825          * We are assuming the worst case scenario here, and that
4826          * is that an SFP was inserted/removed after the reset
4827          * but before SFP detection was enabled.  As such the best
4828          * solution is to just start searching as soon as we start
4829          */
4830         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
4831                 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
4832
4833         adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
4834         adapter->sfp_poll_time = 0;
4835 }
4836
4837 /**
4838  * ixgbe_non_sfp_link_config - set up non-SFP+ link
4839  * @hw: pointer to private hardware struct
4840  *
4841  * Returns 0 on success, negative on failure
4842  **/
4843 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
4844 {
4845         u32 speed;
4846         bool autoneg, link_up = false;
4847         int ret = IXGBE_ERR_LINK_SETUP;
4848
4849         if (hw->mac.ops.check_link)
4850                 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
4851
4852         if (ret)
4853                 return ret;
4854
4855         speed = hw->phy.autoneg_advertised;
4856         if ((!speed) && (hw->mac.ops.get_link_capabilities))
4857                 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
4858                                                         &autoneg);
4859         if (ret)
4860                 return ret;
4861
4862         if (hw->mac.ops.setup_link)
4863                 ret = hw->mac.ops.setup_link(hw, speed, link_up);
4864
4865         return ret;
4866 }
4867
4868 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
4869 {
4870         struct ixgbe_hw *hw = &adapter->hw;
4871         u32 gpie = 0;
4872
4873         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4874                 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4875                        IXGBE_GPIE_OCD;
4876                 gpie |= IXGBE_GPIE_EIAME;
4877                 /*
4878                  * use EIAM to auto-mask when MSI-X interrupt is asserted
4879                  * this saves a register write for every interrupt
4880                  */
4881                 switch (hw->mac.type) {
4882                 case ixgbe_mac_82598EB:
4883                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4884                         break;
4885                 case ixgbe_mac_82599EB:
4886                 case ixgbe_mac_X540:
4887                 case ixgbe_mac_X550:
4888                 case ixgbe_mac_X550EM_x:
4889                 default:
4890                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4891                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4892                         break;
4893                 }
4894         } else {
4895                 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
4896                  * specifically only auto mask tx and rx interrupts */
4897                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4898         }
4899
4900         /* XXX: to interrupt immediately for EICS writes, enable this */
4901         /* gpie |= IXGBE_GPIE_EIMEN; */
4902
4903         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
4904                 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
4905
4906                 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4907                 case IXGBE_82599_VMDQ_8Q_MASK:
4908                         gpie |= IXGBE_GPIE_VTMODE_16;
4909                         break;
4910                 case IXGBE_82599_VMDQ_4Q_MASK:
4911                         gpie |= IXGBE_GPIE_VTMODE_32;
4912                         break;
4913                 default:
4914                         gpie |= IXGBE_GPIE_VTMODE_64;
4915                         break;
4916                 }
4917         }
4918
4919         /* Enable thermal overheat sensor interrupt */
4920         if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
4921                 switch (adapter->hw.mac.type) {
4922                 case ixgbe_mac_82599EB:
4923                         gpie |= IXGBE_SDP0_GPIEN_8259X;
4924                         break;
4925                 default:
4926                         break;
4927                 }
4928         }
4929
4930         /* Enable fan failure interrupt */
4931         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
4932                 gpie |= IXGBE_SDP1_GPIEN(hw);
4933
4934         switch (hw->mac.type) {
4935         case ixgbe_mac_82599EB:
4936                 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
4937                 break;
4938         case ixgbe_mac_X550EM_x:
4939                 gpie |= IXGBE_SDP0_GPIEN_X540;
4940                 break;
4941         default:
4942                 break;
4943         }
4944
4945         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4946 }
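
/* Editor's note: the VMDQ mask -> VTMODE mapping above is inverse by
 * design, since the 128 hardware queues are split between pools and
 * queues per pool:
 *
 *   8 queues/pool -> 16 pools -> IXGBE_GPIE_VTMODE_16
 *   4 queues/pool -> 32 pools -> IXGBE_GPIE_VTMODE_32
 *   2 queues/pool -> 64 pools -> IXGBE_GPIE_VTMODE_64 (default)
 */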
4947
4948 static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
4949 {
4950         struct ixgbe_hw *hw = &adapter->hw;
4951         int err;
4952         u32 ctrl_ext;
4953
4954         ixgbe_get_hw_control(adapter);
4955         ixgbe_setup_gpie(adapter);
4956
4957         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4958                 ixgbe_configure_msix(adapter);
4959         else
4960                 ixgbe_configure_msi_and_legacy(adapter);
4961
4962         /* enable the optics for 82599 SFP+ fiber */
4963         if (hw->mac.ops.enable_tx_laser)
4964                 hw->mac.ops.enable_tx_laser(hw);
4965
4966         if (hw->phy.ops.set_phy_power)
4967                 hw->phy.ops.set_phy_power(hw, true);
4968
4969         smp_mb__before_atomic();
4970         clear_bit(__IXGBE_DOWN, &adapter->state);
4971         ixgbe_napi_enable_all(adapter);
4972
4973         if (ixgbe_is_sfp(hw)) {
4974                 ixgbe_sfp_link_config(adapter);
4975         } else {
4976                 err = ixgbe_non_sfp_link_config(hw);
4977                 if (err)
4978                         e_err(probe, "link_config FAILED %d\n", err);
4979         }
4980
4981         /* clear any pending interrupts, may auto mask */
4982         IXGBE_READ_REG(hw, IXGBE_EICR);
4983         ixgbe_irq_enable(adapter, true, true);
4984
4985         /*
4986          * If this adapter has a fan, check to see if we had a failure
4987          * before we enabled the interrupt.
4988          */
4989         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
4990                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4991                 if (esdp & IXGBE_ESDP_SDP1)
4992                         e_crit(drv, "Fan has stopped, replace the adapter\n");
4993         }
4994
4995         /* bring the link up in the watchdog; this could race with our first
4996          * link up interrupt but shouldn't be a problem */
4997         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4998         adapter->link_check_timeout = jiffies;
4999         mod_timer(&adapter->service_timer, jiffies);
5000
5001         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
5002         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5003         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5004         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5005 }
5006
5007 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5008 {
5009         WARN_ON(in_interrupt());
5010         /* put off any impending NetWatchDogTimeout */
5011         adapter->netdev->trans_start = jiffies;
5012
5013         while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5014                 usleep_range(1000, 2000);
5015         ixgbe_down(adapter);
5016         /*
5017          * If SR-IOV enabled then wait a bit before bringing the adapter
5018          * back up to give the VFs time to respond to the reset.  The
5019          * two second wait is based upon the watchdog timer cycle in
5020          * the VF driver.
5021          */
5022         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5023                 msleep(2000);
5024         ixgbe_up(adapter);
5025         clear_bit(__IXGBE_RESETTING, &adapter->state);
5026 }
5027
5028 void ixgbe_up(struct ixgbe_adapter *adapter)
5029 {
5030         /* hardware has been reset, we need to reload some things */
5031         ixgbe_configure(adapter);
5032
5033         ixgbe_up_complete(adapter);
5034 }
5035
5036 void ixgbe_reset(struct ixgbe_adapter *adapter)
5037 {
5038         struct ixgbe_hw *hw = &adapter->hw;
5039         struct net_device *netdev = adapter->netdev;
5040         int err;
5041         u8 old_addr[ETH_ALEN];
5042
5043         if (ixgbe_removed(hw->hw_addr))
5044                 return;
5045         /* lock SFP init bit to prevent race conditions with the watchdog */
5046         while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5047                 usleep_range(1000, 2000);
5048
5049         /* clear all SFP and link config related flags while holding SFP_INIT */
5050         adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5051                              IXGBE_FLAG2_SFP_NEEDS_RESET);
5052         adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5053
5054         err = hw->mac.ops.init_hw(hw);
5055         switch (err) {
5056         case 0:
5057         case IXGBE_ERR_SFP_NOT_PRESENT:
5058         case IXGBE_ERR_SFP_NOT_SUPPORTED:
5059                 break;
5060         case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5061                 e_dev_err("master disable timed out\n");
5062                 break;
5063         case IXGBE_ERR_EEPROM_VERSION:
5064                 /* We are running on a pre-production device, log a warning */
5065                 e_dev_warn("This device is a pre-production adapter/LOM. "
5066                            "Please be aware there may be issues associated with "
5067                            "your hardware.  If you are experiencing problems "
5068                            "please contact your Intel or hardware "
5069                            "representative who provided you with this "
5070                            "hardware.\n");
5071                 break;
5072         default:
5073                 e_dev_err("Hardware Error: %d\n", err);
5074         }
5075
5076         clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5077         /* do not flush user set addresses */
5078         memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
5079         ixgbe_flush_sw_mac_table(adapter);
5080         ixgbe_mac_set_default_filter(adapter, old_addr);
5081
5082         /* update SAN MAC vmdq pool selection */
5083         if (hw->mac.san_mac_rar_index)
5084                 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5085
5086         if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5087                 ixgbe_ptp_reset(adapter);
5088
5089         if (hw->phy.ops.set_phy_power) {
5090                 if (!netif_running(adapter->netdev) && !adapter->wol)
5091                         hw->phy.ops.set_phy_power(hw, false);
5092                 else
5093                         hw->phy.ops.set_phy_power(hw, true);
5094         }
5095 }
5096
5097 /**
5098  * ixgbe_clean_tx_ring - Free Tx Buffers
5099  * @tx_ring: ring to be cleaned
5100  **/
5101 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5102 {
5103         struct ixgbe_tx_buffer *tx_buffer_info;
5104         unsigned long size;
5105         u16 i;
5106
5107         /* ring already cleared, nothing to do */
5108         if (!tx_ring->tx_buffer_info)
5109                 return;
5110
5111         /* Free all the Tx ring sk_buffs */
5112         for (i = 0; i < tx_ring->count; i++) {
5113                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5114                 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
5115         }
5116
5117         netdev_tx_reset_queue(txring_txq(tx_ring));
5118
5119         size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5120         memset(tx_ring->tx_buffer_info, 0, size);
5121
5122         /* Zero out the descriptor ring */
5123         memset(tx_ring->desc, 0, tx_ring->size);
5124
5125         tx_ring->next_to_use = 0;
5126         tx_ring->next_to_clean = 0;
5127 }
5128
5129 /**
5130  * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
5131  * @adapter: board private structure
5132  **/
5133 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
5134 {
5135         int i;
5136
5137         for (i = 0; i < adapter->num_rx_queues; i++)
5138                 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
5139 }
5140
5141 /**
5142  * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
5143  * @adapter: board private structure
5144  **/
5145 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
5146 {
5147         int i;
5148
5149         for (i = 0; i < adapter->num_tx_queues; i++)
5150                 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
5151 }
5152
5153 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
5154 {
5155         struct hlist_node *node2;
5156         struct ixgbe_fdir_filter *filter;
5157
5158         spin_lock(&adapter->fdir_perfect_lock);
5159
5160         hlist_for_each_entry_safe(filter, node2,
5161                                   &adapter->fdir_filter_list, fdir_node) {
5162                 hlist_del(&filter->fdir_node);
5163                 kfree(filter);
5164         }
5165         adapter->fdir_filter_count = 0;
5166
5167         spin_unlock(&adapter->fdir_perfect_lock);
5168 }
5169
5170 void ixgbe_down(struct ixgbe_adapter *adapter)
5171 {
5172         struct net_device *netdev = adapter->netdev;
5173         struct ixgbe_hw *hw = &adapter->hw;
5174         struct net_device *upper;
5175         struct list_head *iter;
5176         int i;
5177
5178         /* signal that we are down to the interrupt handler */
5179         if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
5180                 return; /* do nothing if already down */
5181
5182         /* disable receives */
5183         hw->mac.ops.disable_rx(hw);
5184
5185         /* disable all enabled rx queues */
5186         for (i = 0; i < adapter->num_rx_queues; i++)
5187                 /* this call also flushes the previous write */
5188                 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
5189
5190         usleep_range(10000, 20000);
5191
5192         netif_tx_stop_all_queues(netdev);
5193
5194         /* call carrier off first to avoid false dev_watchdog timeouts */
5195         netif_carrier_off(netdev);
5196         netif_tx_disable(netdev);
5197
5198         /* disable any upper devices */
5199         netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
5200                 if (netif_is_macvlan(upper)) {
5201                         struct macvlan_dev *vlan = netdev_priv(upper);
5202
5203                         if (vlan->fwd_priv) {
5204                                 netif_tx_stop_all_queues(upper);
5205                                 netif_carrier_off(upper);
5206                                 netif_tx_disable(upper);
5207                         }
5208                 }
5209         }
5210
5211         ixgbe_irq_disable(adapter);
5212
5213         ixgbe_napi_disable_all(adapter);
5214
5215         adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
5216                              IXGBE_FLAG2_RESET_REQUESTED);
5217         adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5218
5219         del_timer_sync(&adapter->service_timer);
5220
5221         if (adapter->num_vfs) {
5222                 /* Clear EITR Select mapping */
5223                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
5224
5225                 /* Mark all the VFs as inactive */
5226                 for (i = 0; i < adapter->num_vfs; i++)
5227                         adapter->vfinfo[i].clear_to_send = false;
5228
5229                 /* ping all the active vfs to let them know we are going down */
5230                 ixgbe_ping_all_vfs(adapter);
5231
5232                 /* Disable all VFTE/VFRE TX/RX */
5233                 ixgbe_disable_tx_rx(adapter);
5234         }
5235
5236         /* disable transmits in the hardware now that interrupts are off */
5237         for (i = 0; i < adapter->num_tx_queues; i++) {
5238                 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
5239                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5240         }
5241
5242         /* Disable the Tx DMA engine on 82599 and later MACs */
5243         switch (hw->mac.type) {
5244         case ixgbe_mac_82599EB:
5245         case ixgbe_mac_X540:
5246         case ixgbe_mac_X550:
5247         case ixgbe_mac_X550EM_x:
5248                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5249                                 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5250                                  ~IXGBE_DMATXCTL_TE));
5251                 break;
5252         default:
5253                 break;
5254         }
5255
5256         if (!pci_channel_offline(adapter->pdev))
5257                 ixgbe_reset(adapter);
5258
5259         /* power down the optics for 82599 SFP+ fiber */
5260         if (hw->mac.ops.disable_tx_laser)
5261                 hw->mac.ops.disable_tx_laser(hw);
5262
5263         ixgbe_clean_all_tx_rings(adapter);
5264         ixgbe_clean_all_rx_rings(adapter);
5265 }
5266
5267 /**
5268  * ixgbe_tx_timeout - Respond to a Tx Hang
5269  * @netdev: network interface device structure
5270  **/
5271 static void ixgbe_tx_timeout(struct net_device *netdev)
5272 {
5273         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5274
5275         /* Do the reset outside of interrupt context */
5276         ixgbe_tx_timeout_reset(adapter);
5277 }
5278
5279 /**
5280  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
5281  * @adapter: board private structure to initialize
5282  *
5283  * ixgbe_sw_init initializes the Adapter private data structure.
5284  * Fields are initialized based on PCI device information and
5285  * OS network device settings (MTU size).
5286  **/
5287 static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5288 {
5289         struct ixgbe_hw *hw = &adapter->hw;
5290         struct pci_dev *pdev = adapter->pdev;
5291         unsigned int rss, fdir;
5292         u32 fwsm;
5293 #ifdef CONFIG_IXGBE_DCB
5294         int j;
5295         struct tc_configuration *tc;
5296 #endif
5297
5298         /* PCI config space info */
5299
5300         hw->vendor_id = pdev->vendor;
5301         hw->device_id = pdev->device;
5302         hw->revision_id = pdev->revision;
5303         hw->subsystem_vendor_id = pdev->subsystem_vendor;
5304         hw->subsystem_device_id = pdev->subsystem_device;
5305
5306         /* Set common capability flags and settings */
5307         rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
5308         adapter->ring_feature[RING_F_RSS].limit = rss;
5309         adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
5310         adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
5311         adapter->max_q_vectors = MAX_Q_VECTORS_82599;
5312         adapter->atr_sample_rate = 20;
5313         fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
5314         adapter->ring_feature[RING_F_FDIR].limit = fdir;
5315         adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
5316 #ifdef CONFIG_IXGBE_DCA
5317         adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
5318 #endif
5319 #ifdef IXGBE_FCOE
5320         adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
5321         adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5322 #ifdef CONFIG_IXGBE_DCB
5323         /* Default traffic class to use for FCoE */
5324         adapter->fcoe.up = IXGBE_FCOE_DEFTC;
5325 #endif /* CONFIG_IXGBE_DCB */
5326 #endif /* IXGBE_FCOE */
5327
5328         adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
5329                                      hw->mac.num_rar_entries,
5330                                      GFP_ATOMIC);
5331
5332         /* Set MAC specific capability flags and exceptions */
5333         switch (hw->mac.type) {
5334         case ixgbe_mac_82598EB:
5335                 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
5336                 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
5337
5338                 if (hw->device_id == IXGBE_DEV_ID_82598AT)
5339                         adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
5340
5341                 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
5342                 adapter->ring_feature[RING_F_FDIR].limit = 0;
5343                 adapter->atr_sample_rate = 0;
5344                 adapter->fdir_pballoc = 0;
5345 #ifdef IXGBE_FCOE
5346                 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
5347                 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5348 #ifdef CONFIG_IXGBE_DCB
5349                 adapter->fcoe.up = 0;
5350 #endif /* CONFIG_IXGBE_DCB */
5351 #endif /* IXGBE_FCOE */
5352                 break;
5353         case ixgbe_mac_82599EB:
5354                 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
5355                         adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5356                 break;
5357         case ixgbe_mac_X540:
5358                 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
5359                 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5360                         adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5361                 break;
5362         case ixgbe_mac_X550EM_x:
5363         case ixgbe_mac_X550:
5364 #ifdef CONFIG_IXGBE_DCA
5365                 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
5366 #endif
5367 #ifdef CONFIG_IXGBE_VXLAN
5368                 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
5369 #endif
5370                 break;
5371         default:
5372                 break;
5373         }
5374
5375 #ifdef IXGBE_FCOE
5376         /* FCoE support exists, always init the FCoE lock */
5377         spin_lock_init(&adapter->fcoe.lock);
5378
5379 #endif
5380         /* n-tuple support exists, always init our spinlock */
5381         spin_lock_init(&adapter->fdir_perfect_lock);
5382
5383 #ifdef CONFIG_IXGBE_DCB
5384         switch (hw->mac.type) {
5385         case ixgbe_mac_X540:
5386         case ixgbe_mac_X550:
5387         case ixgbe_mac_X550EM_x:
5388                 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
5389                 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
5390                 break;
5391         default:
5392                 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
5393                 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
5394                 break;
5395         }
5396
5397         /* Configure DCB traffic classes */
5398         for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
5399                 tc = &adapter->dcb_cfg.tc_config[j];
5400                 tc->path[DCB_TX_CONFIG].bwg_id = 0;
5401                 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
5402                 tc->path[DCB_RX_CONFIG].bwg_id = 0;
5403                 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
5404                 tc->dcb_pfc = pfc_disabled;
5405         }
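
        /* Editor's note: 12 + (j & 1) alternates 12/13 across the eight
         * traffic classes above, so the per-TC defaults sum to a full
         * link: 4 * 12 + 4 * 13 = 100 percent.
         */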
5406
5407         /* Initialize default user priority to traffic class mapping, UPx->TC0 */
5408         tc = &adapter->dcb_cfg.tc_config[0];
5409         tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
5410         tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
5411
5412         adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
5413         adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5414         adapter->dcb_cfg.pfc_mode_enable = false;
5415         adapter->dcb_set_bitmap = 0x00;
5416         adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
5417         memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
5418                sizeof(adapter->temp_dcb_cfg));
5419
5420 #endif
5421
5422         /* default flow control settings */
5423         hw->fc.requested_mode = ixgbe_fc_full;
5424         hw->fc.current_mode = ixgbe_fc_full;    /* init for ethtool output */
5425         ixgbe_pbthresh_setup(adapter);
5426         hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
5427         hw->fc.send_xon = true;
5428         hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
5429
5430 #ifdef CONFIG_PCI_IOV
5431         if (max_vfs > 0)
5432                 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
5433
5434         /* assign number of SR-IOV VFs */
5435         if (hw->mac.type != ixgbe_mac_82598EB) {
5436                 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
5437                         adapter->num_vfs = 0;
5438                         e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
5439                 } else {
5440                         adapter->num_vfs = max_vfs;
5441                 }
5442         }
5443 #endif /* CONFIG_PCI_IOV */
5444
5445         /* enable ITR by default in dynamic mode */
5446         adapter->rx_itr_setting = 1;
5447         adapter->tx_itr_setting = 1;
5448
5449         /* set default ring sizes */
5450         adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
5451         adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
5452
5453         /* set default work limits */
5454         adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
5455
5456         /* initialize eeprom parameters */
5457         if (ixgbe_init_eeprom_params_generic(hw)) {
5458                 e_dev_err("EEPROM initialization failed\n");
5459                 return -EIO;
5460         }
5461
5462         /* PF holds first pool slot */
5463         set_bit(0, &adapter->fwd_bitmask);
5464         set_bit(__IXGBE_DOWN, &adapter->state);
5465
5466         return 0;
5467 }
5468
5469 /**
5470  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
5471  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
5472  *
5473  * Return 0 on success, negative on failure
5474  **/
5475 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
5476 {
5477         struct device *dev = tx_ring->dev;
5478         int orig_node = dev_to_node(dev);
5479         int ring_node = -1;
5480         int size;
5481
5482         size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5483
5484         if (tx_ring->q_vector)
5485                 ring_node = tx_ring->q_vector->numa_node;
5486
5487         tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
5488         if (!tx_ring->tx_buffer_info)
5489                 tx_ring->tx_buffer_info = vzalloc(size);
5490         if (!tx_ring->tx_buffer_info)
5491                 goto err;
5492
5493         u64_stats_init(&tx_ring->syncp);
5494
5495         /* round up to nearest 4K */
5496         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
5497         tx_ring->size = ALIGN(tx_ring->size, 4096);
5498
5499         set_dev_node(dev, ring_node);
5500         tx_ring->desc = dma_alloc_coherent(dev,
5501                                            tx_ring->size,
5502                                            &tx_ring->dma,
5503                                            GFP_KERNEL);
5504         set_dev_node(dev, orig_node);
5505         if (!tx_ring->desc)
5506                 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
5507                                                    &tx_ring->dma, GFP_KERNEL);
5508         if (!tx_ring->desc)
5509                 goto err;
5510
5511         tx_ring->next_to_use = 0;
5512         tx_ring->next_to_clean = 0;
5513         return 0;
5514
5515 err:
5516         vfree(tx_ring->tx_buffer_info);
5517         tx_ring->tx_buffer_info = NULL;
5518         dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
5519         return -ENOMEM;
5520 }
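
/* Editor's note on the allocation pattern above (shared with the Rx
 * path below): buffer and descriptor memory is first requested from
 * the ring's own NUMA node (vzalloc_node(), and dma_alloc_coherent()
 * bracketed by set_dev_node()); only if that node has no memory does
 * the driver fall back to an any-node allocation, trading locality
 * for availability.
 */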
5521
5522 /**
5523  * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
5524  * @adapter: board private structure
5525  *
5526  * If this function returns with an error, then it's possible one or
5527  * more of the rings is populated (while the rest are not).  It is the
5528  * caller's duty to clean those orphaned rings.
5529  *
5530  * Return 0 on success, negative on failure
5531  **/
5532 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5533 {
5534         int i, err = 0;
5535
5536         for (i = 0; i < adapter->num_tx_queues; i++) {
5537                 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
5538                 if (!err)
5539                         continue;
5540
5541                 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
5542                 goto err_setup_tx;
5543         }
5544
5545         return 0;
5546 err_setup_tx:
5547         /* rewind the index freeing the rings as we go */
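        /* e.g. a failure at i == 2 frees rings 1 and 0 below, leaving
         * nothing allocated when the error is returned (editorial note)
         */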
5548         while (i--)
5549                 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5550         return err;
5551 }
5552
5553 /**
5554  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
5555  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
5556  *
5557  * Returns 0 on success, negative on failure
5558  **/
5559 int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
5560 {
5561         struct device *dev = rx_ring->dev;
5562         int orig_node = dev_to_node(dev);
5563         int ring_node = -1;
5564         int size;
5565
5566         size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5567
5568         if (rx_ring->q_vector)
5569                 ring_node = rx_ring->q_vector->numa_node;
5570
5571         rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
5572         if (!rx_ring->rx_buffer_info)
5573                 rx_ring->rx_buffer_info = vzalloc(size);
5574         if (!rx_ring->rx_buffer_info)
5575                 goto err;
5576
5577         u64_stats_init(&rx_ring->syncp);
5578
5579         /* Round up to nearest 4K */
5580         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5581         rx_ring->size = ALIGN(rx_ring->size, 4096);
5582
5583         set_dev_node(dev, ring_node);
5584         rx_ring->desc = dma_alloc_coherent(dev,
5585                                            rx_ring->size,
5586                                            &rx_ring->dma,
5587                                            GFP_KERNEL);
5588         set_dev_node(dev, orig_node);
5589         if (!rx_ring->desc)
5590                 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
5591                                                    &rx_ring->dma, GFP_KERNEL);
5592         if (!rx_ring->desc)
5593                 goto err;
5594
5595         rx_ring->next_to_clean = 0;
5596         rx_ring->next_to_use = 0;
5597
5598         return 0;
5599 err:
5600         vfree(rx_ring->rx_buffer_info);
5601         rx_ring->rx_buffer_info = NULL;
5602         dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
5603         return -ENOMEM;
5604 }
5605
5606 /**
5607  * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
5608  * @adapter: board private structure
5609  *
5610  * If this function returns with an error, then it's possible one or
5611  * more of the rings is populated (while the rest are not).  It is the
5612  * caller's duty to clean those orphaned rings.
5613  *
5614  * Return 0 on success, negative on failure
5615  **/
5616 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5617 {
5618         int i, err = 0;
5619
5620         for (i = 0; i < adapter->num_rx_queues; i++) {
5621                 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
5622                 if (!err)
5623                         continue;
5624
5625                 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
5626                 goto err_setup_rx;
5627         }
5628
5629 #ifdef IXGBE_FCOE
5630         err = ixgbe_setup_fcoe_ddp_resources(adapter);
5631         if (!err)
5632 #endif
5633                 return 0;
5634 err_setup_rx:
5635         /* rewind the index freeing the rings as we go */
5636         while (i--)
5637                 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5638         return err;
5639 }
5640
5641 /**
5642  * ixgbe_free_tx_resources - Free Tx Resources per Queue
5643  * @tx_ring: Tx descriptor ring for a specific queue
5644  *
5645  * Free all transmit software resources
5646  **/
5647 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5648 {
5649         ixgbe_clean_tx_ring(tx_ring);
5650
5651         vfree(tx_ring->tx_buffer_info);
5652         tx_ring->tx_buffer_info = NULL;
5653
5654         /* if not set, then don't free */
5655         if (!tx_ring->desc)
5656                 return;
5657
5658         dma_free_coherent(tx_ring->dev, tx_ring->size,
5659                           tx_ring->desc, tx_ring->dma);
5660
5661         tx_ring->desc = NULL;
5662 }
5663
5664 /**
5665  * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
5666  * @adapter: board private structure
5667  *
5668  * Free all transmit software resources
5669  **/
5670 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5671 {
5672         int i;
5673
5674         for (i = 0; i < adapter->num_tx_queues; i++)
5675                 if (adapter->tx_ring[i]->desc)
5676                         ixgbe_free_tx_resources(adapter->tx_ring[i]);
5677 }
5678
5679 /**
5680  * ixgbe_free_rx_resources - Free Rx Resources
5681  * @rx_ring: ring to clean the resources from
5682  *
5683  * Free all receive software resources
5684  **/
5685 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5686 {
5687         ixgbe_clean_rx_ring(rx_ring);
5688
5689         vfree(rx_ring->rx_buffer_info);
5690         rx_ring->rx_buffer_info = NULL;
5691
5692         /* if not set, then don't free */
5693         if (!rx_ring->desc)
5694                 return;
5695
5696         dma_free_coherent(rx_ring->dev, rx_ring->size,
5697                           rx_ring->desc, rx_ring->dma);
5698
5699         rx_ring->desc = NULL;
5700 }
5701
5702 /**
5703  * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
5704  * @adapter: board private structure
5705  *
5706  * Free all receive software resources
5707  **/
5708 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5709 {
5710         int i;
5711
5712 #ifdef IXGBE_FCOE
5713         ixgbe_free_fcoe_ddp_resources(adapter);
5714
5715 #endif
5716         for (i = 0; i < adapter->num_rx_queues; i++)
5717                 if (adapter->rx_ring[i]->desc)
5718                         ixgbe_free_rx_resources(adapter->rx_ring[i]);
5719 }
5720
5721 /**
5722  * ixgbe_change_mtu - Change the Maximum Transfer Unit
5723  * @netdev: network interface device structure
5724  * @new_mtu: new value for maximum frame size
5725  *
5726  * Returns 0 on success, negative on failure
5727  **/
5728 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5729 {
5730         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5731         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
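        /* Worked example (editorial note): an MTU of 9000 gives a max frame
         * of 9000 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 9018 bytes, inside
         * the adapter's jumbo frame limit.
         */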
5732
5733         /* MTU < 68 is an error and causes problems on some kernels */
5734         if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5735                 return -EINVAL;
5736
5737         /*
5738          * For 82599EB we cannot allow legacy VFs to enable their receive
5739          * paths when MTU greater than 1500 is configured.  So display a
5740          * warning that legacy VFs will be disabled.
5741          */
5742         if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
5743             (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
5744             (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
5745                 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
5746
5747         e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5748
5749         /* must set new MTU before calling down or up */
5750         netdev->mtu = new_mtu;
5751
5752         if (netif_running(netdev))
5753                 ixgbe_reinit_locked(adapter);
5754
5755         return 0;
5756 }
5757
5758 /**
5759  * ixgbe_open - Called when a network interface is made active
5760  * @netdev: network interface device structure
5761  *
5762  * Returns 0 on success, negative value on failure
5763  *
5764  * The open entry point is called when a network interface is made
5765  * active by the system (IFF_UP).  At this point all resources needed
5766  * for transmit and receive operations are allocated, the interrupt
5767  * handler is registered with the OS, the watchdog timer is started,
5768  * and the stack is notified that the interface is ready.
5769  **/
5770 static int ixgbe_open(struct net_device *netdev)
5771 {
5772         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5773         struct ixgbe_hw *hw = &adapter->hw;
5774         int err, queues;
5775
5776         /* disallow open during test */
5777         if (test_bit(__IXGBE_TESTING, &adapter->state))
5778                 return -EBUSY;
5779
5780         netif_carrier_off(netdev);
5781
5782         /* allocate transmit descriptors */
5783         err = ixgbe_setup_all_tx_resources(adapter);
5784         if (err)
5785                 goto err_setup_tx;
5786
5787         /* allocate receive descriptors */
5788         err = ixgbe_setup_all_rx_resources(adapter);
5789         if (err)
5790                 goto err_setup_rx;
5791
5792         ixgbe_configure(adapter);
5793
5794         err = ixgbe_request_irq(adapter);
5795         if (err)
5796                 goto err_req_irq;
5797
5798         /* Notify the stack of the actual queue counts. */
5799         if (adapter->num_rx_pools > 1)
5800                 queues = adapter->num_rx_queues_per_pool;
5801         else
5802                 queues = adapter->num_tx_queues;
5803
5804         err = netif_set_real_num_tx_queues(netdev, queues);
5805         if (err)
5806                 goto err_set_queues;
5807
5808         if (adapter->num_rx_pools > 1 &&
5809             adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
5810                 queues = IXGBE_MAX_L2A_QUEUES;
5811         else
5812                 queues = adapter->num_rx_queues;
5813         err = netif_set_real_num_rx_queues(netdev, queues);
5814         if (err)
5815                 goto err_set_queues;
5816
5817         ixgbe_ptp_init(adapter);
5818
5819         ixgbe_up_complete(adapter);
5820
5821         ixgbe_clear_vxlan_port(adapter);
5822 #ifdef CONFIG_IXGBE_VXLAN
5823         vxlan_get_rx_port(netdev);
5824 #endif
5825
5826         return 0;
5827
5828 err_set_queues:
5829         ixgbe_free_irq(adapter);
5830 err_req_irq:
5831         ixgbe_free_all_rx_resources(adapter);
5832         if (hw->phy.ops.set_phy_power && !adapter->wol)
5833                 hw->phy.ops.set_phy_power(&adapter->hw, false);
5834 err_setup_rx:
5835         ixgbe_free_all_tx_resources(adapter);
5836 err_setup_tx:
5837         ixgbe_reset(adapter);
5838
5839         return err;
5840 }
5841
5842 static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
5843 {
5844         ixgbe_ptp_suspend(adapter);
5845
5846         if (adapter->hw.phy.ops.enter_lplu) {
5847                 adapter->hw.phy.reset_disable = true;
5848                 ixgbe_down(adapter);
5849                 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
5850                 adapter->hw.phy.reset_disable = false;
5851         } else {
5852                 ixgbe_down(adapter);
5853         }
5854
5855         ixgbe_free_irq(adapter);
5856
5857         ixgbe_free_all_tx_resources(adapter);
5858         ixgbe_free_all_rx_resources(adapter);
5859 }
5860
5861 /**
5862  * ixgbe_close - Disables a network interface
5863  * @netdev: network interface device structure
5864  *
5865  * Always returns 0; the close entry point is not allowed to fail
5866  *
5867  * The close entry point is called when an interface is de-activated
5868  * by the OS.  The hardware is still under the drivers control, but
5869  * needs to be disabled.  A global MAC reset is issued to stop the
5870  * hardware, and all transmit and receive resources are freed.
5871  **/
5872 static int ixgbe_close(struct net_device *netdev)
5873 {
5874         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5875
5876         ixgbe_ptp_stop(adapter);
5877
5878         ixgbe_close_suspend(adapter);
5879
5880         ixgbe_fdir_filter_exit(adapter);
5881
5882         ixgbe_release_hw_control(adapter);
5883
5884         return 0;
5885 }
5886
5887 #ifdef CONFIG_PM
5888 static int ixgbe_resume(struct pci_dev *pdev)
5889 {
5890         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5891         struct net_device *netdev = adapter->netdev;
5892         int err;
5893
5894         adapter->hw.hw_addr = adapter->io_addr;
5895         pci_set_power_state(pdev, PCI_D0);
5896         pci_restore_state(pdev);
5897         /*
5898          * pci_restore_state clears dev->state_saved so call
5899          * pci_save_state to restore it.
5900          */
5901         pci_save_state(pdev);
5902
5903         err = pci_enable_device_mem(pdev);
5904         if (err) {
5905                 e_dev_err("Cannot enable PCI device from suspend\n");
5906                 return err;
5907         }
5908         smp_mb__before_atomic();
5909         clear_bit(__IXGBE_DISABLED, &adapter->state);
5910         pci_set_master(pdev);
5911
5912         pci_wake_from_d3(pdev, false);
5913
5914         ixgbe_reset(adapter);
5915
5916         IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5917
5918         rtnl_lock();
5919         err = ixgbe_init_interrupt_scheme(adapter);
5920         if (!err && netif_running(netdev))
5921                 err = ixgbe_open(netdev);
5922
5923         rtnl_unlock();
5924
5925         if (err)
5926                 return err;
5927
5928         netif_device_attach(netdev);
5929
5930         return 0;
5931 }
5932 #endif /* CONFIG_PM */
5933
5934 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5935 {
5936         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5937         struct net_device *netdev = adapter->netdev;
5938         struct ixgbe_hw *hw = &adapter->hw;
5939         u32 ctrl, fctrl;
5940         u32 wufc = adapter->wol;
5941 #ifdef CONFIG_PM
5942         int retval = 0;
5943 #endif
5944
5945         netif_device_detach(netdev);
5946
5947         rtnl_lock();
5948         if (netif_running(netdev))
5949                 ixgbe_close_suspend(adapter);
5950         rtnl_unlock();
5951
5952         ixgbe_clear_interrupt_scheme(adapter);
5953
5954 #ifdef CONFIG_PM
5955         retval = pci_save_state(pdev);
5956         if (retval)
5957                 return retval;
5958
5959 #endif
5960         if (hw->mac.ops.stop_link_on_d3)
5961                 hw->mac.ops.stop_link_on_d3(hw);
5962
5963         if (wufc) {
5964                 ixgbe_set_rx_mode(netdev);
5965
5966                 /* enable the optics for 82599 SFP+ fiber as we can WoL */
5967                 if (hw->mac.ops.enable_tx_laser)
5968                         hw->mac.ops.enable_tx_laser(hw);
5969
5970                 /* turn on all-multi mode if wake on multicast is enabled */
5971                 if (wufc & IXGBE_WUFC_MC) {
5972                         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5973                         fctrl |= IXGBE_FCTRL_MPE;
5974                         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5975                 }
5976
5977                 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5978                 ctrl |= IXGBE_CTRL_GIO_DIS;
5979                 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5980
5981                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5982         } else {
5983                 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5984                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5985         }
5986
5987         switch (hw->mac.type) {
5988         case ixgbe_mac_82598EB:
5989                 pci_wake_from_d3(pdev, false);
5990                 break;
5991         case ixgbe_mac_82599EB:
5992         case ixgbe_mac_X540:
5993         case ixgbe_mac_X550:
5994         case ixgbe_mac_X550EM_x:
5995                 pci_wake_from_d3(pdev, !!wufc);
5996                 break;
5997         default:
5998                 break;
5999         }
6000
6001         *enable_wake = !!wufc;
6002         if (hw->phy.ops.set_phy_power && !*enable_wake)
6003                 hw->phy.ops.set_phy_power(hw, false);
6004
6005         ixgbe_release_hw_control(adapter);
6006
6007         if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6008                 pci_disable_device(pdev);
6009
6010         return 0;
6011 }
6012
6013 #ifdef CONFIG_PM
6014 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
6015 {
6016         int retval;
6017         bool wake;
6018
6019         retval = __ixgbe_shutdown(pdev, &wake);
6020         if (retval)
6021                 return retval;
6022
6023         if (wake) {
6024                 pci_prepare_to_sleep(pdev);
6025         } else {
6026                 pci_wake_from_d3(pdev, false);
6027                 pci_set_power_state(pdev, PCI_D3hot);
6028         }
6029
6030         return 0;
6031 }
6032 #endif /* CONFIG_PM */
6033
6034 static void ixgbe_shutdown(struct pci_dev *pdev)
6035 {
6036         bool wake;
6037
6038         __ixgbe_shutdown(pdev, &wake);
6039
6040         if (system_state == SYSTEM_POWER_OFF) {
6041                 pci_wake_from_d3(pdev, wake);
6042                 pci_set_power_state(pdev, PCI_D3hot);
6043         }
6044 }
6045
6046 /**
6047  * ixgbe_update_stats - Update the board statistics counters.
6048  * @adapter: board private structure
6049  **/
6050 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
6051 {
6052         struct net_device *netdev = adapter->netdev;
6053         struct ixgbe_hw *hw = &adapter->hw;
6054         struct ixgbe_hw_stats *hwstats = &adapter->stats;
6055         u64 total_mpc = 0;
6056         u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
6057         u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
6058         u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
6059         u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
6060
6061         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6062             test_bit(__IXGBE_RESETTING, &adapter->state))
6063                 return;
6064
6065         if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
6066                 u64 rsc_count = 0;
6067                 u64 rsc_flush = 0;
6068                 for (i = 0; i < adapter->num_rx_queues; i++) {
6069                         rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
6070                         rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
6071                 }
6072                 adapter->rsc_total_count = rsc_count;
6073                 adapter->rsc_total_flush = rsc_flush;
6074         }
6075
6076         for (i = 0; i < adapter->num_rx_queues; i++) {
6077                 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
6078                 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
6079                 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
6080                 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
6081                 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
6082                 bytes += rx_ring->stats.bytes;
6083                 packets += rx_ring->stats.packets;
6084         }
6085         adapter->non_eop_descs = non_eop_descs;
6086         adapter->alloc_rx_page_failed = alloc_rx_page_failed;
6087         adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
6088         adapter->hw_csum_rx_error = hw_csum_rx_error;
6089         netdev->stats.rx_bytes = bytes;
6090         netdev->stats.rx_packets = packets;
6091
6092         bytes = 0;
6093         packets = 0;
6094         /* gather per-queue Tx stats into the adapter struct */
6095         for (i = 0; i < adapter->num_tx_queues; i++) {
6096                 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6097                 restart_queue += tx_ring->tx_stats.restart_queue;
6098                 tx_busy += tx_ring->tx_stats.tx_busy;
6099                 bytes += tx_ring->stats.bytes;
6100                 packets += tx_ring->stats.packets;
6101         }
6102         adapter->restart_queue = restart_queue;
6103         adapter->tx_busy = tx_busy;
6104         netdev->stats.tx_bytes = bytes;
6105         netdev->stats.tx_packets = packets;
6106
6107         hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
6108
6109         /* 8 register reads */
6110         for (i = 0; i < 8; i++) {
6111                 /* for packet buffers not used, the register should read 0 */
6112                 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
6113                 missed_rx += mpc;
6114                 hwstats->mpc[i] += mpc;
6115                 total_mpc += hwstats->mpc[i];
6116                 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
6117                 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
6118                 switch (hw->mac.type) {
6119                 case ixgbe_mac_82598EB:
6120                         hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
6121                         hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
6122                         hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
6123                         hwstats->pxonrxc[i] +=
6124                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
6125                         break;
6126                 case ixgbe_mac_82599EB:
6127                 case ixgbe_mac_X540:
6128                 case ixgbe_mac_X550:
6129                 case ixgbe_mac_X550EM_x:
6130                         hwstats->pxonrxc[i] +=
6131                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
6132                         break;
6133                 default:
6134                         break;
6135                 }
6136         }
6137
6138         /* 16 register reads */
6139         for (i = 0; i < 16; i++) {
6140                 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
6141                 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
6142                 if ((hw->mac.type == ixgbe_mac_82599EB) ||
6143                     (hw->mac.type == ixgbe_mac_X540) ||
6144                     (hw->mac.type == ixgbe_mac_X550) ||
6145                     (hw->mac.type == ixgbe_mac_X550EM_x)) {
6146                         hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
6147                         IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
6148                         hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
6149                         IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
6150                 }
6151         }
6152
6153         hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
6154         /* work around hardware counting issue */
6155         hwstats->gprc -= missed_rx;
6156
6157         ixgbe_update_xoff_received(adapter);
6158
6159         /* 82598 hardware only has a 32 bit counter in the high register */
6160         switch (hw->mac.type) {
6161         case ixgbe_mac_82598EB:
6162                 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
6163                 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
6164                 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
6165                 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
6166                 break;
6167         case ixgbe_mac_X540:
6168         case ixgbe_mac_X550:
6169         case ixgbe_mac_X550EM_x:
6170                 /* OS2BMC stats are X540 and later */
6171                 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
6172                 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
6173                 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
6174                 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
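                /* fall through - the 82599 stats below also apply here */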
6175         case ixgbe_mac_82599EB:
6176                 for (i = 0; i < 16; i++)
6177                         adapter->hw_rx_no_dma_resources +=
6178                                              IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
6179                 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
6180                 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
6181                 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
6182                 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
6183                 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
6184                 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
6185                 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
6186                 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
6187                 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
6188 #ifdef IXGBE_FCOE
6189                 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
6190                 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
6191                 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
6192                 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
6193                 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
6194                 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
6195                 /* Add up per-CPU counters for total DDP alloc failures */
6196                 if (adapter->fcoe.ddp_pool) {
6197                         struct ixgbe_fcoe *fcoe = &adapter->fcoe;
6198                         struct ixgbe_fcoe_ddp_pool *ddp_pool;
6199                         unsigned int cpu;
6200                         u64 noddp = 0, noddp_ext_buff = 0;
6201                         for_each_possible_cpu(cpu) {
6202                                 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
6203                                 noddp += ddp_pool->noddp;
6204                                 noddp_ext_buff += ddp_pool->noddp_ext_buff;
6205                         }
6206                         hwstats->fcoe_noddp = noddp;
6207                         hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
6208                 }
6209 #endif /* IXGBE_FCOE */
6210                 break;
6211         default:
6212                 break;
6213         }
6214         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
6215         hwstats->bprc += bprc;
6216         hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
6217         if (hw->mac.type == ixgbe_mac_82598EB)
6218                 hwstats->mprc -= bprc;
6219         hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
6220         hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
6221         hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
6222         hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
6223         hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
6224         hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
6225         hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
6226         hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
6227         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
6228         hwstats->lxontxc += lxon;
6229         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
6230         hwstats->lxofftxc += lxoff;
6231         hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
6232         hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
6233         /*
6234          * 82598 errata - tx of flow control packets is included in tx counters
6235          */
6236         xon_off_tot = lxon + lxoff;
6237         hwstats->gptc -= xon_off_tot;
6238         hwstats->mptc -= xon_off_tot;
6239         hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
6240         hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
6241         hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
6242         hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
6243         hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
6244         hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
6245         hwstats->ptc64 -= xon_off_tot;
6246         hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
6247         hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
6248         hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
6249         hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
6250         hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
6251         hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
6252
6253         /* Fill out the OS statistics structure */
6254         netdev->stats.multicast = hwstats->mprc;
6255
6256         /* Rx Errors */
6257         netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
6258         netdev->stats.rx_dropped = 0;
6259         netdev->stats.rx_length_errors = hwstats->rlec;
6260         netdev->stats.rx_crc_errors = hwstats->crcerrs;
6261         netdev->stats.rx_missed_errors = total_mpc;
6262 }
6263
6264 /**
6265  * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
6266  * @adapter: pointer to the device adapter structure
6267  **/
6268 static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
6269 {
6270         struct ixgbe_hw *hw = &adapter->hw;
6271         int i;
6272
6273         if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
6274                 return;
6275
6276         adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6277
6278         /* if interface is down do nothing */
6279         if (test_bit(__IXGBE_DOWN, &adapter->state))
6280                 return;
6281
6282         /* do nothing if we are not using signature filters */
6283         if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
6284                 return;
6285
6286         adapter->fdir_overflow++;
6287
6288         if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
6289                 for (i = 0; i < adapter->num_tx_queues; i++)
6290                         set_bit(__IXGBE_TX_FDIR_INIT_DONE,
6291                                 &(adapter->tx_ring[i]->state));
6292                 /* re-enable flow director interrupts */
6293                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
6294         } else {
6295                 e_err(probe, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n");
6297         }
6298 }
6299
6300 /**
6301  * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
6302  * @adapter: pointer to the device adapter structure
6303  *
6304  * This function serves two purposes.  First it strobes the interrupt lines
6305  * in order to make certain interrupts are occurring.  Secondly it sets the
6306  * bits needed to check for TX hangs.  As a result we should immediately
6307  * determine if a hang has occurred.
6308  */
6309 static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
6310 {
6311         struct ixgbe_hw *hw = &adapter->hw;
6312         u64 eics = 0;
6313         int i;
6314
6315         /* If we're down, removing or resetting, just bail */
6316         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6317             test_bit(__IXGBE_REMOVING, &adapter->state) ||
6318             test_bit(__IXGBE_RESETTING, &adapter->state))
6319                 return;
6320
6321         /* Force detection of hung controller */
6322         if (netif_carrier_ok(adapter->netdev)) {
6323                 for (i = 0; i < adapter->num_tx_queues; i++)
6324                         set_check_for_tx_hang(adapter->tx_ring[i]);
6325         }
6326
6327         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
6328                 /*
6329                  * for legacy and MSI interrupts don't set any bits
6330                  * that are enabled for EIAM, because this operation
6331                  * would set *both* EIMS and EICS for any bit in EIAM
6332                  */
6333                 IXGBE_WRITE_REG(hw, IXGBE_EICS,
6334                         (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
6335         } else {
6336                 /* get one bit for every active tx/rx interrupt vector */
6337                 for (i = 0; i < adapter->num_q_vectors; i++) {
6338                         struct ixgbe_q_vector *qv = adapter->q_vector[i];
6339                         if (qv->rx.ring || qv->tx.ring)
6340                                 eics |= ((u64)1 << i);
6341                 }
6342         }
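        /* e.g. three q_vectors, each owning at least one ring, yield
         * eics == 0x7, so all three vectors are strobed below
         * (editorial note)
         */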
6343
6344         /* Cause software interrupt to ensure rings are cleaned */
6345         ixgbe_irq_rearm_queues(adapter, eics);
6346 }
6347
6348 /**
6349  * ixgbe_watchdog_update_link - update the link status
6350  * @adapter: pointer to the device adapter structure
6352  **/
6353 static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
6354 {
6355         struct ixgbe_hw *hw = &adapter->hw;
6356         u32 link_speed = adapter->link_speed;
6357         bool link_up = adapter->link_up;
6358         bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
6359
6360         if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
6361                 return;
6362
6363         if (hw->mac.ops.check_link) {
6364                 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
6365         } else {
6366                 /* if there is no check_link function, assume link is up */
6367                 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
6368                 link_up = true;
6369         }
6370
6371         if (adapter->ixgbe_ieee_pfc)
6372                 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
6373
6374         if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
6375                 hw->mac.ops.fc_enable(hw);
6376                 ixgbe_set_rx_drop_en(adapter);
6377         }
6378
6379         if (link_up ||
6380             time_after(jiffies, (adapter->link_check_timeout +
6381                                  IXGBE_TRY_LINK_TIMEOUT))) {
6382                 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6383                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
6384                 IXGBE_WRITE_FLUSH(hw);
6385         }
6386
6387         adapter->link_up = link_up;
6388         adapter->link_speed = link_speed;
6389 }
6390
6391 static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
6392 {
6393 #ifdef CONFIG_IXGBE_DCB
6394         struct net_device *netdev = adapter->netdev;
6395         struct dcb_app app = {
6396                               .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
6397                               .protocol = 0,
6398                              };
6399         u8 up = 0;
6400
6401         if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
6402                 up = dcb_ieee_getapp_mask(netdev, &app);
6403
6404         adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
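        /* Worked example (editorial note): an application priority mask of
         * 0x08 gives ffs(0x08) - 1 == 3, i.e. user priority 3; a mask of
         * 0 or 1 falls back to priority 0.
         */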
6405 #endif
6406 }
6407
6408 /**
6409  * ixgbe_watchdog_link_is_up - update netif_carrier status and
6410  *                             print link up message
6411  * @adapter: pointer to the device adapter structure
6412  **/
6413 static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6414 {
6415         struct net_device *netdev = adapter->netdev;
6416         struct ixgbe_hw *hw = &adapter->hw;
6417         struct net_device *upper;
6418         struct list_head *iter;
6419         u32 link_speed = adapter->link_speed;
6420         const char *speed_str;
6421         bool flow_rx, flow_tx;
6422
6423         /* only continue if link was previously down */
6424         if (netif_carrier_ok(netdev))
6425                 return;
6426
6427         adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
6428
6429         switch (hw->mac.type) {
6430         case ixgbe_mac_82598EB: {
6431                 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6432                 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
6433                 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
6434                 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
6435         }
6436                 break;
6437         case ixgbe_mac_X540:
6438         case ixgbe_mac_X550:
6439         case ixgbe_mac_X550EM_x:
6440         case ixgbe_mac_82599EB: {
6441                 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6442                 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6443                 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6444                 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6445         }
6446                 break;
6447         default:
6448                 flow_tx = false;
6449                 flow_rx = false;
6450                 break;
6451         }
6452
6453         adapter->last_rx_ptp_check = jiffies;
6454
6455         if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6456                 ixgbe_ptp_start_cyclecounter(adapter);
6457
6458         switch (link_speed) {
6459         case IXGBE_LINK_SPEED_10GB_FULL:
6460                 speed_str = "10 Gbps";
6461                 break;
6462         case IXGBE_LINK_SPEED_2_5GB_FULL:
6463                 speed_str = "2.5 Gbps";
6464                 break;
6465         case IXGBE_LINK_SPEED_1GB_FULL:
6466                 speed_str = "1 Gbps";
6467                 break;
6468         case IXGBE_LINK_SPEED_100_FULL:
6469                 speed_str = "100 Mbps";
6470                 break;
6471         default:
6472                 speed_str = "unknown speed";
6473                 break;
6474         }
6475         e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
6476                ((flow_rx && flow_tx) ? "RX/TX" :
6477                (flow_rx ? "RX" :
6478                (flow_tx ? "TX" : "None"))));
6479
6480         netif_carrier_on(netdev);
6481         ixgbe_check_vf_rate_limit(adapter);
6482
6483         /* enable transmits */
6484         netif_tx_wake_all_queues(adapter->netdev);
6485
6486         /* enable any upper devices */
6487         rtnl_lock();
6488         netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
6489                 if (netif_is_macvlan(upper)) {
6490                         struct macvlan_dev *vlan = netdev_priv(upper);
6491
6492                         if (vlan->fwd_priv)
6493                                 netif_tx_wake_all_queues(upper);
6494                 }
6495         }
6496         rtnl_unlock();
6497
6498         /* update the default user priority for VFs */
6499         ixgbe_update_default_up(adapter);
6500
6501         /* ping all the active vfs to let them know link has changed */
6502         ixgbe_ping_all_vfs(adapter);
6503 }
6504
6505 /**
6506  * ixgbe_watchdog_link_is_down - update netif_carrier status and
6507  *                               print link down message
6508  * @adapter: pointer to the adapter structure
6509  **/
6510 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
6511 {
6512         struct net_device *netdev = adapter->netdev;
6513         struct ixgbe_hw *hw = &adapter->hw;
6514
6515         adapter->link_up = false;
6516         adapter->link_speed = 0;
6517
6518         /* only continue if link was up previously */
6519         if (!netif_carrier_ok(netdev))
6520                 return;
6521
6522         /* poll for SFP+ cable when link is down */
6523         if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
6524                 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
6525
6526         if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6527                 ixgbe_ptp_start_cyclecounter(adapter);
6528
6529         e_info(drv, "NIC Link is Down\n");
6530         netif_carrier_off(netdev);
6531
6532         /* ping all the active vfs to let them know link has changed */
6533         ixgbe_ping_all_vfs(adapter);
6534 }
6535
6536 static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
6537 {
6538         int i;
6539
6540         for (i = 0; i < adapter->num_tx_queues; i++) {
6541                 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6542
6543                 if (tx_ring->next_to_use != tx_ring->next_to_clean)
6544                         return true;
6545         }
6546
6547         return false;
6548 }
6549
6550 static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
6551 {
6552         struct ixgbe_hw *hw = &adapter->hw;
6553         struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
6554         u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
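        /* Worked example (editorial note, assuming the 8-queues-per-pool
         * VMDq mask 0x78): __ALIGN_MASK(1, ~0x78) == (1 + 0xffffff87) & 0x78
         * == 8, i.e. eight Tx queues are checked per VF pool.
         */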
6555
6556         int i, j;
6557
6558         if (!adapter->num_vfs)
6559                 return false;
6560
6561         /* resetting the PF is only needed for MAC before X550 */
6562         if (hw->mac.type >= ixgbe_mac_X550)
6563                 return false;
6564
6565         for (i = 0; i < adapter->num_vfs; i++) {
6566                 for (j = 0; j < q_per_pool; j++) {
6567                         u32 h, t;
6568
6569                         h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
6570                         t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
6571
6572                         if (h != t)
6573                                 return true;
6574                 }
6575         }
6576
6577         return false;
6578 }
6579
6580 /**
6581  * ixgbe_watchdog_flush_tx - flush queues on link down
6582  * @adapter: pointer to the device adapter structure
6583  **/
6584 static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
6585 {
6586         if (!netif_carrier_ok(adapter->netdev)) {
6587                 if (ixgbe_ring_tx_pending(adapter) ||
6588                     ixgbe_vf_tx_pending(adapter)) {
6589                         /* We've lost link, so the controller stops DMA,
6590                          * but we've got queued Tx work that's never going
6591                          * to get done, so reset controller to flush Tx.
6592                          * (Do the reset outside of interrupt context).
6593                          */
6594                         e_warn(drv, "initiating reset to clear Tx work after link loss\n");
6595                         adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
6596                 }
6597         }
6598 }
6599
6600 #ifdef CONFIG_PCI_IOV
6601 static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
6602                                       struct pci_dev *vfdev)
6603 {
6604         if (!pci_wait_for_pending_transaction(vfdev))
6605                 e_dev_warn("Issuing VFLR with pending transactions\n");
6606
6607         e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
6608         pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
6609
6610         msleep(100);
6611 }
6612
6613 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
6614 {
6615         struct ixgbe_hw *hw = &adapter->hw;
6616         struct pci_dev *pdev = adapter->pdev;
6617         struct pci_dev *vfdev;
6618         u32 gpc;
6619         int pos;
6620         unsigned short vf_id;
6621
6622         if (!(netif_carrier_ok(adapter->netdev)))
6623                 return;
6624
6625         gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
6626         if (gpc) /* If incrementing then no need for the check below */
6627                 return;
6628         /* Check to see if a bad DMA write target from an errant or
6629          * malicious VF has caused a PCIe error.  If so then we can
6630          * issue a VFLR to the offending VF(s) and then resume without
6631          * requesting a full slot reset.
6632          */
6633
6634         if (!pdev)
6635                 return;
6636
6637         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6638         if (!pos)
6639                 return;
6640
6641         /* get the device ID for the VF */
6642         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
6643
6644         /* check status reg for all VFs owned by this PF */
6645         vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
6646         while (vfdev) {
6647                 if (vfdev->is_virtfn && (vfdev->physfn == pdev)) {
6648                         u16 status_reg;
6649
6650                         pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
6651                         if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
6652                                 /* issue VFLR */
6653                                 ixgbe_issue_vf_flr(adapter, vfdev);
6654                 }
6655
6656                 vfdev = pci_get_device(pdev->vendor, vf_id, vfdev);
6657         }
6658 }
6659
6660 static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
6661 {
6662         u32 ssvpc;
6663
6664         /* Do not perform spoof check for 82598 or if not in IOV mode */
6665         if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
6666             adapter->num_vfs == 0)
6667                 return;
6668
6669         ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
6670
6671         /*
6672          * The SSVPC register is cleared on read; if it reads zero, no
6673          * spoofed packets were seen during the last interval.
6674          */
6675         if (!ssvpc)
6676                 return;
6677
6678         e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
6679 }
6680 #else
6681 static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
6682 {
6683 }
6684
6685 static void
6686 ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
6687 {
6688 }
6689 #endif /* CONFIG_PCI_IOV */
6690
6691
6692 /**
6693  * ixgbe_watchdog_subtask - check and bring link up
6694  * @adapter: pointer to the device adapter structure
6695  **/
6696 static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
6697 {
6698         /* if interface is down, removing or resetting, do nothing */
6699         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6700             test_bit(__IXGBE_REMOVING, &adapter->state) ||
6701             test_bit(__IXGBE_RESETTING, &adapter->state))
6702                 return;
6703
6704         ixgbe_watchdog_update_link(adapter);
6705
6706         if (adapter->link_up)
6707                 ixgbe_watchdog_link_is_up(adapter);
6708         else
6709                 ixgbe_watchdog_link_is_down(adapter);
6710
6711         ixgbe_check_for_bad_vf(adapter);
6712         ixgbe_spoof_check(adapter);
6713         ixgbe_update_stats(adapter);
6714
6715         ixgbe_watchdog_flush_tx(adapter);
6716 }
6717
6718 /**
6719  * ixgbe_sfp_detection_subtask - poll for SFP+ cable
6720  * @adapter: the ixgbe adapter structure
6721  **/
6722 static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
6723 {
6724         struct ixgbe_hw *hw = &adapter->hw;
6725         s32 err;
6726
6727         /* not searching for SFP so there is nothing to do here */
6728         if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
6729             !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6730                 return;
6731
6732         if (adapter->sfp_poll_time &&
6733             time_after(adapter->sfp_poll_time, jiffies))
6734                 return; /* If not yet time to poll for SFP */
6735
6736         /* someone else is in init, wait until next service event */
6737         if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6738                 return;
6739
6740         adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
6741
6742         err = hw->phy.ops.identify_sfp(hw);
6743         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6744                 goto sfp_out;
6745
6746         if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
6747                 /* If no cable is present, then we need to reset
6748                  * the next time we find a good cable. */
6749                 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
6750         }
6751
6752         /* exit on error */
6753         if (err)
6754                 goto sfp_out;
6755
6756         /* exit if reset not needed */
6757         if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6758                 goto sfp_out;
6759
6760         adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
6761
6762         /*
6763          * A module may be identified correctly, but the EEPROM may not have
6764          * support for that module.  setup_sfp() will fail in that case, so
6765          * we should not allow that module to load.
6766          */
6767         if (hw->mac.type == ixgbe_mac_82598EB)
6768                 err = hw->phy.ops.reset(hw);
6769         else
6770                 err = hw->mac.ops.setup_sfp(hw);
6771
6772         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6773                 goto sfp_out;
6774
6775         adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
6776         e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
6777
6778 sfp_out:
6779         clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6780
6781         if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
6782             (adapter->netdev->reg_state == NETREG_REGISTERED)) {
6783                 e_dev_err("failed to initialize because an unsupported "
6784                           "SFP+ module type was detected.\n");
6785                 e_dev_err("Reload the driver after installing a "
6786                           "supported module.\n");
6787                 unregister_netdev(adapter->netdev);
6788         }
6789 }
6790
6791 /**
6792  * ixgbe_sfp_link_config_subtask - set up link SFP after module install
6793  * @adapter: the ixgbe adapter structure
6794  **/
6795 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
6796 {
6797         struct ixgbe_hw *hw = &adapter->hw;
6798         u32 speed;
6799         bool autoneg = false;
6800
6801         if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
6802                 return;
6803
6804         /* someone else is in init, wait until next service event */
6805         if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6806                 return;
6807
6808         adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
6809
6810         speed = hw->phy.autoneg_advertised;
6811         if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
6812                 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
6813
6814                 /* setup the highest link when no autoneg */
6815                 if (!autoneg) {
6816                         if (speed & IXGBE_LINK_SPEED_10GB_FULL)
6817                                 speed = IXGBE_LINK_SPEED_10GB_FULL;
6818                 }
6819         }
6820
6821         if (hw->mac.ops.setup_link)
6822                 hw->mac.ops.setup_link(hw, speed, true);
6823
6824         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
6825         adapter->link_check_timeout = jiffies;
6826         clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6827 }
6828
6829 /**
6830  * ixgbe_service_timer - timer callback
6831  * @data: pointer to adapter cast into an unsigned long
6832  **/
6833 static void ixgbe_service_timer(unsigned long data)
6834 {
6835         struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
6836         unsigned long next_event_offset;
6837
6838         /* poll faster when waiting for link */
6839         if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
6840                 next_event_offset = HZ / 10;
6841         else
6842                 next_event_offset = HZ * 2;
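        /* i.e. every 100 ms while a link update is pending, otherwise
         * every 2 seconds (editorial note)
         */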
6843
6844         /* Reset the timer */
6845         mod_timer(&adapter->service_timer, next_event_offset + jiffies);
6846
6847         ixgbe_service_event_schedule(adapter);
6848 }
6849
6850 static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
6851 {
6852         struct ixgbe_hw *hw = &adapter->hw;
6853         u32 status;
6854
6855         if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
6856                 return;
6857
6858         adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
6859
6860         if (!hw->phy.ops.handle_lasi)
6861                 return;
6862
6863         status = hw->phy.ops.handle_lasi(&adapter->hw);
6864         if (status != IXGBE_ERR_OVERTEMP)
6865                 return;
6866
6867         e_crit(drv, "%s\n", ixgbe_overheat_msg);
6868 }
6869
6870 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
6871 {
6872         if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
6873                 return;
6874
6875         adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
6876
6877         /* If we're already down, removing or resetting, just bail */
6878         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6879             test_bit(__IXGBE_REMOVING, &adapter->state) ||
6880             test_bit(__IXGBE_RESETTING, &adapter->state))
6881                 return;
6882
6883         ixgbe_dump(adapter);
6884         netdev_err(adapter->netdev, "Reset adapter\n");
6885         adapter->tx_timeout_count++;
6886
6887         rtnl_lock();
6888         ixgbe_reinit_locked(adapter);
6889         rtnl_unlock();
6890 }
6891
6892 /**
6893  * ixgbe_service_task - manages and runs subtasks
6894  * @work: pointer to work_struct containing our data
6895  **/
6896 static void ixgbe_service_task(struct work_struct *work)
6897 {
6898         struct ixgbe_adapter *adapter = container_of(work,
6899                                                      struct ixgbe_adapter,
6900                                                      service_task);
6901         if (ixgbe_removed(adapter->hw.hw_addr)) {
6902                 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
6903                         rtnl_lock();
6904                         ixgbe_down(adapter);
6905                         rtnl_unlock();
6906                 }
6907                 ixgbe_service_event_complete(adapter);
6908                 return;
6909         }
6910 #ifdef CONFIG_IXGBE_VXLAN
6911         if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
6912                 adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
6913                 vxlan_get_rx_port(adapter->netdev);
6914         }
6915 #endif /* CONFIG_IXGBE_VXLAN */
6916         ixgbe_reset_subtask(adapter);
6917         ixgbe_phy_interrupt_subtask(adapter);
6918         ixgbe_sfp_detection_subtask(adapter);
6919         ixgbe_sfp_link_config_subtask(adapter);
6920         ixgbe_check_overtemp_subtask(adapter);
6921         ixgbe_watchdog_subtask(adapter);
6922         ixgbe_fdir_reinit_subtask(adapter);
6923         ixgbe_check_hang_subtask(adapter);
6924
6925         if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
6926                 ixgbe_ptp_overflow_check(adapter);
6927                 ixgbe_ptp_rx_hang(adapter);
6928         }
6929
6930         ixgbe_service_event_complete(adapter);
6931 }
6932
6933 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
6934                      struct ixgbe_tx_buffer *first,
6935                      u8 *hdr_len)
6936 {
6937         struct sk_buff *skb = first->skb;
6938         u32 vlan_macip_lens, type_tucmd;
6939         u32 mss_l4len_idx, l4len;
6940         int err;
6941
6942         if (skb->ip_summed != CHECKSUM_PARTIAL)
6943                 return 0;
6944
6945         if (!skb_is_gso(skb))
6946                 return 0;
6947
6948         err = skb_cow_head(skb, 0);
6949         if (err < 0)
6950                 return err;
6951
6952         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
6953         type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
6954
6955         if (first->protocol == htons(ETH_P_IP)) {
6956                 struct iphdr *iph = ip_hdr(skb);
6957                 iph->tot_len = 0;
6958                 iph->check = 0;
6959                 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6960                                                          iph->daddr, 0,
6961                                                          IPPROTO_TCP,
6962                                                          0);
6963                 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
6964                 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6965                                    IXGBE_TX_FLAGS_CSUM |
6966                                    IXGBE_TX_FLAGS_IPV4;
6967         } else if (skb_is_gso_v6(skb)) {
6968                 ipv6_hdr(skb)->payload_len = 0;
6969                 tcp_hdr(skb)->check =
6970                     ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
6971                                      &ipv6_hdr(skb)->daddr,
6972                                      0, IPPROTO_TCP, 0);
6973                 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6974                                    IXGBE_TX_FLAGS_CSUM;
6975         }
6976
6977         /* compute header lengths */
6978         l4len = tcp_hdrlen(skb);
6979         *hdr_len = skb_transport_offset(skb) + l4len;
6980
6981         /* update gso size and bytecount with header size */
6982         first->gso_segs = skb_shinfo(skb)->gso_segs;
6983         first->bytecount += (first->gso_segs - 1) * *hdr_len;
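        /* Worked example (editorial note): with gso_segs == 3 and a 66-byte
         * header (*hdr_len == 66), bytecount grows by 2 * 66 == 132 bytes,
         * accounting for the headers replicated into the extra segments.
         */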
6984
6985         /* mss_l4len_id: use 0 as index for TSO */
6986         mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
6987         mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
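        /* Sketch of the packing (editorial note, assuming the usual shift
         * values IXGBE_ADVTXD_L4LEN_SHIFT == 8 and IXGBE_ADVTXD_MSS_SHIFT
         * == 16): l4len 20 and an MSS of 1448 pack to
         * (1448 << 16) | (20 << 8).
         */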
6988
6989         /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
6990         vlan_macip_lens = skb_network_header_len(skb);
6991         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
6992         vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
6993
6994         ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
6995                           mss_l4len_idx);
6996
6997         return 1;
6998 }
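/*
 * A reading of the TSO setup above: the IP total/payload length is zeroed
 * and the TCP checksum is seeded with a pseudo-header sum that deliberately
 * passes 0 for the length, because the hardware recomputes both per segment
 * as it splits the payload into gso_size chunks.  first->bytecount grows by
 * one header per extra segment so byte-queue limits and statistics reflect
 * the true wire byte count rather than skb->len.
 */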
6999
7000 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
7001                           struct ixgbe_tx_buffer *first)
7002 {
7003         struct sk_buff *skb = first->skb;
7004         u32 vlan_macip_lens = 0;
7005         u32 mss_l4len_idx = 0;
7006         u32 type_tucmd = 0;
7007
7008         if (skb->ip_summed != CHECKSUM_PARTIAL) {
7009                 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
7010                     !(first->tx_flags & IXGBE_TX_FLAGS_CC))
7011                         return;
7012                 vlan_macip_lens = skb_network_offset(skb) <<
7013                                   IXGBE_ADVTXD_MACLEN_SHIFT;
7014         } else {
7015                 u8 l4_hdr = 0;
7016                 union {
7017                         struct iphdr *ipv4;
7018                         struct ipv6hdr *ipv6;
7019                         u8 *raw;
7020                 } network_hdr;
7021                 union {
7022                         struct tcphdr *tcphdr;
7023                         u8 *raw;
7024                 } transport_hdr;
7025
7026                 if (skb->encapsulation) {
7027                         network_hdr.raw = skb_inner_network_header(skb);
7028                         transport_hdr.raw = skb_inner_transport_header(skb);
7029                         vlan_macip_lens = skb_inner_network_offset(skb) <<
7030                                           IXGBE_ADVTXD_MACLEN_SHIFT;
7031                 } else {
7032                         network_hdr.raw = skb_network_header(skb);
7033                         transport_hdr.raw = skb_transport_header(skb);
7034                         vlan_macip_lens = skb_network_offset(skb) <<
7035                                           IXGBE_ADVTXD_MACLEN_SHIFT;
7036                 }
7037
7038                 /* use first 4 bits to determine IP version */
7039                 switch (network_hdr.ipv4->version) {
7040                 case IPVERSION:
7041                         vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
7042                         type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7043                         l4_hdr = network_hdr.ipv4->protocol;
7044                         break;
7045                 case 6:
7046                         vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
7047                         l4_hdr = network_hdr.ipv6->nexthdr;
7048                         break;
7049                 default:
7050                         if (unlikely(net_ratelimit())) {
7051                                 dev_warn(tx_ring->dev,
7052                                          "partial checksum but version=%d\n",
7053                                          network_hdr.ipv4->version);
7054                         }
7055                 }
7056
7057                 switch (l4_hdr) {
7058                 case IPPROTO_TCP:
7059                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
7060                         mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
7061                                         IXGBE_ADVTXD_L4LEN_SHIFT;
7062                         break;
7063                 case IPPROTO_SCTP:
7064                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
7065                         mss_l4len_idx = sizeof(struct sctphdr) <<
7066                                         IXGBE_ADVTXD_L4LEN_SHIFT;
7067                         break;
7068                 case IPPROTO_UDP:
7069                         mss_l4len_idx = sizeof(struct udphdr) <<
7070                                         IXGBE_ADVTXD_L4LEN_SHIFT;
7071                         break;
7072                 default:
7073                         if (unlikely(net_ratelimit())) {
7074                                 dev_warn(tx_ring->dev,
7075                                  "partial checksum but l4 proto=%x!\n",
7076                                  l4_hdr);
7077                         }
7078                         break;
7079                 }
7080
7081                 /* update TX checksum flag */
7082                 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
7083         }
7084
7085         /* vlan_macip_lens: MACLEN, VLAN tag */
7086         vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7087
7088         ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
7089                           type_tucmd, mss_l4len_idx);
7090 }
7091
7092 #define IXGBE_SET_FLAG(_input, _flag, _result) \
7093         ((_flag <= _result) ? \
7094          ((u32)(_input & _flag) * (_result / _flag)) : \
7095          ((u32)(_input & _flag) / (_flag / _result)))
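/*
 * A worked example of IXGBE_SET_FLAG, derived from the macro itself: both
 * _flag and _result must be single power-of-two bits, so the bit can be
 * moved by scaling with their ratio instead of branching.  If _flag is
 * BIT(0) and _result is BIT(30), _flag <= _result picks the multiply arm:
 *
 *   (u32)(_input & BIT(0)) * (BIT(30) / BIT(0))   ->  0 or BIT(30)
 *
 * If _flag were BIT(30) and _result BIT(2), the divide arm would be used
 * instead.  With constant arguments the ternary folds away at compile time.
 */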
7096
7097 static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
7098 {
7099         /* set type for advanced descriptor with frame checksum insertion */
7100         u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
7101                        IXGBE_ADVTXD_DCMD_DEXT |
7102                        IXGBE_ADVTXD_DCMD_IFCS;
7103
7104         /* set HW vlan bit if vlan is present */
7105         cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
7106                                    IXGBE_ADVTXD_DCMD_VLE);
7107
7108         /* set segmentation enable bits for TSO/FSO */
7109         cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
7110                                    IXGBE_ADVTXD_DCMD_TSE);
7111
7112         /* set timestamp bit if present */
7113         cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
7114                                    IXGBE_ADVTXD_MAC_TSTAMP);
7115
7116         /* clear frame checksum insertion bit if no FCS is requested */
7117         cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
7118
7119         return cmd_type;
7120 }
7121
7122 static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
7123                                    u32 tx_flags, unsigned int paylen)
7124 {
7125         u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
7126
7127         /* enable L4 checksum for TSO and TX checksum offload */
7128         olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7129                                         IXGBE_TX_FLAGS_CSUM,
7130                                         IXGBE_ADVTXD_POPTS_TXSM);
7131
7132         /* enable IPv4 checksum for TSO */
7133         olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7134                                         IXGBE_TX_FLAGS_IPV4,
7135                                         IXGBE_ADVTXD_POPTS_IXSM);
7136
7137         /*
7138          * Check Context must be set if Tx switch is enabled, which it
7139          * always is for the case where virtual functions are running
7140          */
7141         olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7142                                         IXGBE_TX_FLAGS_CC,
7143                                         IXGBE_ADVTXD_CC);
7144
7145         tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
7146 }
7147
7148 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7149 {
7150         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
7151
7152         /* Herbert's original patch had:
7153          *  smp_mb__after_netif_stop_queue();
7154          * but since that doesn't exist yet, just open code it.
7155          */
7156         smp_mb();
7157
7158         /* We need to check again in case another CPU has just
7159          * made room available.
7160          */
7161         if (likely(ixgbe_desc_unused(tx_ring) < size))
7162                 return -EBUSY;
7163
7164         /* A reprieve! - use start_queue because it doesn't call schedule */
7165         netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
7166         ++tx_ring->tx_stats.restart_queue;
7167         return 0;
7168 }
7169
7170 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7171 {
7172         if (likely(ixgbe_desc_unused(tx_ring) >= size))
7173                 return 0;
7174
7175         return __ixgbe_maybe_stop_tx(tx_ring, size);
7176 }
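/*
 * The ordering these two helpers rely on, sketched from the code above and
 * the matching wake in the Tx cleanup path:
 *
 *   xmit (here)                      Tx cleanup (elsewhere)
 *   -----------                      ----------------------
 *   stop subqueue                    free descriptors
 *   smp_mb()                         memory barrier
 *   recheck ixgbe_desc_unused()      recheck queue stopped -> wake
 *
 * Without the barrier plus recheck, cleanup could free room and sample the
 * queue state before the stop became visible, leaving the queue stopped
 * with no further completion to ever wake it.
 */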
7177
7178 #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
7179                        IXGBE_TXD_CMD_RS)
7180
7181 static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
7182                          struct ixgbe_tx_buffer *first,
7183                          const u8 hdr_len)
7184 {
7185         struct sk_buff *skb = first->skb;
7186         struct ixgbe_tx_buffer *tx_buffer;
7187         union ixgbe_adv_tx_desc *tx_desc;
7188         struct skb_frag_struct *frag;
7189         dma_addr_t dma;
7190         unsigned int data_len, size;
7191         u32 tx_flags = first->tx_flags;
7192         u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
7193         u16 i = tx_ring->next_to_use;
7194
7195         tx_desc = IXGBE_TX_DESC(tx_ring, i);
7196
7197         ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
7198
7199         size = skb_headlen(skb);
7200         data_len = skb->data_len;
7201
7202 #ifdef IXGBE_FCOE
7203         if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
7204                 if (data_len < sizeof(struct fcoe_crc_eof)) {
7205                         size -= sizeof(struct fcoe_crc_eof) - data_len;
7206                         data_len = 0;
7207                 } else {
7208                         data_len -= sizeof(struct fcoe_crc_eof);
7209                 }
7210         }
7211
7212 #endif
7213         dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
7214
7215         tx_buffer = first;
7216
7217         for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
7218                 if (dma_mapping_error(tx_ring->dev, dma))
7219                         goto dma_error;
7220
7221                 /* record length, and DMA address */
7222                 dma_unmap_len_set(tx_buffer, len, size);
7223                 dma_unmap_addr_set(tx_buffer, dma, dma);
7224
7225                 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7226
7227                 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
7228                         tx_desc->read.cmd_type_len =
7229                                 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
7230
7231                         i++;
7232                         tx_desc++;
7233                         if (i == tx_ring->count) {
7234                                 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7235                                 i = 0;
7236                         }
7237                         tx_desc->read.olinfo_status = 0;
7238
7239                         dma += IXGBE_MAX_DATA_PER_TXD;
7240                         size -= IXGBE_MAX_DATA_PER_TXD;
7241
7242                         tx_desc->read.buffer_addr = cpu_to_le64(dma);
7243                 }
7244
7245                 if (likely(!data_len))
7246                         break;
7247
7248                 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
7249
7250                 i++;
7251                 tx_desc++;
7252                 if (i == tx_ring->count) {
7253                         tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7254                         i = 0;
7255                 }
7256                 tx_desc->read.olinfo_status = 0;
7257
7258 #ifdef IXGBE_FCOE
7259                 size = min_t(unsigned int, data_len, skb_frag_size(frag));
7260 #else
7261                 size = skb_frag_size(frag);
7262 #endif
7263                 data_len -= size;
7264
7265                 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
7266                                        DMA_TO_DEVICE);
7267
7268                 tx_buffer = &tx_ring->tx_buffer_info[i];
7269         }
7270
7271         /* write last descriptor with RS and EOP bits */
7272         cmd_type |= size | IXGBE_TXD_CMD;
7273         tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
7274
7275         netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
7276
7277         /* set the timestamp */
7278         first->time_stamp = jiffies;
7279
7280         /*
7281          * Force memory writes to complete before letting h/w know there
7282          * are new descriptors to fetch.  (Only applicable for weak-ordered
7283          * memory model archs, such as IA-64).
7284          *
7285          * We also need this memory barrier to make certain all of the
7286          * status bits have been updated before next_to_watch is written.
7287          */
7288         wmb();
7289
7290         /* set next_to_watch value indicating a packet is present */
7291         first->next_to_watch = tx_desc;
7292
7293         i++;
7294         if (i == tx_ring->count)
7295                 i = 0;
7296
7297         tx_ring->next_to_use = i;
7298
7299         ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
7300
7301         if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
7302                 writel(i, tx_ring->tail);
7303
7304                 /* we need this if more than one processor can write to our tail
7305                  * at a time, it synchronizes IO on IA64/Altix systems
7306                  */
7307                 mmiowb();
7308         }
7309
7310         return;
7311 dma_error:
7312         dev_err(tx_ring->dev, "TX DMA map failed\n");
7313
7314         /* clear dma mappings for failed tx_buffer_info map */
7315         for (;;) {
7316                 tx_buffer = &tx_ring->tx_buffer_info[i];
7317                 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
7318                 if (tx_buffer == first)
7319                         break;
7320                 if (i == 0)
7321                         i = tx_ring->count;
7322                 i--;
7323         }
7324
7325         tx_ring->next_to_use = i;
7326 }
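/*
 * A note on the cmd_type ^ size writes in ixgbe_tx_map() above: cmd_type
 * only carries bits above the descriptor's length field, and size never
 * exceeds IXGBE_MAX_DATA_PER_TXD, so the XOR touches disjoint bits and
 * behaves exactly like OR.  Buffers larger than IXGBE_MAX_DATA_PER_TXD are
 * carved into full-size chunks by the inner while loop, one descriptor per
 * chunk; only the frame's final descriptor gets EOP/RS via IXGBE_TXD_CMD,
 * which is also why next_to_watch points at that last descriptor.
 */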
7327
7328 static void ixgbe_atr(struct ixgbe_ring *ring,
7329                       struct ixgbe_tx_buffer *first)
7330 {
7331         struct ixgbe_q_vector *q_vector = ring->q_vector;
7332         union ixgbe_atr_hash_dword input = { .dword = 0 };
7333         union ixgbe_atr_hash_dword common = { .dword = 0 };
7334         union {
7335                 unsigned char *network;
7336                 struct iphdr *ipv4;
7337                 struct ipv6hdr *ipv6;
7338         } hdr;
7339         struct tcphdr *th;
7340         struct sk_buff *skb;
7341 #ifdef CONFIG_IXGBE_VXLAN
7342         u8 encap = false;
7343 #endif /* CONFIG_IXGBE_VXLAN */
7344         __be16 vlan_id;
7345
7346         /* if the ring doesn't have an interrupt vector, cannot perform ATR */
7347         if (!q_vector)
7348                 return;
7349
7350         /* do nothing if sampling is disabled */
7351         if (!ring->atr_sample_rate)
7352                 return;
7353
7354         ring->atr_count++;
7355
7356         /* snag network header to get L4 type and address */
7357         skb = first->skb;
7358         hdr.network = skb_network_header(skb);
7359         if (skb->encapsulation) {
7360 #ifdef CONFIG_IXGBE_VXLAN
7361                 struct ixgbe_adapter *adapter = q_vector->adapter;
7362
7363                 if (!adapter->vxlan_port)
7364                         return;
7365                 if (first->protocol != htons(ETH_P_IP) ||
7366                     hdr.ipv4->version != IPVERSION ||
7367                     hdr.ipv4->protocol != IPPROTO_UDP) {
7368                         return;
7369                 }
7370                 if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
7371                         return;
7372                 encap = true;
7373                 hdr.network = skb_inner_network_header(skb);
7374                 th = inner_tcp_hdr(skb);
7375 #else
7376                 return;
7377 #endif /* CONFIG_IXGBE_VXLAN */
7378         } else {
7379                 /* Currently only IPv4/IPv6 with TCP is supported */
7380                 if ((first->protocol != htons(ETH_P_IPV6) ||
7381                      hdr.ipv6->nexthdr != IPPROTO_TCP) &&
7382                     (first->protocol != htons(ETH_P_IP) ||
7383                      hdr.ipv4->protocol != IPPROTO_TCP))
7384                         return;
7385                 th = tcp_hdr(skb);
7386         }
7387
7388         /* skip this packet since it is invalid or the socket is closing */
7389         if (!th || th->fin)
7390                 return;
7391
7392         /* sample on all syn packets or once every atr sample count */
7393         if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
7394                 return;
7395
7396         /* reset sample count */
7397         ring->atr_count = 0;
7398
7399         vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
7400
7401         /*
7402          * src and dst are inverted, think how the receiver sees them
7403          *
7404          * The input is broken into two sections, a non-compressed section
7405          * containing vm_pool, vlan_id, and flow_type.  The rest of the data
7406          * is XORed together and stored in the compressed dword.
7407          */
7408         input.formatted.vlan_id = vlan_id;
7409
7410         /*
7411          * since the src port and flex bytes occupy the same word, XOR them together
7412          * and write the value to source port portion of compressed dword
7413          */
7414         if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
7415                 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
7416         else
7417                 common.port.src ^= th->dest ^ first->protocol;
7418         common.port.dst ^= th->source;
7419
7420         if (first->protocol == htons(ETH_P_IP)) {
7421                 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
7422                 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
7423         } else {
7424                 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
7425                 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
7426                              hdr.ipv6->saddr.s6_addr32[1] ^
7427                              hdr.ipv6->saddr.s6_addr32[2] ^
7428                              hdr.ipv6->saddr.s6_addr32[3] ^
7429                              hdr.ipv6->daddr.s6_addr32[0] ^
7430                              hdr.ipv6->daddr.s6_addr32[1] ^
7431                              hdr.ipv6->daddr.s6_addr32[2] ^
7432                              hdr.ipv6->daddr.s6_addr32[3];
7433         }
7434
7435 #ifdef CONFIG_IXGBE_VXLAN
7436         if (encap)
7437                 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
7438 #endif /* CONFIG_IXGBE_VXLAN */
7439
7440         /* This assumes the Rx queue and Tx queue are bound to the same CPU */
7441         ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
7442                                               input, common, ring->queue_index);
7443 }
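/*
 * The signature filter input assembled above, in one picture: the hardware
 * hashes an exact-match dword (vm_pool, vlan_id, flow_type) plus a lossy
 * "common" dword that XOR-folds the rest of the tuple:
 *
 *   common.port.src = th->dest ^ (vlan tagged ? ETH_P_8021Q : protocol)
 *   common.port.dst = th->source
 *   common.ip       = saddr ^ daddr  (all four dwords XORed for IPv6)
 *
 * Source and destination are swapped on purpose: the filter is installed
 * from the Tx path but must match the reverse direction on Rx, steering
 * the peer's replies to the queue paired with this Tx ring.
 */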
7444
7445 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
7446                               void *accel_priv, select_queue_fallback_t fallback)
7447 {
7448         struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
7449 #ifdef IXGBE_FCOE
7450         struct ixgbe_adapter *adapter;
7451         struct ixgbe_ring_feature *f;
7452         int txq;
7453 #endif
7454
7455         if (fwd_adapter)
7456                 return skb->queue_mapping + fwd_adapter->tx_base_queue;
7457
7458 #ifdef IXGBE_FCOE
7459
7460         /*
7461          * only execute the code below if protocol is FCoE
7462          * or FIP and we have FCoE enabled on the adapter
7463          */
7464         switch (vlan_get_protocol(skb)) {
7465         case htons(ETH_P_FCOE):
7466         case htons(ETH_P_FIP):
7467                 adapter = netdev_priv(dev);
7468
7469                 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7470                         break;
7471         default:
7472                 return fallback(dev, skb);
7473         }
7474
7475         f = &adapter->ring_feature[RING_F_FCOE];
7476
7477         txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
7478                                            smp_processor_id();
7479
7480         while (txq >= f->indices)
7481                 txq -= f->indices;
7482
7483         return txq + f->offset;
7484 #else
7485         return fallback(dev, skb);
7486 #endif
7487 }
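/*
 * In the FCoE branch above, the while loop is simply txq % f->indices
 * without a divide: FCoE and FIP frames are pinned into the dedicated ring
 * range [f->offset, f->offset + f->indices), keyed by the recorded Rx
 * queue when present so a flow stays on one CPU's ring pair.
 */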
7488
7489 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7490                           struct ixgbe_adapter *adapter,
7491                           struct ixgbe_ring *tx_ring)
7492 {
7493         struct ixgbe_tx_buffer *first;
7494         int tso;
7495         u32 tx_flags = 0;
7496         unsigned short f;
7497         u16 count = TXD_USE_COUNT(skb_headlen(skb));
7498         __be16 protocol = skb->protocol;
7499         u8 hdr_len = 0;
7500
7501         /*
7502          * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
7503          *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
7504          *       + 2 desc gap to keep tail from touching head,
7505          *       + 1 desc for context descriptor,
7506          * otherwise try next time
7507          */
7508         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
7509                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
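        /* As a worked example of this count, assuming the usual 16KB
         * IXGBE_MAX_DATA_PER_TXD: a TSO skb with a 256-byte linear area and
         * three 32KB frags needs 1 + 3 * 2 = 7 data descriptors, and the
         * "count + 3" below adds the context descriptor plus the two-slot
         * gap that keeps tail from touching head.
         */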
7510
7511         if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
7512                 tx_ring->tx_stats.tx_busy++;
7513                 return NETDEV_TX_BUSY;
7514         }
7515
7516         /* record the location of the first descriptor for this packet */
7517         first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
7518         first->skb = skb;
7519         first->bytecount = skb->len;
7520         first->gso_segs = 1;
7521
7522         /* if we have a HW VLAN tag being added default to the HW one */
7523         if (skb_vlan_tag_present(skb)) {
7524                 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
7525                 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
7526         /* else if it is a SW VLAN check the next protocol and store the tag */
7527         } else if (protocol == htons(ETH_P_8021Q)) {
7528                 struct vlan_hdr *vhdr, _vhdr;
7529                 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
7530                 if (!vhdr)
7531                         goto out_drop;
7532
7533                 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
7534                                   IXGBE_TX_FLAGS_VLAN_SHIFT;
7535                 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
7536         }
7537         protocol = vlan_get_protocol(skb);
7538
7539         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
7540             adapter->ptp_clock &&
7541             !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
7542                                    &adapter->state)) {
7543                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7544                 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
7545
7546                 /* schedule check for Tx timestamp */
7547                 adapter->ptp_tx_skb = skb_get(skb);
7548                 adapter->ptp_tx_start = jiffies;
7549                 schedule_work(&adapter->ptp_tx_work);
7550         }
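        /* Only one Tx timestamp can be in flight; the
         * __IXGBE_PTP_TX_IN_PROGRESS bit taken above is released by the PTP
         * code once the hardware timestamp is retrieved (or the request
         * times out), and concurrent requests simply proceed without
         * IXGBE_TX_FLAGS_TSTAMP.
         */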
7551
7552         skb_tx_timestamp(skb);
7553
7554 #ifdef CONFIG_PCI_IOV
7555         /*
7556          * Use the l2switch_enable flag - would be false if the DMA
7557          * Tx switch had been disabled.
7558          */
7559         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7560                 tx_flags |= IXGBE_TX_FLAGS_CC;
7561
7562 #endif
7563         /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
7564         if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
7565             ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
7566              (skb->priority != TC_PRIO_CONTROL))) {
7567                 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
7568                 tx_flags |= (skb->priority & 0x7) <<
7569                                         IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
7570                 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
7571                         struct vlan_ethhdr *vhdr;
7572
7573                         if (skb_cow_head(skb, 0))
7574                                 goto out_drop;
7575                         vhdr = (struct vlan_ethhdr *)skb->data;
7576                         vhdr->h_vlan_TCI = htons(tx_flags >>
7577                                                  IXGBE_TX_FLAGS_VLAN_SHIFT);
7578                 } else {
7579                         tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
7580                 }
7581         }
7582
7583         /* record initial flags and protocol */
7584         first->tx_flags = tx_flags;
7585         first->protocol = protocol;
7586
7587 #ifdef IXGBE_FCOE
7588         /* setup tx offload for FCoE */
7589         if ((protocol == htons(ETH_P_FCOE)) &&
7590             (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
7591                 tso = ixgbe_fso(tx_ring, first, &hdr_len);
7592                 if (tso < 0)
7593                         goto out_drop;
7594
7595                 goto xmit_fcoe;
7596         }
7597
7598 #endif /* IXGBE_FCOE */
7599         tso = ixgbe_tso(tx_ring, first, &hdr_len);
7600         if (tso < 0)
7601                 goto out_drop;
7602         else if (!tso)
7603                 ixgbe_tx_csum(tx_ring, first);
7604
7605         /* add the ATR filter if ATR is on */
7606         if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
7607                 ixgbe_atr(tx_ring, first);
7608
7609 #ifdef IXGBE_FCOE
7610 xmit_fcoe:
7611 #endif /* IXGBE_FCOE */
7612         ixgbe_tx_map(tx_ring, first, hdr_len);
7613
7614         return NETDEV_TX_OK;
7615
7616 out_drop:
7617         dev_kfree_skb_any(first->skb);
7618         first->skb = NULL;
7619
7620         return NETDEV_TX_OK;
7621 }
7622
7623 static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
7624                                       struct net_device *netdev,
7625                                       struct ixgbe_ring *ring)
7626 {
7627         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7628         struct ixgbe_ring *tx_ring;
7629
7630         /*
7631          * The minimum packet size for olinfo paylen is 17 so pad the skb
7632          * in order to meet this minimum size requirement.
7633          */
7634         if (skb_put_padto(skb, 17))
7635                 return NETDEV_TX_OK;
7636
7637         tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
7638
7639         return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
7640 }
7641
7642 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
7643                                     struct net_device *netdev)
7644 {
7645         return __ixgbe_xmit_frame(skb, netdev, NULL);
7646 }
7647
7648 /**
7649  * ixgbe_set_mac - Change the Ethernet Address of the NIC
7650  * @netdev: network interface device structure
7651  * @p: pointer to an address structure
7652  *
7653  * Returns 0 on success, negative on failure
7654  **/
7655 static int ixgbe_set_mac(struct net_device *netdev, void *p)
7656 {
7657         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7658         struct ixgbe_hw *hw = &adapter->hw;
7659         struct sockaddr *addr = p;
7660         int ret;
7661
7662         if (!is_valid_ether_addr(addr->sa_data))
7663                 return -EADDRNOTAVAIL;
7664
7665         ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7666         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
7667         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
7668
7669         ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7670         return ret > 0 ? 0 : ret;
7671 }
7672
7673 static int
7674 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
7675 {
7676         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7677         struct ixgbe_hw *hw = &adapter->hw;
7678         u16 value;
7679         int rc;
7680
7681         if (prtad != hw->phy.mdio.prtad)
7682                 return -EINVAL;
7683         rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
7684         if (!rc)
7685                 rc = value;
7686         return rc;
7687 }
7688
7689 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
7690                             u16 addr, u16 value)
7691 {
7692         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7693         struct ixgbe_hw *hw = &adapter->hw;
7694
7695         if (prtad != hw->phy.mdio.prtad)
7696                 return -EINVAL;
7697         return hw->phy.ops.write_reg(hw, addr, devad, value);
7698 }
7699
7700 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
7701 {
7702         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7703
7704         switch (cmd) {
7705         case SIOCSHWTSTAMP:
7706                 return ixgbe_ptp_set_ts_config(adapter, req);
7707         case SIOCGHWTSTAMP:
7708                 return ixgbe_ptp_get_ts_config(adapter, req);
7709         default:
7710                 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
7711         }
7712 }
7713
7714 /**
7715  * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
7716  * netdev->dev_addrs
7717  * @dev: network interface device structure
7718  *
7719  * Returns non-zero on failure
7720  **/
7721 static int ixgbe_add_sanmac_netdev(struct net_device *dev)
7722 {
7723         int err = 0;
7724         struct ixgbe_adapter *adapter = netdev_priv(dev);
7725         struct ixgbe_hw *hw = &adapter->hw;
7726
7727         if (is_valid_ether_addr(hw->mac.san_addr)) {
7728                 rtnl_lock();
7729                 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
7730                 rtnl_unlock();
7731
7732                 /* update SAN MAC vmdq pool selection */
7733                 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
7734         }
7735         return err;
7736 }
7737
7738 /**
7739  * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
7740  * netdev->dev_addrs
7741  * @dev: network interface device structure
7742  *
7743  * Returns non-zero on failure
7744  **/
7745 static int ixgbe_del_sanmac_netdev(struct net_device *dev)
7746 {
7747         int err = 0;
7748         struct ixgbe_adapter *adapter = netdev_priv(dev);
7749         struct ixgbe_mac_info *mac = &adapter->hw.mac;
7750
7751         if (is_valid_ether_addr(mac->san_addr)) {
7752                 rtnl_lock();
7753                 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
7754                 rtnl_unlock();
7755         }
7756         return err;
7757 }
7758
7759 #ifdef CONFIG_NET_POLL_CONTROLLER
7760 /*
7761  * Polling 'interrupt' - used by things like netconsole to send skbs
7762  * without having to re-enable interrupts. It's not called while
7763  * the interrupt routine is executing.
7764  */
7765 static void ixgbe_netpoll(struct net_device *netdev)
7766 {
7767         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7768         int i;
7769
7770         /* if interface is down do nothing */
7771         if (test_bit(__IXGBE_DOWN, &adapter->state))
7772                 return;
7773
7774         /* loop through and schedule all active queues */
7775         for (i = 0; i < adapter->num_q_vectors; i++)
7776                 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
7777 }
7778
7779 #endif
7780 static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
7781                                                    struct rtnl_link_stats64 *stats)
7782 {
7783         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7784         int i;
7785
7786         rcu_read_lock();
7787         for (i = 0; i < adapter->num_rx_queues; i++) {
7788                 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
7789                 u64 bytes, packets;
7790                 unsigned int start;
7791
7792                 if (ring) {
7793                         do {
7794                                 start = u64_stats_fetch_begin_irq(&ring->syncp);
7795                                 packets = ring->stats.packets;
7796                                 bytes   = ring->stats.bytes;
7797                         } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
7798                         stats->rx_packets += packets;
7799                         stats->rx_bytes   += bytes;
7800                 }
7801         }
7802
7803         for (i = 0; i < adapter->num_tx_queues; i++) {
7804                 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
7805                 u64 bytes, packets;
7806                 unsigned int start;
7807
7808                 if (ring) {
7809                         do {
7810                                 start = u64_stats_fetch_begin_irq(&ring->syncp);
7811                                 packets = ring->stats.packets;
7812                                 bytes   = ring->stats.bytes;
7813                         } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
7814                         stats->tx_packets += packets;
7815                         stats->tx_bytes   += bytes;
7816                 }
7817         }
7818         rcu_read_unlock();
7819         /* following stats updated by ixgbe_watchdog_task() */
7820         stats->multicast        = netdev->stats.multicast;
7821         stats->rx_errors        = netdev->stats.rx_errors;
7822         stats->rx_length_errors = netdev->stats.rx_length_errors;
7823         stats->rx_crc_errors    = netdev->stats.rx_crc_errors;
7824         stats->rx_missed_errors = netdev->stats.rx_missed_errors;
7825         return stats;
7826 }
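/*
 * The per-ring loops above use the u64_stats seqcount pattern, e.g. for a
 * single Rx ring:
 *
 *   do {
 *           start = u64_stats_fetch_begin_irq(&ring->syncp);
 *           packets = ring->stats.packets;
 *           bytes   = ring->stats.bytes;
 *   } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 *
 * which yields tear-free 64-bit counters on 32-bit hosts without locking
 * the hot paths; ACCESS_ONCE() on the ring pointer pairs with the RCU read
 * lock so a ring that is being torn down is read consistently or skipped.
 */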
7827
7828 #ifdef CONFIG_IXGBE_DCB
7829 /**
7830  * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
7831  * @adapter: pointer to ixgbe_adapter
7832  * @tc: number of traffic classes currently enabled
7833  *
7834  * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
7835  * each 802.1Q priority maps to a packet buffer that exists.
7836  */
7837 static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
7838 {
7839         struct ixgbe_hw *hw = &adapter->hw;
7840         u32 reg, rsave;
7841         int i;
7842
7843         /* The 82598 has a static priority to TC mapping that cannot
7844          * be changed, so no validation is needed.
7845          */
7846         if (hw->mac.type == ixgbe_mac_82598EB)
7847                 return;
7848
7849         reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
7850         rsave = reg;
7851
7852         for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
7853                 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
7854
7855                 /* If up2tc is out of bounds default to zero */
7856                 if (up2tc > tc)
7857                         reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
7858         }
7859
7860         if (reg != rsave)
7861                 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
7862
7863         return;
7864 }
7865
7866 /**
7867  * ixgbe_set_prio_tc_map - Configure netdev prio tc map
7868  * @adapter: Pointer to adapter struct
7869  *
7870  * Populate the netdev user priority to tc map
7871  */
7872 static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
7873 {
7874         struct net_device *dev = adapter->netdev;
7875         struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
7876         struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
7877         u8 prio;
7878
7879         for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
7880                 u8 tc = 0;
7881
7882                 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
7883                         tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
7884                 else if (ets)
7885                         tc = ets->prio_tc[prio];
7886
7887                 netdev_set_prio_tc_map(dev, prio, tc);
7888         }
7889 }
7890
7891 #endif /* CONFIG_IXGBE_DCB */
7892 /**
7893  * ixgbe_setup_tc - configure net_device for multiple traffic classes
7894  *
7895  * @dev: net device to configure
7896  * @tc: number of traffic classes to enable
7897  */
7898 int ixgbe_setup_tc(struct net_device *dev, u8 tc)
7899 {
7900         struct ixgbe_adapter *adapter = netdev_priv(dev);
7901         struct ixgbe_hw *hw = &adapter->hw;
7902         bool pools;
7903
7904         /* Hardware supports up to 8 traffic classes */
7905         if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
7906                 return -EINVAL;
7907
7908         if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
7909                 return -EINVAL;
7910
7911         pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
7912         if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
7913                 return -EBUSY;
7914
7915         /* Hardware has to reinitialize queues and interrupts to
7916          * match packet buffer alignment. Unfortunately, the
7917          * hardware is not flexible enough to do this dynamically.
7918          */
7919         if (netif_running(dev))
7920                 ixgbe_close(dev);
7921         ixgbe_clear_interrupt_scheme(adapter);
7922
7923 #ifdef CONFIG_IXGBE_DCB
7924         if (tc) {
7925                 netdev_set_num_tc(dev, tc);
7926                 ixgbe_set_prio_tc_map(adapter);
7927
7928                 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
7929
7930                 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
7931                         adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
7932                         adapter->hw.fc.requested_mode = ixgbe_fc_none;
7933                 }
7934         } else {
7935                 netdev_reset_tc(dev);
7936
7937                 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
7938                         adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
7939
7940                 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
7941
7942                 adapter->temp_dcb_cfg.pfc_mode_enable = false;
7943                 adapter->dcb_cfg.pfc_mode_enable = false;
7944         }
7945
7946         ixgbe_validate_rtr(adapter, tc);
7947
7948 #endif /* CONFIG_IXGBE_DCB */
7949         ixgbe_init_interrupt_scheme(adapter);
7950
7951         if (netif_running(dev))
7952                 return ixgbe_open(dev);
7953
7954         return 0;
7955 }
7956
7957 #ifdef CONFIG_PCI_IOV
7958 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
7959 {
7960         struct net_device *netdev = adapter->netdev;
7961
7962         rtnl_lock();
7963         ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
7964         rtnl_unlock();
7965 }
7966
7967 #endif
7968 void ixgbe_do_reset(struct net_device *netdev)
7969 {
7970         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7971
7972         if (netif_running(netdev))
7973                 ixgbe_reinit_locked(adapter);
7974         else
7975                 ixgbe_reset(adapter);
7976 }
7977
7978 static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
7979                                             netdev_features_t features)
7980 {
7981         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7982
7983         /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
7984         if (!(features & NETIF_F_RXCSUM))
7985                 features &= ~NETIF_F_LRO;
7986
7987         /* Turn off LRO if not RSC capable */
7988         if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
7989                 features &= ~NETIF_F_LRO;
7990
7991         return features;
7992 }
7993
7994 static int ixgbe_set_features(struct net_device *netdev,
7995                               netdev_features_t features)
7996 {
7997         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7998         netdev_features_t changed = netdev->features ^ features;
7999         bool need_reset = false;
8000
8001         /* Make sure RSC matches LRO, reset if change */
8002         if (!(features & NETIF_F_LRO)) {
8003                 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
8004                         need_reset = true;
8005                 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
8006         } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
8007                    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
8008                 if (adapter->rx_itr_setting == 1 ||
8009                     adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
8010                         adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
8011                         need_reset = true;
8012                 } else if ((changed ^ features) & NETIF_F_LRO) {
8013                         e_info(probe, "rx-usecs set too low, "
8014                                "disabling RSC\n");
8015                 }
8016         }
8017
8018         /*
8019          * Check if Flow Director n-tuple support was enabled or disabled.  If
8020          * the state changed, we need to reset.
8021          */
8022         switch (features & NETIF_F_NTUPLE) {
8023         case NETIF_F_NTUPLE:
8024                 /* turn off ATR, enable perfect filters and reset */
8025                 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
8026                         need_reset = true;
8027
8028                 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
8029                 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
8030                 break;
8031         default:
8032                 /* turn off perfect filters, enable ATR and reset */
8033                 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
8034                         need_reset = true;
8035
8036                 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
8037
8038                 /* We cannot enable ATR if SR-IOV is enabled */
8039                 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8040                         break;
8041
8042                 /* We cannot enable ATR if we have 2 or more traffic classes */
8043                 if (netdev_get_num_tc(netdev) > 1)
8044                         break;
8045
8046                 /* We cannot enable ATR if RSS is disabled */
8047                 if (adapter->ring_feature[RING_F_RSS].limit <= 1)
8048                         break;
8049
8050                 /* A sample rate of 0 indicates ATR disabled */
8051                 if (!adapter->atr_sample_rate)
8052                         break;
8053
8054                 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
8055                 break;
8056         }
8057
8058         if (features & NETIF_F_HW_VLAN_CTAG_RX)
8059                 ixgbe_vlan_strip_enable(adapter);
8060         else
8061                 ixgbe_vlan_strip_disable(adapter);
8062
8063         if (changed & NETIF_F_RXALL)
8064                 need_reset = true;
8065
8066         netdev->features = features;
8067
8068 #ifdef CONFIG_IXGBE_VXLAN
8069         if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
8070                 if (features & NETIF_F_RXCSUM)
8071                         adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
8072                 else
8073                         ixgbe_clear_vxlan_port(adapter);
8074         }
8075 #endif /* CONFIG_IXGBE_VXLAN */
8076
8077         if (need_reset)
8078                 ixgbe_do_reset(netdev);
8079
8080         return 0;
8081 }
8082
8083 #ifdef CONFIG_IXGBE_VXLAN
8084 /**
8085  * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
8086  * @dev: The port's netdev
8087  * @sa_family: Socket Family that VXLAN is notifying us about
8088  * @port: New UDP port number that VXLAN started listening to
8089  **/
8090 static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8091                                  __be16 port)
8092 {
8093         struct ixgbe_adapter *adapter = netdev_priv(dev);
8094         struct ixgbe_hw *hw = &adapter->hw;
8095         u16 new_port = ntohs(port);
8096
8097         if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8098                 return;
8099
8100         if (sa_family == AF_INET6)
8101                 return;
8102
8103         if (adapter->vxlan_port == new_port)
8104                 return;
8105
8106         if (adapter->vxlan_port) {
8107                 netdev_info(dev,
8108                             "Hit Max num of VXLAN ports, not adding port %d\n",
8109                             new_port);
8110                 return;
8111         }
8112
8113         adapter->vxlan_port = new_port;
8114         IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
8115 }
8116
8117 /**
8118  * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
8119  * @dev: The port's netdev
8120  * @sa_family: Socket Family that VXLAN is notifying us about
8121  * @port: UDP port number that VXLAN stopped listening to
8122  **/
8123 static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8124                                  __be16 port)
8125 {
8126         struct ixgbe_adapter *adapter = netdev_priv(dev);
8127         u16 new_port = ntohs(port);
8128
8129         if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8130                 return;
8131
8132         if (sa_family == AF_INET6)
8133                 return;
8134
8135         if (adapter->vxlan_port != new_port) {
8136                 netdev_info(dev, "Port %d was not found, not deleting\n",
8137                             new_port);
8138                 return;
8139         }
8140
8141         ixgbe_clear_vxlan_port(adapter);
8142         adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
8143 }
8144 #endif /* CONFIG_IXGBE_VXLAN */
8145
8146 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8147                              struct net_device *dev,
8148                              const unsigned char *addr, u16 vid,
8149                              u16 flags)
8150 {
8151         /* guarantee we can provide a unique filter for the unicast address */
8152         if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
8153                 if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev))
8154                         return -ENOMEM;
8155         }
8156
8157         return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
8158 }
8159
8160 /**
8161  * ixgbe_configure_bridge_mode - set various bridge modes
8162  * @adapter: the private structure
8163  * @mode: requested bridge mode
8164  *
8165  * Configure the settings required for the various bridge modes.
8166  **/
8167 static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
8168                                        __u16 mode)
8169 {
8170         struct ixgbe_hw *hw = &adapter->hw;
8171         unsigned int p, num_pools;
8172         u32 vmdctl;
8173
8174         switch (mode) {
8175         case BRIDGE_MODE_VEPA:
8176                 /* disable Tx loopback, rely on switch hairpin mode */
8177                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
8178
8179                 /* must enable Rx switching replication to allow multicast
8180                  * packet reception on all VFs, and to enable source address
8181                  * pruning.
8182                  */
8183                 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
8184                 vmdctl |= IXGBE_VT_CTL_REPLEN;
8185                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
8186
8187                 /* enable Rx source address pruning. Note, this requires
8188                  * replication to be enabled or else it does nothing.
8189                  */
8190                 num_pools = adapter->num_vfs + adapter->num_rx_pools;
8191                 for (p = 0; p < num_pools; p++) {
8192                         if (hw->mac.ops.set_source_address_pruning)
8193                                 hw->mac.ops.set_source_address_pruning(hw,
8194                                                                        true,
8195                                                                        p);
8196                 }
8197                 break;
8198         case BRIDGE_MODE_VEB:
8199                 /* enable Tx loopback for internal VF/PF communication */
8200                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
8201                                 IXGBE_PFDTXGSWC_VT_LBEN);
8202
8203                 /* disable Rx switching replication unless we have SR-IOV
8204                  * virtual functions
8205                  */
8206                 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
8207                 if (!adapter->num_vfs)
8208                         vmdctl &= ~IXGBE_VT_CTL_REPLEN;
8209                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
8210
8211                 /* disable Rx source address pruning, since we don't expect to
8212                  * be receiving external loopback of our transmitted frames.
8213                  */
8214                 num_pools = adapter->num_vfs + adapter->num_rx_pools;
8215                 for (p = 0; p < num_pools; p++) {
8216                         if (hw->mac.ops.set_source_address_pruning)
8217                                 hw->mac.ops.set_source_address_pruning(hw,
8218                                                                        false,
8219                                                                        p);
8220                 }
8221                 break;
8222         default:
8223                 return -EINVAL;
8224         }
8225
8226         adapter->bridge_mode = mode;
8227
8228         e_info(drv, "enabling bridge mode: %s\n",
8229                mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
8230
8231         return 0;
8232 }
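/*
 * Summarizing the two branches above: VEPA defers switching to the
 * adjacent bridge - Tx loopback off, replication and source pruning on -
 * so even VF-to-VF traffic hairpins through the external switch.  VEB
 * keeps forwarding inside the adapter: Tx loopback on, replication only
 * while VFs exist, and pruning off because our own transmitted frames are
 * not expected to loop back externally.
 */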
8233
8234 static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
8235                                     struct nlmsghdr *nlh, u16 flags)
8236 {
8237         struct ixgbe_adapter *adapter = netdev_priv(dev);
8238         struct nlattr *attr, *br_spec;
8239         int rem;
8240
8241         if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
8242                 return -EOPNOTSUPP;
8243
8244         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8245         if (!br_spec)
8246                 return -EINVAL;
8247
8248         nla_for_each_nested(attr, br_spec, rem) {
8249                 int status;
8250                 __u16 mode;
8251
8252                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8253                         continue;
8254
8255                 if (nla_len(attr) < sizeof(mode))
8256                         return -EINVAL;
8257
8258                 mode = nla_get_u16(attr);
8259                 status = ixgbe_configure_bridge_mode(adapter, mode);
8260                 if (status)
8261                         return status;
8262
8263                 break;
8264         }
8265
8266         return 0;
8267 }
8268
8269 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8270                                     struct net_device *dev,
8271                                     u32 filter_mask, int nlflags)
8272 {
8273         struct ixgbe_adapter *adapter = netdev_priv(dev);
8274
8275         if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
8276                 return 0;
8277
8278         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
8279                                        adapter->bridge_mode, 0, 0, nlflags,
8280                                        filter_mask, NULL);
8281 }
8282
8283 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
8284 {
8285         struct ixgbe_fwd_adapter *fwd_adapter = NULL;
8286         struct ixgbe_adapter *adapter = netdev_priv(pdev);
8287         int used_pools = adapter->num_vfs + adapter->num_rx_pools;
8288         unsigned int limit;
8289         int pool, err;
8290
8291         /* Hardware has a limited number of available pools. Each VF and the
8292          * PF require a pool. Check to ensure we don't attempt to use more
8293          * than the available number of pools.
8294          */
8295         if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
8296                 return ERR_PTR(-EINVAL);
8297
8298 #ifdef CONFIG_RPS
8299         if (vdev->num_rx_queues != vdev->num_tx_queues) {
8300                 netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
8301                             vdev->name);
8302                 return ERR_PTR(-EINVAL);
8303         }
8304 #endif
8305         /* Check for hardware restriction on number of rx/tx queues */
8306         if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
8307             vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
8308                 netdev_info(pdev,
8309                             "%s: Supports RX/TX Queue counts 1,2, and 4\n",
8310                             pdev->name);
8311                 return ERR_PTR(-EINVAL);
8312         }
8313
8314         if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8315               adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
8316             (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
8317                 return ERR_PTR(-EBUSY);
8318
8319         fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
8320         if (!fwd_adapter)
8321                 return ERR_PTR(-ENOMEM);
8322
8323         pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
8324         adapter->num_rx_pools++;
8325         set_bit(pool, &adapter->fwd_bitmask);
8326         limit = find_last_bit(&adapter->fwd_bitmask, 32);
8327
8328         /* Enable VMDq flag so device will be set in VM mode */
8329         adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
8330         adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
8331         adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
8332
8333         /* Force reinit of ring allocation with VMDQ enabled */
8334         err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
8335         if (err)
8336                 goto fwd_add_err;
8337         fwd_adapter->pool = pool;
8338         fwd_adapter->real_adapter = adapter;
8339         err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
8340         if (err)
8341                 goto fwd_add_err;
8342         netif_tx_start_all_queues(vdev);
8343         return fwd_adapter;
8344 fwd_add_err:
8345         /* unwind counter and free adapter struct */
8346         netdev_info(pdev,
8347                     "%s: dfwd hardware acceleration failed\n", vdev->name);
8348         clear_bit(pool, &adapter->fwd_bitmask);
8349         adapter->num_rx_pools--;
8350         kfree(fwd_adapter);
8351         return ERR_PTR(err);
8352 }
8353
8354 static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
8355 {
8356         struct ixgbe_fwd_adapter *fwd_adapter = priv;
8357         struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
8358         unsigned int limit;
8359
8360         clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
8361         adapter->num_rx_pools--;
8362
8363         limit = find_last_bit(&adapter->fwd_bitmask, 32);
8364         adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
8365         ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
8366         ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
8367         netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
8368                    fwd_adapter->pool, adapter->num_rx_pools,
8369                    fwd_adapter->rx_base_queue,
8370                    fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
8371                    adapter->fwd_bitmask);
8372         kfree(fwd_adapter);
8373 }
8374
8375 #define IXGBE_MAX_TUNNEL_HDR_LEN 80
8376 static netdev_features_t
8377 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
8378                      netdev_features_t features)
8379 {
8380         if (!skb->encapsulation)
8381                 return features;
8382
8383         if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
8384                      IXGBE_MAX_TUNNEL_HDR_LEN))
8385                 return features & ~NETIF_F_ALL_CSUM;
8386
8387         return features;
8388 }
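/*
 * The length test above is the whole policy: if the tunnel headers (inner
 * MAC offset minus outer transport offset) exceed IXGBE_MAX_TUNNEL_HDR_LEN,
 * the hardware cannot parse deep enough to fill in the inner checksum, so
 * every checksum offload is masked off and the stack falls back to
 * computing it in software for that skb.
 */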
8389
8390 static const struct net_device_ops ixgbe_netdev_ops = {
8391         .ndo_open               = ixgbe_open,
8392         .ndo_stop               = ixgbe_close,
8393         .ndo_start_xmit         = ixgbe_xmit_frame,
8394         .ndo_select_queue       = ixgbe_select_queue,
8395         .ndo_set_rx_mode        = ixgbe_set_rx_mode,
8396         .ndo_validate_addr      = eth_validate_addr,
8397         .ndo_set_mac_address    = ixgbe_set_mac,
8398         .ndo_change_mtu         = ixgbe_change_mtu,
8399         .ndo_tx_timeout         = ixgbe_tx_timeout,
8400         .ndo_vlan_rx_add_vid    = ixgbe_vlan_rx_add_vid,
8401         .ndo_vlan_rx_kill_vid   = ixgbe_vlan_rx_kill_vid,
8402         .ndo_do_ioctl           = ixgbe_ioctl,
8403         .ndo_set_vf_mac         = ixgbe_ndo_set_vf_mac,
8404         .ndo_set_vf_vlan        = ixgbe_ndo_set_vf_vlan,
8405         .ndo_set_vf_rate        = ixgbe_ndo_set_vf_bw,
8406         .ndo_set_vf_spoofchk    = ixgbe_ndo_set_vf_spoofchk,
8407         .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
8408         .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
8409         .ndo_get_stats64        = ixgbe_get_stats64,
8410 #ifdef CONFIG_IXGBE_DCB
8411         .ndo_setup_tc           = ixgbe_setup_tc,
8412 #endif
8413 #ifdef CONFIG_NET_POLL_CONTROLLER
8414         .ndo_poll_controller    = ixgbe_netpoll,
8415 #endif
8416 #ifdef CONFIG_NET_RX_BUSY_POLL
8417         .ndo_busy_poll          = ixgbe_low_latency_recv,
8418 #endif
8419 #ifdef IXGBE_FCOE
8420         .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
8421         .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
8422         .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
8423         .ndo_fcoe_enable = ixgbe_fcoe_enable,
8424         .ndo_fcoe_disable = ixgbe_fcoe_disable,
8425         .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
8426         .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
8427 #endif /* IXGBE_FCOE */
8428         .ndo_set_features = ixgbe_set_features,
8429         .ndo_fix_features = ixgbe_fix_features,
8430         .ndo_fdb_add            = ixgbe_ndo_fdb_add,
8431         .ndo_bridge_setlink     = ixgbe_ndo_bridge_setlink,
8432         .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
8433         .ndo_dfwd_add_station   = ixgbe_fwd_add,
8434         .ndo_dfwd_del_station   = ixgbe_fwd_del,
8435 #ifdef CONFIG_IXGBE_VXLAN
8436         .ndo_add_vxlan_port     = ixgbe_add_vxlan_port,
8437         .ndo_del_vxlan_port     = ixgbe_del_vxlan_port,
8438 #endif /* CONFIG_IXGBE_VXLAN */
8439         .ndo_features_check     = ixgbe_features_check,
8440 };
8441
8442 /**
8443  * ixgbe_enumerate_functions - Get the number of ports this device has
8444  * @adapter: adapter structure
8445  *
8446  * This function enumerates the physical functions co-located on a single slot,
8447  * in order to determine how many ports a device has. This is most useful in
8448  * determining the PCIe bandwidth (in GT/s) required for optimal
8449  * performance.
8450  **/
8451 static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
8452 {
8453         struct pci_dev *entry, *pdev = adapter->pdev;
8454         int physfns = 0;
8455
8456         /* Some cards cannot use the generic method of counting PCIe
8457          * functions, because they sit behind a parent switch, so we
8458          * hardcode the correct number of functions for them.
8459          */
8460         if (ixgbe_pcie_from_parent(&adapter->hw))
8461                 physfns = 4;
8462
8463         list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
8464                 /* don't count virtual functions */
8465                 if (entry->is_virtfn)
8466                         continue;
8467
8468                 /* When the devices on the bus don't all match our device ID,
8469                  * we can't reliably determine the correct number of
8470                  * functions. This can occur if a function has been direct
8471                  * attached to a virtual machine using VT-d, for example. In
8472                  * this case, simply return -1 to indicate this.
8473                  */
8474                 if ((entry->vendor != pdev->vendor) ||
8475                     (entry->device != pdev->device))
8476                         return -1;
8477
8478                 physfns++;
8479         }
8480
8481         return physfns;
8482 }
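/* Worked example (illustrative): a dual-port adapter with both
 * functions on one slot returns physfns == 2, so ixgbe_probe() below
 * expects 2 * 10 = 20 GT/s of PCIe bandwidth, roughly a PCIe 2.0 x4
 * link (4 lanes * 5 GT/s).  A return of -1 (mixed device IDs on the
 * bus, e.g. with VT-d direct assignment) suppresses the link check.
 */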
8483
8484 /**
8485  * ixgbe_wol_supported - Check whether device supports WoL
8486  * @adapter: the adapter private structure
8487  * @device_id: the device ID
8488  * @subdevice_id: the subsystem device ID
8489  *
8490  * This function is used by probe and ethtool to determine
8491  * which devices have WoL support
8492  *
8493  **/
8494 int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
8495                         u16 subdevice_id)
8496 {
8497         struct ixgbe_hw *hw = &adapter->hw;
8498         u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
8499         int is_wol_supported = 0;
8500
8501         switch (device_id) {
8502         case IXGBE_DEV_ID_82599_SFP:
8503                 /* Only these subdevices could support WoL */
8504                 switch (subdevice_id) {
8505                 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
8506                 case IXGBE_SUBDEV_ID_82599_560FLR:
8507                         /* only support first port */
8508                         if (hw->bus.func != 0)
8509                                 break;
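                        /* fall through */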
8510                 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
8511                 case IXGBE_SUBDEV_ID_82599_SFP:
8512                 case IXGBE_SUBDEV_ID_82599_RNDC:
8513                 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
8514                 case IXGBE_SUBDEV_ID_82599_LOM_SFP:
8515                         is_wol_supported = 1;
8516                         break;
8517                 }
8518                 break;
8519         case IXGBE_DEV_ID_82599EN_SFP:
8520                 /* Only this subdevice supports WoL */
8521                 switch (subdevice_id) {
8522                 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
8523                         is_wol_supported = 1;
8524                         break;
8525                 }
8526                 break;
8527         case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
8528                 /* All except this subdevice support WoL */
8529                 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
8530                         is_wol_supported = 1;
8531                 break;
8532         case IXGBE_DEV_ID_82599_KX4:
8533                 is_wol_supported = 1;
8534                 break;
8535         case IXGBE_DEV_ID_X540T:
8536         case IXGBE_DEV_ID_X540T1:
8537         case IXGBE_DEV_ID_X550T:
8538         case IXGBE_DEV_ID_X550EM_X_KX4:
8539         case IXGBE_DEV_ID_X550EM_X_KR:
8540         case IXGBE_DEV_ID_X550EM_X_10G_T:
8541                 /* check the EEPROM to see if WoL is enabled */
8542                 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
8543                     ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
8544                      (hw->bus.func == 0))) {
8545                         is_wol_supported = 1;
8546                 }
8547                 break;
8548         }
8549
8550         return is_wol_supported;
8551 }
8552
8553 /**
8554  * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
8555  * @adapter: Pointer to adapter struct
8556  */
8557 static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
8558 {
8559 #ifdef CONFIG_OF
8560         struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
8561         struct ixgbe_hw *hw = &adapter->hw;
8562         const unsigned char *addr;
8563
8564         addr = of_get_mac_address(dp);
8565         if (addr) {
8566                 ether_addr_copy(hw->mac.perm_addr, addr);
8567                 return;
8568         }
8569 #endif /* CONFIG_OF */
8570
8571 #ifdef CONFIG_SPARC
8572         ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
8573 #endif /* CONFIG_SPARC */
8574 }
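/* Note (added for clarity): if neither an OF MAC-address property nor
 * the SPARC IDPROM is available, hw->mac.perm_addr is left untouched
 * here; ixgbe_probe() relies on reset_hw to fill it in instead.
 */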
8575
8576 /**
8577  * ixgbe_probe - Device Initialization Routine
8578  * @pdev: PCI device information struct
8579  * @ent: entry in ixgbe_pci_tbl
8580  *
8581  * Returns 0 on success, negative on failure
8582  *
8583  * ixgbe_probe initializes an adapter identified by a pci_dev structure.
8584  * The OS initialization, configuring of the adapter private structure,
8585  * and a hardware reset occur.
8586  **/
8587 static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8588 {
8589         struct net_device *netdev;
8590         struct ixgbe_adapter *adapter = NULL;
8591         struct ixgbe_hw *hw;
8592         const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
8593         int i, err, pci_using_dac, expected_gts;
8594         unsigned int indices = MAX_TX_QUEUES;
8595         u8 part_str[IXGBE_PBANUM_LENGTH];
8596         bool disable_dev = false;
8597 #ifdef IXGBE_FCOE
8598         u16 device_caps;
8599 #endif
8600         u32 eec;
8601
8602         /* Catch broken hardware that put the wrong VF device ID in
8603          * the PCIe SR-IOV capability.
8604          */
8605         if (pdev->is_virtfn) {
8606                 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
8607                      pci_name(pdev), pdev->vendor, pdev->device);
8608                 return -EINVAL;
8609         }
8610
8611         err = pci_enable_device_mem(pdev);
8612         if (err)
8613                 return err;
8614
8615         if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
8616                 pci_using_dac = 1;
8617         } else {
8618                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8619                 if (err) {
8620                         dev_err(&pdev->dev,
8621                                 "No usable DMA configuration, aborting\n");
8622                         goto err_dma;
8623                 }
8624                 pci_using_dac = 0;
8625         }
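        /* Note (added for clarity): this is the usual try-64-bit,
         * fall-back-to-32-bit DMA mask pattern; pci_using_dac is
         * checked later in probe to decide whether NETIF_F_HIGHDMA
         * can be advertised.
         */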
8626
8627         err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
8628                                            IORESOURCE_MEM), ixgbe_driver_name);
8629         if (err) {
8630                 dev_err(&pdev->dev,
8631                         "pci_request_selected_regions failed 0x%x\n", err);
8632                 goto err_pci_reg;
8633         }
8634
8635         pci_enable_pcie_error_reporting(pdev);
8636
8637         pci_set_master(pdev);
8638         pci_save_state(pdev);
8639
8640         if (ii->mac == ixgbe_mac_82598EB) {
8641 #ifdef CONFIG_IXGBE_DCB
8642                 /* 8 TC w/ 4 queues per TC */
8643                 indices = 4 * MAX_TRAFFIC_CLASS;
8644 #else
8645                 indices = IXGBE_MAX_RSS_INDICES;
8646 #endif
8647         }
8648
8649         netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
8650         if (!netdev) {
8651                 err = -ENOMEM;
8652                 goto err_alloc_etherdev;
8653         }
8654
8655         SET_NETDEV_DEV(netdev, &pdev->dev);
8656
8657         adapter = netdev_priv(netdev);
8658
8659         adapter->netdev = netdev;
8660         adapter->pdev = pdev;
8661         hw = &adapter->hw;
8662         hw->back = adapter;
8663         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
8664
8665         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
8666                               pci_resource_len(pdev, 0));
8667         adapter->io_addr = hw->hw_addr;
8668         if (!hw->hw_addr) {
8669                 err = -EIO;
8670                 goto err_ioremap;
8671         }
8672
8673         netdev->netdev_ops = &ixgbe_netdev_ops;
8674         ixgbe_set_ethtool_ops(netdev);
8675         netdev->watchdog_timeo = 5 * HZ;
8676         strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
8677
8678         /* Setup hw api */
8679         memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
8680         hw->mac.type  = ii->mac;
8681         hw->mvals     = ii->mvals;
8682
8683         /* EEPROM */
8684         memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
8685         eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
8686         if (ixgbe_removed(hw->hw_addr)) {
8687                 err = -EIO;
8688                 goto err_ioremap;
8689         }
8690         /* If EEPROM is valid (bit 8 = 1), use the default; otherwise use bit-bang */
8691         if (!(eec & (1 << 8)))
8692                 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
8693
8694         /* PHY */
8695         memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
8696         hw->phy.sfp_type = ixgbe_sfp_type_unknown;
8697         /* ixgbe_identify_phy_generic will set prtad and mmds properly */
8698         hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
8699         hw->phy.mdio.mmds = 0;
8700         hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8701         hw->phy.mdio.dev = netdev;
8702         hw->phy.mdio.mdio_read = ixgbe_mdio_read;
8703         hw->phy.mdio.mdio_write = ixgbe_mdio_write;
8704
8705         ii->get_invariants(hw);
8706
8707         /* setup the private structure */
8708         err = ixgbe_sw_init(adapter);
8709         if (err)
8710                 goto err_sw_init;
8711
8712         /* Make it possible for the adapter to be woken up via WoL */
8713         switch (adapter->hw.mac.type) {
8714         case ixgbe_mac_82599EB:
8715         case ixgbe_mac_X540:
8716         case ixgbe_mac_X550:
8717         case ixgbe_mac_X550EM_x:
8718                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
8719                 break;
8720         default:
8721                 break;
8722         }
8723
8724         /*
8725          * If there is a fan on this device and it has failed, log the
8726          * failure.
8727          */
8728         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
8729                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
8730                 if (esdp & IXGBE_ESDP_SDP1)
8731                         e_crit(probe, "Fan has stopped, replace the adapter\n");
8732         }
8733
8734         if (allow_unsupported_sfp)
8735                 hw->allow_unsupported_sfp = allow_unsupported_sfp;
8736
8737         /* reset_hw fills in the perm_addr as well */
8738         hw->phy.reset_if_overtemp = true;
8739         err = hw->mac.ops.reset_hw(hw);
8740         hw->phy.reset_if_overtemp = false;
8741         if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
8742                 err = 0;
8743         } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
8744                 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
8745                 e_dev_err("Reload the driver after installing a supported module.\n");
8746                 goto err_sw_init;
8747         } else if (err) {
8748                 e_dev_err("HW Init failed: %d\n", err);
8749                 goto err_sw_init;
8750         }
8751
8752 #ifdef CONFIG_PCI_IOV
8753         /* SR-IOV not supported on the 82598 */
8754         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
8755                 goto skip_sriov;
8756         /* Mailbox */
8757         ixgbe_init_mbx_params_pf(hw);
8758         memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
8759         pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
8760         ixgbe_enable_sriov(adapter);
8761 skip_sriov:
8762
8763 #endif
8764         netdev->features = NETIF_F_SG |
8765                            NETIF_F_IP_CSUM |
8766                            NETIF_F_IPV6_CSUM |
8767                            NETIF_F_HW_VLAN_CTAG_TX |
8768                            NETIF_F_HW_VLAN_CTAG_RX |
8769                            NETIF_F_TSO |
8770                            NETIF_F_TSO6 |
8771                            NETIF_F_RXHASH |
8772                            NETIF_F_RXCSUM;
8773
8774         netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
8775
8776         switch (adapter->hw.mac.type) {
8777         case ixgbe_mac_82599EB:
8778         case ixgbe_mac_X540:
8779         case ixgbe_mac_X550:
8780         case ixgbe_mac_X550EM_x:
8781                 netdev->features |= NETIF_F_SCTP_CSUM;
8782                 netdev->hw_features |= NETIF_F_SCTP_CSUM |
8783                                        NETIF_F_NTUPLE;
8784                 break;
8785         default:
8786                 break;
8787         }
8788
8789         netdev->hw_features |= NETIF_F_RXALL;
8790         netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
8791
8792         netdev->vlan_features |= NETIF_F_TSO;
8793         netdev->vlan_features |= NETIF_F_TSO6;
8794         netdev->vlan_features |= NETIF_F_IP_CSUM;
8795         netdev->vlan_features |= NETIF_F_IPV6_CSUM;
8796         netdev->vlan_features |= NETIF_F_SG;
8797
8798         netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
8799                                    NETIF_F_IPV6_CSUM;
8800
8801         netdev->priv_flags |= IFF_UNICAST_FLT;
8802         netdev->priv_flags |= IFF_SUPP_NOFCS;
8803
8804 #ifdef CONFIG_IXGBE_VXLAN
8805         switch (adapter->hw.mac.type) {
8806         case ixgbe_mac_X550:
8807         case ixgbe_mac_X550EM_x:
8808                 netdev->hw_enc_features |= NETIF_F_RXCSUM |
8809                                            NETIF_F_IP_CSUM |
8810                                            NETIF_F_IPV6_CSUM;
8811                 break;
8812         default:
8813                 break;
8814         }
8815 #endif /* CONFIG_IXGBE_VXLAN */
8816
8817 #ifdef CONFIG_IXGBE_DCB
8818         netdev->dcbnl_ops = &dcbnl_ops;
8819 #endif
8820
8821 #ifdef IXGBE_FCOE
8822         if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
8823                 unsigned int fcoe_l;
8824
8825                 if (hw->mac.ops.get_device_caps) {
8826                         hw->mac.ops.get_device_caps(hw, &device_caps);
8827                         if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
8828                                 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
8829                 }
8830
8831
8832                 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
8833                 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
8834
8835                 netdev->features |= NETIF_F_FSO |
8836                                     NETIF_F_FCOE_CRC;
8837
8838                 netdev->vlan_features |= NETIF_F_FSO |
8839                                          NETIF_F_FCOE_CRC |
8840                                          NETIF_F_FCOE_MTU;
8841         }
8842 #endif /* IXGBE_FCOE */
8843         if (pci_using_dac) {
8844                 netdev->features |= NETIF_F_HIGHDMA;
8845                 netdev->vlan_features |= NETIF_F_HIGHDMA;
8846         }
8847
8848         if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
8849                 netdev->hw_features |= NETIF_F_LRO;
8850         if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
8851                 netdev->features |= NETIF_F_LRO;
8852
8853         /* make sure the EEPROM is good */
8854         if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
8855                 e_dev_err("The EEPROM Checksum Is Not Valid\n");
8856                 err = -EIO;
8857                 goto err_sw_init;
8858         }
8859
8860         ixgbe_get_platform_mac_addr(adapter);
8861
8862         memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
8863
8864         if (!is_valid_ether_addr(netdev->dev_addr)) {
8865                 e_dev_err("invalid MAC address\n");
8866                 err = -EIO;
8867                 goto err_sw_init;
8868         }
8869
8870         ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
8871
8872         setup_timer(&adapter->service_timer, &ixgbe_service_timer,
8873                     (unsigned long) adapter);
8874
8875         if (ixgbe_removed(hw->hw_addr)) {
8876                 err = -EIO;
8877                 goto err_sw_init;
8878         }
8879         INIT_WORK(&adapter->service_task, ixgbe_service_task);
8880         set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
8881         clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
8882
8883         err = ixgbe_init_interrupt_scheme(adapter);
8884         if (err)
8885                 goto err_sw_init;
8886
8887         /* WoL is not supported on all devices */
8888         adapter->wol = 0;
8889         hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
8890         hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
8891                                                 pdev->subsystem_device);
8892         if (hw->wol_enabled)
8893                 adapter->wol = IXGBE_WUFC_MAG;
8894
8895         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
8896
8897         /* save off EEPROM version number */
8898         hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
8899         hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
8900
8901         /* pick up the PCI bus settings for reporting later */
8902         if (ixgbe_pcie_from_parent(hw))
8903                 ixgbe_get_parent_bus_info(adapter);
8904         else
8905                 hw->mac.ops.get_bus_info(hw);
8906
8907         /* calculate the expected PCIe bandwidth required for optimal
8908          * performance. Note that some older parts will never have enough
8909          * bandwidth due to being older generation PCIe parts. We clamp these
8910          * parts to ensure no warning is displayed if it can't be fixed.
8911          */
8912         switch (hw->mac.type) {
8913         case ixgbe_mac_82598EB:
8914                 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
8915                 break;
8916         default:
8917                 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
8918                 break;
8919         }
8920
8921         /* don't check link if we failed to enumerate functions */
8922         if (expected_gts > 0)
8923                 ixgbe_check_minimum_link(adapter, expected_gts);
8924
8925         err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
8926         if (err)
8927                 strlcpy(part_str, "Unknown", sizeof(part_str));
8928         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
8929                 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
8930                            hw->mac.type, hw->phy.type, hw->phy.sfp_type,
8931                            part_str);
8932         else
8933                 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
8934                            hw->mac.type, hw->phy.type, part_str);
8935
8936         e_dev_info("%pM\n", netdev->dev_addr);
8937
8938         /* reset the hardware with the new settings */
8939         err = hw->mac.ops.start_hw(hw);
8940         if (err == IXGBE_ERR_EEPROM_VERSION) {
8941                 /* We are running on a pre-production device; log a warning */
8942                 e_dev_warn("This device is a pre-production adapter/LOM. "
8943                            "Please be aware there may be issues associated "
8944                            "with your hardware.  If you are experiencing "
8945                            "problems, please contact your Intel or hardware "
8946                            "representative who provided you with this "
8947                            "hardware.\n");
8948         }
8949         strcpy(netdev->name, "eth%d");
8950         err = register_netdev(netdev);
8951         if (err)
8952                 goto err_register;
8953
8954         pci_set_drvdata(pdev, adapter);
8955
8956         /* power down the optics for 82599 SFP+ fiber */
8957         if (hw->mac.ops.disable_tx_laser)
8958                 hw->mac.ops.disable_tx_laser(hw);
8959
8960         /* carrier off reporting is important to ethtool even BEFORE open */
8961         netif_carrier_off(netdev);
8962
8963 #ifdef CONFIG_IXGBE_DCA
8964         if (dca_add_requester(&pdev->dev) == 0) {
8965                 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
8966                 ixgbe_setup_dca(adapter);
8967         }
8968 #endif
8969         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
8970                 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
8971                 for (i = 0; i < adapter->num_vfs; i++)
8972                         ixgbe_vf_configuration(pdev, (i | 0x10000000));
8973         }
8974
8975         /* firmware requires the driver version to be 0xFFFFFFFF,
8976          * since the OS does not support this feature
8977          */
8978         if (hw->mac.ops.set_fw_drv_ver)
8979                 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
8980                                            0xFF);
8981
8982         /* add san mac addr to netdev */
8983         ixgbe_add_sanmac_netdev(netdev);
8984
8985         e_dev_info("%s\n", ixgbe_default_device_descr);
8986
8987 #ifdef CONFIG_IXGBE_HWMON
8988         if (ixgbe_sysfs_init(adapter))
8989                 e_err(probe, "failed to allocate sysfs resources\n");
8990 #endif /* CONFIG_IXGBE_HWMON */
8991
8992         ixgbe_dbg_adapter_init(adapter);
8993
8994         /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
8995         if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
8996                 hw->mac.ops.setup_link(hw,
8997                         IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
8998                         true);
8999
9000         return 0;
9001
9002 err_register:
9003         ixgbe_release_hw_control(adapter);
9004         ixgbe_clear_interrupt_scheme(adapter);
9005 err_sw_init:
9006         ixgbe_disable_sriov(adapter);
9007         adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
9008         iounmap(adapter->io_addr);
9009         kfree(adapter->mac_table);
9010 err_ioremap:
9011         disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
9012         free_netdev(netdev);
9013 err_alloc_etherdev:
9014         pci_release_selected_regions(pdev,
9015                                      pci_select_bars(pdev, IORESOURCE_MEM));
9016 err_pci_reg:
9017 err_dma:
9018         if (!adapter || disable_dev)
9019                 pci_disable_device(pdev);
9020         return err;
9021 }
9022
9023 /**
9024  * ixgbe_remove - Device Removal Routine
9025  * @pdev: PCI device information struct
9026  *
9027  * ixgbe_remove is called by the PCI subsystem to alert the driver
9028  * that it should release a PCI device.  The could be caused by a
9029  * Hot-Plug event, or because the driver is going to be removed from
9030  * memory.
9031  **/
9032 static void ixgbe_remove(struct pci_dev *pdev)
9033 {
9034         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9035         struct net_device *netdev;
9036         bool disable_dev;
9037
9038         /* if !adapter then we already cleaned up in probe */
9039         if (!adapter)
9040                 return;
9041
9042         netdev  = adapter->netdev;
9043         ixgbe_dbg_adapter_exit(adapter);
9044
9045         set_bit(__IXGBE_REMOVING, &adapter->state);
9046         cancel_work_sync(&adapter->service_task);
9047
9048
9049 #ifdef CONFIG_IXGBE_DCA
9050         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
9051                 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
9052                 dca_remove_requester(&pdev->dev);
9053                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
9054                                 IXGBE_DCA_CTRL_DCA_DISABLE);
9055         }
9056
9057 #endif
9058 #ifdef CONFIG_IXGBE_HWMON
9059         ixgbe_sysfs_exit(adapter);
9060 #endif /* CONFIG_IXGBE_HWMON */
9061
9062         /* remove the added san mac */
9063         ixgbe_del_sanmac_netdev(netdev);
9064
9065 #ifdef CONFIG_PCI_IOV
9066         ixgbe_disable_sriov(adapter);
9067 #endif
9068         if (netdev->reg_state == NETREG_REGISTERED)
9069                 unregister_netdev(netdev);
9070
9071         ixgbe_clear_interrupt_scheme(adapter);
9072
9073         ixgbe_release_hw_control(adapter);
9074
9075 #ifdef CONFIG_DCB
9076         kfree(adapter->ixgbe_ieee_pfc);
9077         kfree(adapter->ixgbe_ieee_ets);
9078
9079 #endif
9080         iounmap(adapter->io_addr);
9081         pci_release_selected_regions(pdev, pci_select_bars(pdev,
9082                                      IORESOURCE_MEM));
9083
9084         e_dev_info("complete\n");
9085
9086         kfree(adapter->mac_table);
9087         disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
9088         free_netdev(netdev);
9089
9090         pci_disable_pcie_error_reporting(pdev);
9091
9092         if (disable_dev)
9093                 pci_disable_device(pdev);
9094 }
9095
9096 /**
9097  * ixgbe_io_error_detected - called when PCI error is detected
9098  * @pdev: Pointer to PCI device
9099  * @state: The current pci connection state
9100  *
9101  * This function is called after a PCI bus error affecting
9102  * this device has been detected.
9103  */
9104 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
9105                                                 pci_channel_state_t state)
9106 {
9107         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9108         struct net_device *netdev = adapter->netdev;
9109
9110 #ifdef CONFIG_PCI_IOV
9111         struct ixgbe_hw *hw = &adapter->hw;
9112         struct pci_dev *bdev, *vfdev;
9113         u32 dw0, dw1, dw2, dw3;
9114         int vf, pos;
9115         u16 req_id, pf_func;
9116
9117         if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
9118             adapter->num_vfs == 0)
9119                 goto skip_bad_vf_detection;
9120
9121         bdev = pdev->bus->self;
9122         while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
9123                 bdev = bdev->bus->self;
9124
9125         if (!bdev)
9126                 goto skip_bad_vf_detection;
9127
9128         pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
9129         if (!pos)
9130                 goto skip_bad_vf_detection;
9131
9132         dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
9133         dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
9134         dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
9135         dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
9136         if (ixgbe_removed(hw->hw_addr))
9137                 goto skip_bad_vf_detection;
9138
9139         req_id = dw1 >> 16;
9140         /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
9141         if (!(req_id & 0x0080))
9142                 goto skip_bad_vf_detection;
9143
9144         pf_func = req_id & 0x01;
9145         if ((pf_func & 1) == (pdev->devfn & 1)) {
9146                 unsigned int device_id;
9147
9148                 vf = (req_id & 0x7F) >> 1;
9149                 e_dev_err("VF %d has caused a PCIe error\n", vf);
9150         e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
9151                   "%8.8x\tdw3: %8.8x\n",
9152                   dw0, dw1, dw2, dw3);
9153                 switch (adapter->hw.mac.type) {
9154                 case ixgbe_mac_82599EB:
9155                         device_id = IXGBE_82599_VF_DEVICE_ID;
9156                         break;
9157                 case ixgbe_mac_X540:
9158                         device_id = IXGBE_X540_VF_DEVICE_ID;
9159                         break;
9160                 case ixgbe_mac_X550:
9161                         device_id = IXGBE_DEV_ID_X550_VF;
9162                         break;
9163                 case ixgbe_mac_X550EM_x:
9164                         device_id = IXGBE_DEV_ID_X550EM_X_VF;
9165                         break;
9166                 default:
9167                         device_id = 0;
9168                         break;
9169                 }
9170
9171                 /* Find the pci device of the offending VF */
9172                 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
9173                 while (vfdev) {
9174                         if (vfdev->devfn == (req_id & 0xFF))
9175                                 break;
9176                         vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
9177                                                device_id, vfdev);
9178                 }
9179                 /*
9180                  * There's a slim chance the VF could have been hot plugged,
9181                  * so if it is no longer present we don't need to issue the
9182                  * VFLR.  Just clean up the AER in that case.
9183                  */
9184                 if (vfdev) {
9185                         ixgbe_issue_vf_flr(adapter, vfdev);
9186                         /* Free device reference count */
9187                         pci_dev_put(vfdev);
9188                 }
9189
9190                 pci_cleanup_aer_uncorrect_error_status(pdev);
9191         }
9192
9193         /*
9194          * Even though the error may have occurred on the other port,
9195          * we still need to increment the VF error reference count for
9196          * both ports, because the I/O resume function will be called
9197          * for both of them.
9198          */
9199         adapter->vferr_refcount++;
9200
9201         return PCI_ERS_RESULT_RECOVERED;
9202
9203 skip_bad_vf_detection:
9204 #endif /* CONFIG_PCI_IOV */
9205         if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
9206                 return PCI_ERS_RESULT_DISCONNECT;
9207
9208         rtnl_lock();
9209         netif_device_detach(netdev);
9210
9211         if (state == pci_channel_io_perm_failure) {
9212                 rtnl_unlock();
9213                 return PCI_ERS_RESULT_DISCONNECT;
9214         }
9215
9216         if (netif_running(netdev))
9217                 ixgbe_down(adapter);
9218
9219         if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
9220                 pci_disable_device(pdev);
9221         rtnl_unlock();
9222
9223         /* Request a slot reset. */
9224         return PCI_ERS_RESULT_NEED_RESET;
9225 }
9226
9227 /**
9228  * ixgbe_io_slot_reset - called after the pci bus has been reset.
9229  * @pdev: Pointer to PCI device
9230  *
9231  * Restart the card from scratch, as if from a cold-boot.
9232  */
9233 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
9234 {
9235         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9236         pci_ers_result_t result;
9237         int err;
9238
9239         if (pci_enable_device_mem(pdev)) {
9240                 e_err(probe, "Cannot re-enable PCI device after reset.\n");
9241                 result = PCI_ERS_RESULT_DISCONNECT;
9242         } else {
9243                 smp_mb__before_atomic();
9244                 clear_bit(__IXGBE_DISABLED, &adapter->state);
9245                 adapter->hw.hw_addr = adapter->io_addr;
9246                 pci_set_master(pdev);
9247                 pci_restore_state(pdev);
9248                 pci_save_state(pdev);
9249
9250                 pci_wake_from_d3(pdev, false);
9251
9252                 ixgbe_reset(adapter);
9253                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
9254                 result = PCI_ERS_RESULT_RECOVERED;
9255         }
9256
9257         err = pci_cleanup_aer_uncorrect_error_status(pdev);
9258         if (err) {
9259                 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
9260                           "failed 0x%0x\n", err);
9261                 /* non-fatal, continue */
9262         }
9263
9264         return result;
9265 }
9266
9267 /**
9268  * ixgbe_io_resume - called when traffic can start flowing again.
9269  * @pdev: Pointer to PCI device
9270  *
9271  * This callback is called when the error recovery driver tells us that
9272  * it's OK to resume normal operation.
9273  */
9274 static void ixgbe_io_resume(struct pci_dev *pdev)
9275 {
9276         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9277         struct net_device *netdev = adapter->netdev;
9278
9279 #ifdef CONFIG_PCI_IOV
9280         if (adapter->vferr_refcount) {
9281                 e_info(drv, "Resuming after VF err\n");
9282                 adapter->vferr_refcount--;
9283                 return;
9284         }
9285
9286 #endif
9287         if (netif_running(netdev))
9288                 ixgbe_up(adapter);
9289
9290         netif_device_attach(netdev);
9291 }
9292
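/* Note (added for clarity): these callbacks implement the standard PCI
 * AER recovery sequence - the PCI core invokes .error_detected first,
 * then .slot_reset once the link has been reset, and finally .resume
 * when traffic may flow again.
 */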
9293 static const struct pci_error_handlers ixgbe_err_handler = {
9294         .error_detected = ixgbe_io_error_detected,
9295         .slot_reset = ixgbe_io_slot_reset,
9296         .resume = ixgbe_io_resume,
9297 };
9298
9299 static struct pci_driver ixgbe_driver = {
9300         .name     = ixgbe_driver_name,
9301         .id_table = ixgbe_pci_tbl,
9302         .probe    = ixgbe_probe,
9303         .remove   = ixgbe_remove,
9304 #ifdef CONFIG_PM
9305         .suspend  = ixgbe_suspend,
9306         .resume   = ixgbe_resume,
9307 #endif
9308         .shutdown = ixgbe_shutdown,
9309         .sriov_configure = ixgbe_pci_sriov_configure,
9310         .err_handler = &ixgbe_err_handler
9311 };
9312
9313 /**
9314  * ixgbe_init_module - Driver Registration Routine
9315  *
9316  * ixgbe_init_module is the first routine called when the driver is
9317  * loaded. All it does is register with the PCI subsystem.
9318  **/
9319 static int __init ixgbe_init_module(void)
9320 {
9321         int ret;
9322         pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
9323         pr_info("%s\n", ixgbe_copyright);
9324
9325         ixgbe_dbg_init();
9326
9327         ret = pci_register_driver(&ixgbe_driver);
9328         if (ret) {
9329                 ixgbe_dbg_exit();
9330                 return ret;
9331         }
9332
9333 #ifdef CONFIG_IXGBE_DCA
9334         dca_register_notify(&dca_notifier);
9335 #endif
9336
9337         return 0;
9338 }
9339
9340 module_init(ixgbe_init_module);
9341
9342 /**
9343  * ixgbe_exit_module - Driver Exit Cleanup Routine
9344  *
9345  * ixgbe_exit_module is called just before the driver is removed
9346  * from memory.
9347  **/
9348 static void __exit ixgbe_exit_module(void)
9349 {
9350 #ifdef CONFIG_IXGBE_DCA
9351         dca_unregister_notify(&dca_notifier);
9352 #endif
9353         pci_unregister_driver(&ixgbe_driver);
9354
9355         ixgbe_dbg_exit();
9356 }
9357
9358 #ifdef CONFIG_IXGBE_DCA
9359 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
9360                             void *p)
9361 {
9362         int ret_val;
9363
9364         ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
9365                                          __ixgbe_notify_dca);
9366
9367         return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
9368 }
9369
9370 #endif /* CONFIG_IXGBE_DCA */
9371
9372 module_exit(ixgbe_exit_module);
9373
9374 /* ixgbe_main.c */