]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/intel/i40e/i40e_main.c
3bb832a2ec51a6997a7fab7d832176ce9a606977
[karo-tx-linux.git] / drivers / net / ethernet / intel / i40e / i40e_main.c
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2015 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26
27 /* Local includes */
28 #include "i40e.h"
29 #include "i40e_diag.h"
30 #ifdef CONFIG_I40E_VXLAN
31 #include <net/vxlan.h>
32 #endif
33
34 const char i40e_driver_name[] = "i40e";
35 static const char i40e_driver_string[] =
36                         "Intel(R) Ethernet Connection XL710 Network Driver";
37
38 #define DRV_KERN "-k"
39
40 #define DRV_VERSION_MAJOR 1
41 #define DRV_VERSION_MINOR 3
42 #define DRV_VERSION_BUILD 6
43 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44              __stringify(DRV_VERSION_MINOR) "." \
45              __stringify(DRV_VERSION_BUILD)    DRV_KERN
46 const char i40e_driver_version_str[] = DRV_VERSION;
47 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
48
49 /* a bit of forward declarations */
50 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
51 static void i40e_handle_reset_warning(struct i40e_pf *pf);
52 static int i40e_add_vsi(struct i40e_vsi *vsi);
53 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
54 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
55 static int i40e_setup_misc_vector(struct i40e_pf *pf);
56 static void i40e_determine_queue_usage(struct i40e_pf *pf);
57 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
58 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
59 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
60
61 /* i40e_pci_tbl - PCI Device ID Table
62  *
63  * Last entry must be all 0s
64  *
65  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66  *   Class, Class Mask, private data (not used) }
67  */
68 static const struct pci_device_id i40e_pci_tbl[] = {
69         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
71         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
72         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
73         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
74         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
75         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
76         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
77         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
78         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
79         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
80         {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
81         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
82         /* required last entry */
83         {0, }
84 };
85 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
86
87 #define I40E_MAX_VF_COUNT 128
88 static int debug = -1;
89 module_param(debug, int, 0);
90 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
91
92 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
93 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
94 MODULE_LICENSE("GPL");
95 MODULE_VERSION(DRV_VERSION);
96
97 /**
98  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
99  * @hw:   pointer to the HW structure
100  * @mem:  ptr to mem struct to fill out
101  * @size: size of memory requested
102  * @alignment: what to align the allocation to
103  **/
104 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
105                             u64 size, u32 alignment)
106 {
107         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
108
109         mem->size = ALIGN(size, alignment);
110         mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
111                                       &mem->pa, GFP_KERNEL);
112         if (!mem->va)
113                 return -ENOMEM;
114
115         return 0;
116 }
117
118 /**
119  * i40e_free_dma_mem_d - OS specific memory free for shared code
120  * @hw:   pointer to the HW structure
121  * @mem:  ptr to mem struct to free
122  **/
123 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
124 {
125         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
126
127         dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
128         mem->va = NULL;
129         mem->pa = 0;
130         mem->size = 0;
131
132         return 0;
133 }
134
135 /**
136  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
137  * @hw:   pointer to the HW structure
138  * @mem:  ptr to mem struct to fill out
139  * @size: size of memory requested
140  **/
141 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
142                              u32 size)
143 {
144         mem->size = size;
145         mem->va = kzalloc(size, GFP_KERNEL);
146
147         if (!mem->va)
148                 return -ENOMEM;
149
150         return 0;
151 }
152
153 /**
154  * i40e_free_virt_mem_d - OS specific memory free for shared code
155  * @hw:   pointer to the HW structure
156  * @mem:  ptr to mem struct to free
157  **/
158 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
159 {
160         /* it's ok to kfree a NULL pointer */
161         kfree(mem->va);
162         mem->va = NULL;
163         mem->size = 0;
164
165         return 0;
166 }
167
168 /**
169  * i40e_get_lump - find a lump of free generic resource
170  * @pf: board private structure
171  * @pile: the pile of resource to search
172  * @needed: the number of items needed
173  * @id: an owner id to stick on the items assigned
174  *
175  * Returns the base item index of the lump, or negative for error
176  *
177  * The search_hint trick and lack of advanced fit-finding only work
178  * because we're highly likely to have all the same size lump requests.
179  * Linear search time and any fragmentation should be minimal.
180  **/
181 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
182                          u16 needed, u16 id)
183 {
184         int ret = -ENOMEM;
185         int i, j;
186
187         if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
188                 dev_info(&pf->pdev->dev,
189                          "param err: pile=%p needed=%d id=0x%04x\n",
190                          pile, needed, id);
191                 return -EINVAL;
192         }
193
194         /* start the linear search with an imperfect hint */
195         i = pile->search_hint;
196         while (i < pile->num_entries) {
197                 /* skip already allocated entries */
198                 if (pile->list[i] & I40E_PILE_VALID_BIT) {
199                         i++;
200                         continue;
201                 }
202
203                 /* do we have enough in this lump? */
204                 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
205                         if (pile->list[i+j] & I40E_PILE_VALID_BIT)
206                                 break;
207                 }
208
209                 if (j == needed) {
210                         /* there was enough, so assign it to the requestor */
211                         for (j = 0; j < needed; j++)
212                                 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
213                         ret = i;
214                         pile->search_hint = i + j;
215                         break;
216                 } else {
217                         /* not enough, so skip over it and continue looking */
218                         i += j;
219                 }
220         }
221
222         return ret;
223 }
224
225 /**
226  * i40e_put_lump - return a lump of generic resource
227  * @pile: the pile of resource to search
228  * @index: the base item index
229  * @id: the owner id of the items assigned
230  *
231  * Returns the count of items in the lump
232  **/
233 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
234 {
235         int valid_id = (id | I40E_PILE_VALID_BIT);
236         int count = 0;
237         int i;
238
239         if (!pile || index >= pile->num_entries)
240                 return -EINVAL;
241
242         for (i = index;
243              i < pile->num_entries && pile->list[i] == valid_id;
244              i++) {
245                 pile->list[i] = 0;
246                 count++;
247         }
248
249         if (count && index < pile->search_hint)
250                 pile->search_hint = index;
251
252         return count;
253 }
254
255 /**
256  * i40e_find_vsi_from_id - searches for the vsi with the given id
257  * @pf - the pf structure to search for the vsi
258  * @id - id of the vsi it is searching for
259  **/
260 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
261 {
262         int i;
263
264         for (i = 0; i < pf->num_alloc_vsi; i++)
265                 if (pf->vsi[i] && (pf->vsi[i]->id == id))
266                         return pf->vsi[i];
267
268         return NULL;
269 }
270
271 /**
272  * i40e_service_event_schedule - Schedule the service task to wake up
273  * @pf: board private structure
274  *
275  * If not already scheduled, this puts the task into the work queue
276  **/
277 static void i40e_service_event_schedule(struct i40e_pf *pf)
278 {
279         if (!test_bit(__I40E_DOWN, &pf->state) &&
280             !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
281             !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
282                 schedule_work(&pf->service_task);
283 }
284
285 /**
286  * i40e_tx_timeout - Respond to a Tx Hang
287  * @netdev: network interface device structure
288  *
289  * If any port has noticed a Tx timeout, it is likely that the whole
290  * device is munged, not just the one netdev port, so go for the full
291  * reset.
292  **/
293 #ifdef I40E_FCOE
294 void i40e_tx_timeout(struct net_device *netdev)
295 #else
296 static void i40e_tx_timeout(struct net_device *netdev)
297 #endif
298 {
299         struct i40e_netdev_priv *np = netdev_priv(netdev);
300         struct i40e_vsi *vsi = np->vsi;
301         struct i40e_pf *pf = vsi->back;
302
303         pf->tx_timeout_count++;
304
305         if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
306                 pf->tx_timeout_recovery_level = 1;
307         pf->tx_timeout_last_recovery = jiffies;
308         netdev_info(netdev, "tx_timeout recovery level %d\n",
309                     pf->tx_timeout_recovery_level);
310
311         switch (pf->tx_timeout_recovery_level) {
312         case 0:
313                 /* disable and re-enable queues for the VSI */
314                 if (in_interrupt()) {
315                         set_bit(__I40E_REINIT_REQUESTED, &pf->state);
316                         set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
317                 } else {
318                         i40e_vsi_reinit_locked(vsi);
319                 }
320                 break;
321         case 1:
322                 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
323                 break;
324         case 2:
325                 set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
326                 break;
327         case 3:
328                 set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
329                 break;
330         default:
331                 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
332                 set_bit(__I40E_DOWN_REQUESTED, &pf->state);
333                 set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
334                 break;
335         }
336         i40e_service_event_schedule(pf);
337         pf->tx_timeout_recovery_level++;
338 }
339
340 /**
341  * i40e_release_rx_desc - Store the new tail and head values
342  * @rx_ring: ring to bump
343  * @val: new head index
344  **/
345 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
346 {
347         rx_ring->next_to_use = val;
348
349         /* Force memory writes to complete before letting h/w
350          * know there are new descriptors to fetch.  (Only
351          * applicable for weak-ordered memory model archs,
352          * such as IA-64).
353          */
354         wmb();
355         writel(val, rx_ring->tail);
356 }
357
358 /**
359  * i40e_get_vsi_stats_struct - Get System Network Statistics
360  * @vsi: the VSI we care about
361  *
362  * Returns the address of the device statistics structure.
363  * The statistics are actually updated from the service task.
364  **/
365 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
366 {
367         return &vsi->net_stats;
368 }
369
370 /**
371  * i40e_get_netdev_stats_struct - Get statistics for netdev interface
372  * @netdev: network interface device structure
373  *
374  * Returns the address of the device statistics structure.
375  * The statistics are actually updated from the service task.
376  **/
377 #ifdef I40E_FCOE
378 struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
379                                              struct net_device *netdev,
380                                              struct rtnl_link_stats64 *stats)
381 #else
382 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
383                                              struct net_device *netdev,
384                                              struct rtnl_link_stats64 *stats)
385 #endif
386 {
387         struct i40e_netdev_priv *np = netdev_priv(netdev);
388         struct i40e_ring *tx_ring, *rx_ring;
389         struct i40e_vsi *vsi = np->vsi;
390         struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
391         int i;
392
393         if (test_bit(__I40E_DOWN, &vsi->state))
394                 return stats;
395
396         if (!vsi->tx_rings)
397                 return stats;
398
399         rcu_read_lock();
400         for (i = 0; i < vsi->num_queue_pairs; i++) {
401                 u64 bytes, packets;
402                 unsigned int start;
403
404                 tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
405                 if (!tx_ring)
406                         continue;
407
408                 do {
409                         start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
410                         packets = tx_ring->stats.packets;
411                         bytes   = tx_ring->stats.bytes;
412                 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
413
414                 stats->tx_packets += packets;
415                 stats->tx_bytes   += bytes;
416                 rx_ring = &tx_ring[1];
417
418                 do {
419                         start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
420                         packets = rx_ring->stats.packets;
421                         bytes   = rx_ring->stats.bytes;
422                 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
423
424                 stats->rx_packets += packets;
425                 stats->rx_bytes   += bytes;
426         }
427         rcu_read_unlock();
428
429         /* following stats updated by i40e_watchdog_subtask() */
430         stats->multicast        = vsi_stats->multicast;
431         stats->tx_errors        = vsi_stats->tx_errors;
432         stats->tx_dropped       = vsi_stats->tx_dropped;
433         stats->rx_errors        = vsi_stats->rx_errors;
434         stats->rx_crc_errors    = vsi_stats->rx_crc_errors;
435         stats->rx_length_errors = vsi_stats->rx_length_errors;
436
437         return stats;
438 }
439
440 /**
441  * i40e_vsi_reset_stats - Resets all stats of the given vsi
442  * @vsi: the VSI to have its stats reset
443  **/
444 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
445 {
446         struct rtnl_link_stats64 *ns;
447         int i;
448
449         if (!vsi)
450                 return;
451
452         ns = i40e_get_vsi_stats_struct(vsi);
453         memset(ns, 0, sizeof(*ns));
454         memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
455         memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
456         memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
457         if (vsi->rx_rings && vsi->rx_rings[0]) {
458                 for (i = 0; i < vsi->num_queue_pairs; i++) {
459                         memset(&vsi->rx_rings[i]->stats, 0 ,
460                                sizeof(vsi->rx_rings[i]->stats));
461                         memset(&vsi->rx_rings[i]->rx_stats, 0 ,
462                                sizeof(vsi->rx_rings[i]->rx_stats));
463                         memset(&vsi->tx_rings[i]->stats, 0 ,
464                                sizeof(vsi->tx_rings[i]->stats));
465                         memset(&vsi->tx_rings[i]->tx_stats, 0,
466                                sizeof(vsi->tx_rings[i]->tx_stats));
467                 }
468         }
469         vsi->stat_offsets_loaded = false;
470 }
471
472 /**
473  * i40e_pf_reset_stats - Reset all of the stats for the given PF
474  * @pf: the PF to be reset
475  **/
476 void i40e_pf_reset_stats(struct i40e_pf *pf)
477 {
478         int i;
479
480         memset(&pf->stats, 0, sizeof(pf->stats));
481         memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
482         pf->stat_offsets_loaded = false;
483
484         for (i = 0; i < I40E_MAX_VEB; i++) {
485                 if (pf->veb[i]) {
486                         memset(&pf->veb[i]->stats, 0,
487                                sizeof(pf->veb[i]->stats));
488                         memset(&pf->veb[i]->stats_offsets, 0,
489                                sizeof(pf->veb[i]->stats_offsets));
490                         pf->veb[i]->stat_offsets_loaded = false;
491                 }
492         }
493 }
494
495 /**
496  * i40e_stat_update48 - read and update a 48 bit stat from the chip
497  * @hw: ptr to the hardware info
498  * @hireg: the high 32 bit reg to read
499  * @loreg: the low 32 bit reg to read
500  * @offset_loaded: has the initial offset been loaded yet
501  * @offset: ptr to current offset value
502  * @stat: ptr to the stat
503  *
504  * Since the device stats are not reset at PFReset, they likely will not
505  * be zeroed when the driver starts.  We'll save the first values read
506  * and use them as offsets to be subtracted from the raw values in order
507  * to report stats that count from zero.  In the process, we also manage
508  * the potential roll-over.
509  **/
510 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
511                                bool offset_loaded, u64 *offset, u64 *stat)
512 {
513         u64 new_data;
514
515         if (hw->device_id == I40E_DEV_ID_QEMU) {
516                 new_data = rd32(hw, loreg);
517                 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
518         } else {
519                 new_data = rd64(hw, loreg);
520         }
521         if (!offset_loaded)
522                 *offset = new_data;
523         if (likely(new_data >= *offset))
524                 *stat = new_data - *offset;
525         else
526                 *stat = (new_data + BIT_ULL(48)) - *offset;
527         *stat &= 0xFFFFFFFFFFFFULL;
528 }
529
530 /**
531  * i40e_stat_update32 - read and update a 32 bit stat from the chip
532  * @hw: ptr to the hardware info
533  * @reg: the hw reg to read
534  * @offset_loaded: has the initial offset been loaded yet
535  * @offset: ptr to current offset value
536  * @stat: ptr to the stat
537  **/
538 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
539                                bool offset_loaded, u64 *offset, u64 *stat)
540 {
541         u32 new_data;
542
543         new_data = rd32(hw, reg);
544         if (!offset_loaded)
545                 *offset = new_data;
546         if (likely(new_data >= *offset))
547                 *stat = (u32)(new_data - *offset);
548         else
549                 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
550 }
551
552 /**
553  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
554  * @vsi: the VSI to be updated
555  **/
556 void i40e_update_eth_stats(struct i40e_vsi *vsi)
557 {
558         int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
559         struct i40e_pf *pf = vsi->back;
560         struct i40e_hw *hw = &pf->hw;
561         struct i40e_eth_stats *oes;
562         struct i40e_eth_stats *es;     /* device's eth stats */
563
564         es = &vsi->eth_stats;
565         oes = &vsi->eth_stats_offsets;
566
567         /* Gather up the stats that the hw collects */
568         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
569                            vsi->stat_offsets_loaded,
570                            &oes->tx_errors, &es->tx_errors);
571         i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
572                            vsi->stat_offsets_loaded,
573                            &oes->rx_discards, &es->rx_discards);
574         i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
575                            vsi->stat_offsets_loaded,
576                            &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
577         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
578                            vsi->stat_offsets_loaded,
579                            &oes->tx_errors, &es->tx_errors);
580
581         i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
582                            I40E_GLV_GORCL(stat_idx),
583                            vsi->stat_offsets_loaded,
584                            &oes->rx_bytes, &es->rx_bytes);
585         i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
586                            I40E_GLV_UPRCL(stat_idx),
587                            vsi->stat_offsets_loaded,
588                            &oes->rx_unicast, &es->rx_unicast);
589         i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
590                            I40E_GLV_MPRCL(stat_idx),
591                            vsi->stat_offsets_loaded,
592                            &oes->rx_multicast, &es->rx_multicast);
593         i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
594                            I40E_GLV_BPRCL(stat_idx),
595                            vsi->stat_offsets_loaded,
596                            &oes->rx_broadcast, &es->rx_broadcast);
597
598         i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
599                            I40E_GLV_GOTCL(stat_idx),
600                            vsi->stat_offsets_loaded,
601                            &oes->tx_bytes, &es->tx_bytes);
602         i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
603                            I40E_GLV_UPTCL(stat_idx),
604                            vsi->stat_offsets_loaded,
605                            &oes->tx_unicast, &es->tx_unicast);
606         i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
607                            I40E_GLV_MPTCL(stat_idx),
608                            vsi->stat_offsets_loaded,
609                            &oes->tx_multicast, &es->tx_multicast);
610         i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
611                            I40E_GLV_BPTCL(stat_idx),
612                            vsi->stat_offsets_loaded,
613                            &oes->tx_broadcast, &es->tx_broadcast);
614         vsi->stat_offsets_loaded = true;
615 }
616
617 /**
618  * i40e_update_veb_stats - Update Switch component statistics
619  * @veb: the VEB being updated
620  **/
621 static void i40e_update_veb_stats(struct i40e_veb *veb)
622 {
623         struct i40e_pf *pf = veb->pf;
624         struct i40e_hw *hw = &pf->hw;
625         struct i40e_eth_stats *oes;
626         struct i40e_eth_stats *es;     /* device's eth stats */
627         int idx = 0;
628
629         idx = veb->stats_idx;
630         es = &veb->stats;
631         oes = &veb->stats_offsets;
632
633         /* Gather up the stats that the hw collects */
634         i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
635                            veb->stat_offsets_loaded,
636                            &oes->tx_discards, &es->tx_discards);
637         if (hw->revision_id > 0)
638                 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
639                                    veb->stat_offsets_loaded,
640                                    &oes->rx_unknown_protocol,
641                                    &es->rx_unknown_protocol);
642         i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
643                            veb->stat_offsets_loaded,
644                            &oes->rx_bytes, &es->rx_bytes);
645         i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
646                            veb->stat_offsets_loaded,
647                            &oes->rx_unicast, &es->rx_unicast);
648         i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
649                            veb->stat_offsets_loaded,
650                            &oes->rx_multicast, &es->rx_multicast);
651         i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
652                            veb->stat_offsets_loaded,
653                            &oes->rx_broadcast, &es->rx_broadcast);
654
655         i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
656                            veb->stat_offsets_loaded,
657                            &oes->tx_bytes, &es->tx_bytes);
658         i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
659                            veb->stat_offsets_loaded,
660                            &oes->tx_unicast, &es->tx_unicast);
661         i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
662                            veb->stat_offsets_loaded,
663                            &oes->tx_multicast, &es->tx_multicast);
664         i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
665                            veb->stat_offsets_loaded,
666                            &oes->tx_broadcast, &es->tx_broadcast);
667         veb->stat_offsets_loaded = true;
668 }
669
#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 *
 * Only the FCoE VSI carries these counters; the stat block index is
 * derived from the PF SEID plus the FCoE stat offset.
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's eth stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
719 /**
720  * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
721  * @pf: the corresponding PF
722  *
723  * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
724  **/
725 static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
726 {
727         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
728         struct i40e_hw_port_stats *nsd = &pf->stats;
729         struct i40e_hw *hw = &pf->hw;
730         u64 xoff = 0;
731         u16 i, v;
732
733         if ((hw->fc.current_mode != I40E_FC_FULL) &&
734             (hw->fc.current_mode != I40E_FC_RX_PAUSE))
735                 return;
736
737         xoff = nsd->link_xoff_rx;
738         i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
739                            pf->stat_offsets_loaded,
740                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
741
742         /* No new LFC xoff rx */
743         if (!(nsd->link_xoff_rx - xoff))
744                 return;
745
746         /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
747         for (v = 0; v < pf->num_alloc_vsi; v++) {
748                 struct i40e_vsi *vsi = pf->vsi[v];
749
750                 if (!vsi || !vsi->tx_rings[0])
751                         continue;
752
753                 for (i = 0; i < vsi->num_queue_pairs; i++) {
754                         struct i40e_ring *ring = vsi->tx_rings[i];
755                         clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
756                 }
757         }
758 }
759
760 /**
761  * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
762  * @pf: the corresponding PF
763  *
764  * Update the Rx XOFF counter (PAUSE frames) in PFC mode
765  **/
766 static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
767 {
768         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
769         struct i40e_hw_port_stats *nsd = &pf->stats;
770         bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
771         struct i40e_dcbx_config *dcb_cfg;
772         struct i40e_hw *hw = &pf->hw;
773         u16 i, v;
774         u8 tc;
775
776         dcb_cfg = &hw->local_dcbx_config;
777
778         /* Collect Link XOFF stats when PFC is disabled */
779         if (!dcb_cfg->pfc.pfcenable) {
780                 i40e_update_link_xoff_rx(pf);
781                 return;
782         }
783
784         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
785                 u64 prio_xoff = nsd->priority_xoff_rx[i];
786                 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
787                                    pf->stat_offsets_loaded,
788                                    &osd->priority_xoff_rx[i],
789                                    &nsd->priority_xoff_rx[i]);
790
791                 /* No new PFC xoff rx */
792                 if (!(nsd->priority_xoff_rx[i] - prio_xoff))
793                         continue;
794                 /* Get the TC for given priority */
795                 tc = dcb_cfg->etscfg.prioritytable[i];
796                 xoff[tc] = true;
797         }
798
799         /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
800         for (v = 0; v < pf->num_alloc_vsi; v++) {
801                 struct i40e_vsi *vsi = pf->vsi[v];
802
803                 if (!vsi || !vsi->tx_rings[0])
804                         continue;
805
806                 for (i = 0; i < vsi->num_queue_pairs; i++) {
807                         struct i40e_ring *ring = vsi->tx_rings[i];
808
809                         tc = ring->dcb_tc;
810                         if (xoff[tc])
811                                 clear_bit(__I40E_HANG_CHECK_ARMED,
812                                           &ring->state);
813                 }
814         }
815 }
816
817 /**
818  * i40e_update_vsi_stats - Update the vsi statistics counters.
819  * @vsi: the VSI to be updated
820  *
821  * There are a few instances where we store the same stat in a
822  * couple of different structs.  This is partly because we have
823  * the netdev stats that need to be filled out, which is slightly
824  * different from the "eth_stats" defined by the chip and used in
825  * VF communications.  We sort it out here.
826  **/
827 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
828 {
829         struct i40e_pf *pf = vsi->back;
830         struct rtnl_link_stats64 *ons;
831         struct rtnl_link_stats64 *ns;   /* netdev stats */
832         struct i40e_eth_stats *oes;
833         struct i40e_eth_stats *es;     /* device's eth stats */
834         u32 tx_restart, tx_busy;
835         struct i40e_ring *p;
836         u32 rx_page, rx_buf;
837         u64 bytes, packets;
838         unsigned int start;
839         u64 rx_p, rx_b;
840         u64 tx_p, tx_b;
841         u16 q;
842
843         if (test_bit(__I40E_DOWN, &vsi->state) ||
844             test_bit(__I40E_CONFIG_BUSY, &pf->state))
845                 return;
846
847         ns = i40e_get_vsi_stats_struct(vsi);
848         ons = &vsi->net_stats_offsets;
849         es = &vsi->eth_stats;
850         oes = &vsi->eth_stats_offsets;
851
852         /* Gather up the netdev and vsi stats that the driver collects
853          * on the fly during packet processing
854          */
855         rx_b = rx_p = 0;
856         tx_b = tx_p = 0;
857         tx_restart = tx_busy = 0;
858         rx_page = 0;
859         rx_buf = 0;
860         rcu_read_lock();
861         for (q = 0; q < vsi->num_queue_pairs; q++) {
862                 /* locate Tx ring */
863                 p = ACCESS_ONCE(vsi->tx_rings[q]);
864
865                 do {
866                         start = u64_stats_fetch_begin_irq(&p->syncp);
867                         packets = p->stats.packets;
868                         bytes = p->stats.bytes;
869                 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
870                 tx_b += bytes;
871                 tx_p += packets;
872                 tx_restart += p->tx_stats.restart_queue;
873                 tx_busy += p->tx_stats.tx_busy;
874
875                 /* Rx queue is part of the same block as Tx queue */
876                 p = &p[1];
877                 do {
878                         start = u64_stats_fetch_begin_irq(&p->syncp);
879                         packets = p->stats.packets;
880                         bytes = p->stats.bytes;
881                 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
882                 rx_b += bytes;
883                 rx_p += packets;
884                 rx_buf += p->rx_stats.alloc_buff_failed;
885                 rx_page += p->rx_stats.alloc_page_failed;
886         }
887         rcu_read_unlock();
888         vsi->tx_restart = tx_restart;
889         vsi->tx_busy = tx_busy;
890         vsi->rx_page_failed = rx_page;
891         vsi->rx_buf_failed = rx_buf;
892
893         ns->rx_packets = rx_p;
894         ns->rx_bytes = rx_b;
895         ns->tx_packets = tx_p;
896         ns->tx_bytes = tx_b;
897
898         /* update netdev stats from eth stats */
899         i40e_update_eth_stats(vsi);
900         ons->tx_errors = oes->tx_errors;
901         ns->tx_errors = es->tx_errors;
902         ons->multicast = oes->rx_multicast;
903         ns->multicast = es->rx_multicast;
904         ons->rx_dropped = oes->rx_discards;
905         ns->rx_dropped = es->rx_discards;
906         ons->tx_dropped = oes->tx_discards;
907         ns->tx_dropped = es->tx_discards;
908
909         /* pull in a couple PF stats if this is the main vsi */
910         if (vsi == pf->vsi[pf->lan_vsi]) {
911                 ns->rx_crc_errors = pf->stats.crc_errors;
912                 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
913                 ns->rx_length_errors = pf->stats.rx_length_errors;
914         }
915 }
916
917 /**
918  * i40e_update_pf_stats - Update the PF statistics counters.
919  * @pf: the PF to be updated
920  **/
921 static void i40e_update_pf_stats(struct i40e_pf *pf)
922 {
923         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
924         struct i40e_hw_port_stats *nsd = &pf->stats;
925         struct i40e_hw *hw = &pf->hw;
926         u32 val;
927         int i;
928
929         i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
930                            I40E_GLPRT_GORCL(hw->port),
931                            pf->stat_offsets_loaded,
932                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
933         i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
934                            I40E_GLPRT_GOTCL(hw->port),
935                            pf->stat_offsets_loaded,
936                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
937         i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
938                            pf->stat_offsets_loaded,
939                            &osd->eth.rx_discards,
940                            &nsd->eth.rx_discards);
941         i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
942                            I40E_GLPRT_UPRCL(hw->port),
943                            pf->stat_offsets_loaded,
944                            &osd->eth.rx_unicast,
945                            &nsd->eth.rx_unicast);
946         i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
947                            I40E_GLPRT_MPRCL(hw->port),
948                            pf->stat_offsets_loaded,
949                            &osd->eth.rx_multicast,
950                            &nsd->eth.rx_multicast);
951         i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
952                            I40E_GLPRT_BPRCL(hw->port),
953                            pf->stat_offsets_loaded,
954                            &osd->eth.rx_broadcast,
955                            &nsd->eth.rx_broadcast);
956         i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
957                            I40E_GLPRT_UPTCL(hw->port),
958                            pf->stat_offsets_loaded,
959                            &osd->eth.tx_unicast,
960                            &nsd->eth.tx_unicast);
961         i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
962                            I40E_GLPRT_MPTCL(hw->port),
963                            pf->stat_offsets_loaded,
964                            &osd->eth.tx_multicast,
965                            &nsd->eth.tx_multicast);
966         i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
967                            I40E_GLPRT_BPTCL(hw->port),
968                            pf->stat_offsets_loaded,
969                            &osd->eth.tx_broadcast,
970                            &nsd->eth.tx_broadcast);
971
972         i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
973                            pf->stat_offsets_loaded,
974                            &osd->tx_dropped_link_down,
975                            &nsd->tx_dropped_link_down);
976
977         i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
978                            pf->stat_offsets_loaded,
979                            &osd->crc_errors, &nsd->crc_errors);
980
981         i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
982                            pf->stat_offsets_loaded,
983                            &osd->illegal_bytes, &nsd->illegal_bytes);
984
985         i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
986                            pf->stat_offsets_loaded,
987                            &osd->mac_local_faults,
988                            &nsd->mac_local_faults);
989         i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
990                            pf->stat_offsets_loaded,
991                            &osd->mac_remote_faults,
992                            &nsd->mac_remote_faults);
993
994         i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
995                            pf->stat_offsets_loaded,
996                            &osd->rx_length_errors,
997                            &nsd->rx_length_errors);
998
999         i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
1000                            pf->stat_offsets_loaded,
1001                            &osd->link_xon_rx, &nsd->link_xon_rx);
1002         i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
1003                            pf->stat_offsets_loaded,
1004                            &osd->link_xon_tx, &nsd->link_xon_tx);
1005         i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
1006         i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1007                            pf->stat_offsets_loaded,
1008                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
1009
1010         for (i = 0; i < 8; i++) {
1011                 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1012                                    pf->stat_offsets_loaded,
1013                                    &osd->priority_xon_rx[i],
1014                                    &nsd->priority_xon_rx[i]);
1015                 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1016                                    pf->stat_offsets_loaded,
1017                                    &osd->priority_xon_tx[i],
1018                                    &nsd->priority_xon_tx[i]);
1019                 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1020                                    pf->stat_offsets_loaded,
1021                                    &osd->priority_xoff_tx[i],
1022                                    &nsd->priority_xoff_tx[i]);
1023                 i40e_stat_update32(hw,
1024                                    I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1025                                    pf->stat_offsets_loaded,
1026                                    &osd->priority_xon_2_xoff[i],
1027                                    &nsd->priority_xon_2_xoff[i]);
1028         }
1029
1030         i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1031                            I40E_GLPRT_PRC64L(hw->port),
1032                            pf->stat_offsets_loaded,
1033                            &osd->rx_size_64, &nsd->rx_size_64);
1034         i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1035                            I40E_GLPRT_PRC127L(hw->port),
1036                            pf->stat_offsets_loaded,
1037                            &osd->rx_size_127, &nsd->rx_size_127);
1038         i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1039                            I40E_GLPRT_PRC255L(hw->port),
1040                            pf->stat_offsets_loaded,
1041                            &osd->rx_size_255, &nsd->rx_size_255);
1042         i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1043                            I40E_GLPRT_PRC511L(hw->port),
1044                            pf->stat_offsets_loaded,
1045                            &osd->rx_size_511, &nsd->rx_size_511);
1046         i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1047                            I40E_GLPRT_PRC1023L(hw->port),
1048                            pf->stat_offsets_loaded,
1049                            &osd->rx_size_1023, &nsd->rx_size_1023);
1050         i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1051                            I40E_GLPRT_PRC1522L(hw->port),
1052                            pf->stat_offsets_loaded,
1053                            &osd->rx_size_1522, &nsd->rx_size_1522);
1054         i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1055                            I40E_GLPRT_PRC9522L(hw->port),
1056                            pf->stat_offsets_loaded,
1057                            &osd->rx_size_big, &nsd->rx_size_big);
1058
1059         i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1060                            I40E_GLPRT_PTC64L(hw->port),
1061                            pf->stat_offsets_loaded,
1062                            &osd->tx_size_64, &nsd->tx_size_64);
1063         i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1064                            I40E_GLPRT_PTC127L(hw->port),
1065                            pf->stat_offsets_loaded,
1066                            &osd->tx_size_127, &nsd->tx_size_127);
1067         i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1068                            I40E_GLPRT_PTC255L(hw->port),
1069                            pf->stat_offsets_loaded,
1070                            &osd->tx_size_255, &nsd->tx_size_255);
1071         i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1072                            I40E_GLPRT_PTC511L(hw->port),
1073                            pf->stat_offsets_loaded,
1074                            &osd->tx_size_511, &nsd->tx_size_511);
1075         i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1076                            I40E_GLPRT_PTC1023L(hw->port),
1077                            pf->stat_offsets_loaded,
1078                            &osd->tx_size_1023, &nsd->tx_size_1023);
1079         i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1080                            I40E_GLPRT_PTC1522L(hw->port),
1081                            pf->stat_offsets_loaded,
1082                            &osd->tx_size_1522, &nsd->tx_size_1522);
1083         i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1084                            I40E_GLPRT_PTC9522L(hw->port),
1085                            pf->stat_offsets_loaded,
1086                            &osd->tx_size_big, &nsd->tx_size_big);
1087
1088         i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1089                            pf->stat_offsets_loaded,
1090                            &osd->rx_undersize, &nsd->rx_undersize);
1091         i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1092                            pf->stat_offsets_loaded,
1093                            &osd->rx_fragments, &nsd->rx_fragments);
1094         i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1095                            pf->stat_offsets_loaded,
1096                            &osd->rx_oversize, &nsd->rx_oversize);
1097         i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1098                            pf->stat_offsets_loaded,
1099                            &osd->rx_jabber, &nsd->rx_jabber);
1100
1101         /* FDIR stats */
1102         i40e_stat_update32(hw,
1103                            I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
1104                            pf->stat_offsets_loaded,
1105                            &osd->fd_atr_match, &nsd->fd_atr_match);
1106         i40e_stat_update32(hw,
1107                            I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
1108                            pf->stat_offsets_loaded,
1109                            &osd->fd_sb_match, &nsd->fd_sb_match);
1110         i40e_stat_update32(hw,
1111                       I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
1112                       pf->stat_offsets_loaded,
1113                       &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
1114
1115         val = rd32(hw, I40E_PRTPM_EEE_STAT);
1116         nsd->tx_lpi_status =
1117                        (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1118                         I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1119         nsd->rx_lpi_status =
1120                        (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1121                         I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1122         i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1123                            pf->stat_offsets_loaded,
1124                            &osd->tx_lpi_count, &nsd->tx_lpi_count);
1125         i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1126                            pf->stat_offsets_loaded,
1127                            &osd->rx_lpi_count, &nsd->rx_lpi_count);
1128
1129         if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1130             !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
1131                 nsd->fd_sb_status = true;
1132         else
1133                 nsd->fd_sb_status = false;
1134
1135         if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1136             !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1137                 nsd->fd_atr_status = true;
1138         else
1139                 nsd->fd_atr_status = false;
1140
1141         pf->stat_offsets_loaded = true;
1142 }
1143
1144 /**
1145  * i40e_update_stats - Update the various statistics counters.
1146  * @vsi: the VSI to be updated
1147  *
1148  * Update the various stats for this VSI and its related entities.
1149  **/
1150 void i40e_update_stats(struct i40e_vsi *vsi)
1151 {
1152         struct i40e_pf *pf = vsi->back;
1153
1154         if (vsi == pf->vsi[pf->lan_vsi])
1155                 i40e_update_pf_stats(pf);
1156
1157         i40e_update_vsi_stats(vsi);
1158 #ifdef I40E_FCOE
1159         i40e_update_fcoe_stats(vsi);
1160 #endif
1161 }
1162
1163 /**
1164  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1165  * @vsi: the VSI to be searched
1166  * @macaddr: the MAC address
1167  * @vlan: the vlan
1168  * @is_vf: make sure its a VF filter, else doesn't matter
1169  * @is_netdev: make sure its a netdev filter, else doesn't matter
1170  *
1171  * Returns ptr to the filter object or NULL
1172  **/
1173 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1174                                                 u8 *macaddr, s16 vlan,
1175                                                 bool is_vf, bool is_netdev)
1176 {
1177         struct i40e_mac_filter *f;
1178
1179         if (!vsi || !macaddr)
1180                 return NULL;
1181
1182         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1183                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1184                     (vlan == f->vlan)    &&
1185                     (!is_vf || f->is_vf) &&
1186                     (!is_netdev || f->is_netdev))
1187                         return f;
1188         }
1189         return NULL;
1190 }
1191
1192 /**
1193  * i40e_find_mac - Find a mac addr in the macvlan filters list
1194  * @vsi: the VSI to be searched
1195  * @macaddr: the MAC address we are searching for
1196  * @is_vf: make sure its a VF filter, else doesn't matter
1197  * @is_netdev: make sure its a netdev filter, else doesn't matter
1198  *
1199  * Returns the first filter with the provided MAC address or NULL if
1200  * MAC address was not found
1201  **/
1202 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1203                                       bool is_vf, bool is_netdev)
1204 {
1205         struct i40e_mac_filter *f;
1206
1207         if (!vsi || !macaddr)
1208                 return NULL;
1209
1210         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1211                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1212                     (!is_vf || f->is_vf) &&
1213                     (!is_netdev || f->is_netdev))
1214                         return f;
1215         }
1216         return NULL;
1217 }
1218
1219 /**
1220  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1221  * @vsi: the VSI to be searched
1222  *
1223  * Returns true if VSI is in vlan mode or false otherwise
1224  **/
1225 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1226 {
1227         struct i40e_mac_filter *f;
1228
1229         /* Only -1 for all the filters denotes not in vlan mode
1230          * so we have to go through all the list in order to make sure
1231          */
1232         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1233                 if (f->vlan >= 0)
1234                         return true;
1235         }
1236
1237         return false;
1238 }
1239
1240 /**
1241  * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1242  * @vsi: the VSI to be searched
1243  * @macaddr: the mac address to be filtered
1244  * @is_vf: true if it is a VF
1245  * @is_netdev: true if it is a netdev
1246  *
1247  * Goes through all the macvlan filters and adds a
1248  * macvlan filter for each unique vlan that already exists
1249  *
1250  * Returns first filter found on success, else NULL
1251  **/
1252 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1253                                              bool is_vf, bool is_netdev)
1254 {
1255         struct i40e_mac_filter *f;
1256
1257         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1258                 if (!i40e_find_filter(vsi, macaddr, f->vlan,
1259                                       is_vf, is_netdev)) {
1260                         if (!i40e_add_filter(vsi, macaddr, f->vlan,
1261                                              is_vf, is_netdev))
1262                                 return NULL;
1263                 }
1264         }
1265
1266         return list_first_entry_or_null(&vsi->mac_filter_list,
1267                                         struct i40e_mac_filter, list);
1268 }
1269
1270 /**
1271  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1272  * @vsi: the PF Main VSI - inappropriate for any other VSI
1273  * @macaddr: the MAC address
1274  *
1275  * Some older firmware configurations set up a default promiscuous VLAN
1276  * filter that needs to be removed.
1277  **/
1278 static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1279 {
1280         struct i40e_aqc_remove_macvlan_element_data element;
1281         struct i40e_pf *pf = vsi->back;
1282         i40e_status ret;
1283
1284         /* Only appropriate for the PF main VSI */
1285         if (vsi->type != I40E_VSI_MAIN)
1286                 return -EINVAL;
1287
1288         memset(&element, 0, sizeof(element));
1289         ether_addr_copy(element.mac_addr, macaddr);
1290         element.vlan_tag = 0;
1291         element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1292                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1293         ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1294         if (ret)
1295                 return -ENOENT;
1296
1297         return 0;
1298 }
1299
1300 /**
1301  * i40e_add_filter - Add a mac/vlan filter to the VSI
1302  * @vsi: the VSI to be searched
1303  * @macaddr: the MAC address
1304  * @vlan: the vlan
1305  * @is_vf: make sure its a VF filter, else doesn't matter
1306  * @is_netdev: make sure its a netdev filter, else doesn't matter
1307  *
1308  * Returns ptr to the filter object or NULL when no memory available.
1309  **/
1310 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1311                                         u8 *macaddr, s16 vlan,
1312                                         bool is_vf, bool is_netdev)
1313 {
1314         struct i40e_mac_filter *f;
1315
1316         if (!vsi || !macaddr)
1317                 return NULL;
1318
1319         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1320         if (!f) {
1321                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1322                 if (!f)
1323                         goto add_filter_out;
1324
1325                 ether_addr_copy(f->macaddr, macaddr);
1326                 f->vlan = vlan;
1327                 f->changed = true;
1328
1329                 INIT_LIST_HEAD(&f->list);
1330                 list_add(&f->list, &vsi->mac_filter_list);
1331         }
1332
1333         /* increment counter and add a new flag if needed */
1334         if (is_vf) {
1335                 if (!f->is_vf) {
1336                         f->is_vf = true;
1337                         f->counter++;
1338                 }
1339         } else if (is_netdev) {
1340                 if (!f->is_netdev) {
1341                         f->is_netdev = true;
1342                         f->counter++;
1343                 }
1344         } else {
1345                 f->counter++;
1346         }
1347
1348         /* changed tells sync_filters_subtask to
1349          * push the filter down to the firmware
1350          */
1351         if (f->changed) {
1352                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1353                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1354         }
1355
1356 add_filter_out:
1357         return f;
1358 }
1359
1360 /**
1361  * i40e_del_filter - Remove a mac/vlan filter from the VSI
1362  * @vsi: the VSI to be searched
1363  * @macaddr: the MAC address
1364  * @vlan: the vlan
1365  * @is_vf: make sure it's a VF filter, else doesn't matter
1366  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1367  **/
1368 void i40e_del_filter(struct i40e_vsi *vsi,
1369                      u8 *macaddr, s16 vlan,
1370                      bool is_vf, bool is_netdev)
1371 {
1372         struct i40e_mac_filter *f;
1373
1374         if (!vsi || !macaddr)
1375                 return;
1376
1377         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1378         if (!f || f->counter == 0)
1379                 return;
1380
1381         if (is_vf) {
1382                 if (f->is_vf) {
1383                         f->is_vf = false;
1384                         f->counter--;
1385                 }
1386         } else if (is_netdev) {
1387                 if (f->is_netdev) {
1388                         f->is_netdev = false;
1389                         f->counter--;
1390                 }
1391         } else {
1392                 /* make sure we don't remove a filter in use by VF or netdev */
1393                 int min_f = 0;
1394                 min_f += (f->is_vf ? 1 : 0);
1395                 min_f += (f->is_netdev ? 1 : 0);
1396
1397                 if (f->counter > min_f)
1398                         f->counter--;
1399         }
1400
1401         /* counter == 0 tells sync_filters_subtask to
1402          * remove the filter from the firmware's list
1403          */
1404         if (f->counter == 0) {
1405                 f->changed = true;
1406                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1407                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1408         }
1409 }
1410
1411 /**
1412  * i40e_set_mac - NDO callback to set mac address
1413  * @netdev: network interface device structure
1414  * @p: pointer to an address structure
1415  *
1416  * Returns 0 on success, negative on failure
1417  **/
1418 #ifdef I40E_FCOE
1419 int i40e_set_mac(struct net_device *netdev, void *p)
1420 #else
1421 static int i40e_set_mac(struct net_device *netdev, void *p)
1422 #endif
1423 {
1424         struct i40e_netdev_priv *np = netdev_priv(netdev);
1425         struct i40e_vsi *vsi = np->vsi;
1426         struct i40e_pf *pf = vsi->back;
1427         struct i40e_hw *hw = &pf->hw;
1428         struct sockaddr *addr = p;
1429         struct i40e_mac_filter *f;
1430
1431         if (!is_valid_ether_addr(addr->sa_data))
1432                 return -EADDRNOTAVAIL;
1433
1434         if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1435                 netdev_info(netdev, "already using mac address %pM\n",
1436                             addr->sa_data);
1437                 return 0;
1438         }
1439
1440         if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1441             test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1442                 return -EADDRNOTAVAIL;
1443
1444         if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1445                 netdev_info(netdev, "returning to hw mac address %pM\n",
1446                             hw->mac.addr);
1447         else
1448                 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1449
1450         if (vsi->type == I40E_VSI_MAIN) {
1451                 i40e_status ret;
1452                 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1453                                                 I40E_AQC_WRITE_TYPE_LAA_WOL,
1454                                                 addr->sa_data, NULL);
1455                 if (ret) {
1456                         netdev_info(netdev,
1457                                     "Addr change for Main VSI failed: %d\n",
1458                                     ret);
1459                         return -EADDRNOTAVAIL;
1460                 }
1461         }
1462
1463         if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
1464                 struct i40e_aqc_remove_macvlan_element_data element;
1465
1466                 memset(&element, 0, sizeof(element));
1467                 ether_addr_copy(element.mac_addr, netdev->dev_addr);
1468                 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1469                 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1470         } else {
1471                 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1472                                 false, false);
1473         }
1474
1475         if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
1476                 struct i40e_aqc_add_macvlan_element_data element;
1477
1478                 memset(&element, 0, sizeof(element));
1479                 ether_addr_copy(element.mac_addr, hw->mac.addr);
1480                 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1481                 i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1482         } else {
1483                 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1484                                     false, false);
1485                 if (f)
1486                         f->is_laa = true;
1487         }
1488
1489         i40e_sync_vsi_filters(vsi);
1490         ether_addr_copy(netdev->dev_addr, addr->sa_data);
1491
1492         return 0;
1493 }
1494
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	/* queue map section is always filled in; scheduler section is
	 * only marked valid further down when is_add is set
	 */
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	/* enabled_tc == 0 means "only TC0" (bit 0 set) */
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	/* In MFP case we can have a much lower count of MSIx
	 * vectors available and so we need to lower the used
	 * q count.
	 */
	qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
			/* TC is enabled */
			int pow, num_qps;

			/* per-VSI-type cap on queues for this TC */
			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				/* these VSI types support only TC0 */
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs
			 * (the AQ queue map encodes the count as a power-of-2
			 * exponent, see qmap below)
			 */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			/* AQ TC queue map: starting queue offset plus
			 * power-of-2 queue count exponent
			 */
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		/* single-TC main VSI: honor a user-requested queue count,
		 * otherwise use all available LAN MSI-X vectors
		 */
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		/* SR-IOV VSIs list each absolute queue id explicitly */
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		/* contiguous mapping needs only the base queue */
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
1638
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Reconciles the driver's MAC filter list with the netdev's unicast and
 * multicast address lists, then flags the VSI so the actual HW update
 * happens later in i40e_sync_vsi_filters().
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			/* if the VSI has VLANs, the MAC must be added once
			 * per VLAN rather than with I40E_VLAN_ANY
			 */
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		/* only netdev-owned filters are subject to removal here */
		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			/* unicast: check both the uc list and the device's
			 * own address list
			 */
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes; defer the HW sync to the
	 * service task via FILTER_CHANGED/FILTER_SYNC flags
	 */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
1719
1720 /**
1721  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1722  * @vsi: ptr to the VSI
1723  *
1724  * Push any outstanding VSI filter changes through the AdminQ.
1725  *
1726  * Returns 0 or error value
1727  **/
1728 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1729 {
1730         struct i40e_mac_filter *f, *ftmp;
1731         bool promisc_forced_on = false;
1732         bool add_happened = false;
1733         int filter_list_len = 0;
1734         u32 changed_flags = 0;
1735         i40e_status ret = 0;
1736         struct i40e_pf *pf;
1737         int num_add = 0;
1738         int num_del = 0;
1739         int aq_err = 0;
1740         u16 cmd_flags;
1741
1742         /* empty array typed pointers, kcalloc later */
1743         struct i40e_aqc_add_macvlan_element_data *add_list;
1744         struct i40e_aqc_remove_macvlan_element_data *del_list;
1745
1746         while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1747                 usleep_range(1000, 2000);
1748         pf = vsi->back;
1749
1750         if (vsi->netdev) {
1751                 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1752                 vsi->current_netdev_flags = vsi->netdev->flags;
1753         }
1754
1755         if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1756                 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1757
1758                 filter_list_len = pf->hw.aq.asq_buf_size /
1759                             sizeof(struct i40e_aqc_remove_macvlan_element_data);
1760                 del_list = kcalloc(filter_list_len,
1761                             sizeof(struct i40e_aqc_remove_macvlan_element_data),
1762                             GFP_KERNEL);
1763                 if (!del_list)
1764                         return -ENOMEM;
1765
1766                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1767                         if (!f->changed)
1768                                 continue;
1769
1770                         if (f->counter != 0)
1771                                 continue;
1772                         f->changed = false;
1773                         cmd_flags = 0;
1774
1775                         /* add to delete list */
1776                         ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1777                         del_list[num_del].vlan_tag =
1778                                 cpu_to_le16((u16)(f->vlan ==
1779                                             I40E_VLAN_ANY ? 0 : f->vlan));
1780
1781                         cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1782                         del_list[num_del].flags = cmd_flags;
1783                         num_del++;
1784
1785                         /* unlink from filter list */
1786                         list_del(&f->list);
1787                         kfree(f);
1788
1789                         /* flush a full buffer */
1790                         if (num_del == filter_list_len) {
1791                                 ret = i40e_aq_remove_macvlan(&pf->hw,
1792                                                   vsi->seid, del_list, num_del,
1793                                                   NULL);
1794                                 aq_err = pf->hw.aq.asq_last_status;
1795                                 num_del = 0;
1796                                 memset(del_list, 0, sizeof(*del_list));
1797
1798                                 if (ret && aq_err != I40E_AQ_RC_ENOENT)
1799                                         dev_info(&pf->pdev->dev,
1800                                                  "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
1801                                                  i40e_stat_str(&pf->hw, ret),
1802                                                  i40e_aq_str(&pf->hw, aq_err));
1803                         }
1804                 }
1805                 if (num_del) {
1806                         ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1807                                                      del_list, num_del, NULL);
1808                         aq_err = pf->hw.aq.asq_last_status;
1809                         num_del = 0;
1810
1811                         if (ret && aq_err != I40E_AQ_RC_ENOENT)
1812                                 dev_info(&pf->pdev->dev,
1813                                          "ignoring delete macvlan error, err %s aq_err %s\n",
1814                                          i40e_stat_str(&pf->hw, ret),
1815                                          i40e_aq_str(&pf->hw, aq_err));
1816                 }
1817
1818                 kfree(del_list);
1819                 del_list = NULL;
1820
1821                 /* do all the adds now */
1822                 filter_list_len = pf->hw.aq.asq_buf_size /
1823                                sizeof(struct i40e_aqc_add_macvlan_element_data),
1824                 add_list = kcalloc(filter_list_len,
1825                                sizeof(struct i40e_aqc_add_macvlan_element_data),
1826                                GFP_KERNEL);
1827                 if (!add_list)
1828                         return -ENOMEM;
1829
1830                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1831                         if (!f->changed)
1832                                 continue;
1833
1834                         if (f->counter == 0)
1835                                 continue;
1836                         f->changed = false;
1837                         add_happened = true;
1838                         cmd_flags = 0;
1839
1840                         /* add to add array */
1841                         ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1842                         add_list[num_add].vlan_tag =
1843                                 cpu_to_le16(
1844                                  (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1845                         add_list[num_add].queue_number = 0;
1846
1847                         cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1848                         add_list[num_add].flags = cpu_to_le16(cmd_flags);
1849                         num_add++;
1850
1851                         /* flush a full buffer */
1852                         if (num_add == filter_list_len) {
1853                                 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1854                                                           add_list, num_add,
1855                                                           NULL);
1856                                 aq_err = pf->hw.aq.asq_last_status;
1857                                 num_add = 0;
1858
1859                                 if (ret)
1860                                         break;
1861                                 memset(add_list, 0, sizeof(*add_list));
1862                         }
1863                 }
1864                 if (num_add) {
1865                         ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1866                                                   add_list, num_add, NULL);
1867                         aq_err = pf->hw.aq.asq_last_status;
1868                         num_add = 0;
1869                 }
1870                 kfree(add_list);
1871                 add_list = NULL;
1872
1873                 if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
1874                         dev_info(&pf->pdev->dev,
1875                                  "add filter failed, err %s aq_err %s\n",
1876                                  i40e_stat_str(&pf->hw, ret),
1877                                  i40e_aq_str(&pf->hw, aq_err));
1878                         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1879                             !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1880                                       &vsi->state)) {
1881                                 promisc_forced_on = true;
1882                                 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1883                                         &vsi->state);
1884                                 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1885                         }
1886                 }
1887         }
1888
1889         /* check for changes in promiscuous modes */
1890         if (changed_flags & IFF_ALLMULTI) {
1891                 bool cur_multipromisc;
1892                 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1893                 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1894                                                             vsi->seid,
1895                                                             cur_multipromisc,
1896                                                             NULL);
1897                 if (ret)
1898                         dev_info(&pf->pdev->dev,
1899                                  "set multi promisc failed, err %s aq_err %s\n",
1900                                  i40e_stat_str(&pf->hw, ret),
1901                                  i40e_aq_str(&pf->hw,
1902                                              pf->hw.aq.asq_last_status));
1903         }
1904         if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1905                 bool cur_promisc;
1906                 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1907                                test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1908                                         &vsi->state));
1909                 ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1910                                                           vsi->seid,
1911                                                           cur_promisc, NULL);
1912                 if (ret)
1913                         dev_info(&pf->pdev->dev,
1914                                  "set uni promisc failed, err %s, aq_err %s\n",
1915                                  i40e_stat_str(&pf->hw, ret),
1916                                  i40e_aq_str(&pf->hw,
1917                                              pf->hw.aq.asq_last_status));
1918                 ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1919                                                 vsi->seid,
1920                                                 cur_promisc, NULL);
1921                 if (ret)
1922                         dev_info(&pf->pdev->dev,
1923                                  "set brdcast promisc failed, err %s, aq_err %s\n",
1924                                  i40e_stat_str(&pf->hw, ret),
1925                                  i40e_aq_str(&pf->hw,
1926                                              pf->hw.aq.asq_last_status));
1927         }
1928
1929         clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1930         return 0;
1931 }
1932
1933 /**
1934  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1935  * @pf: board private structure
1936  **/
1937 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1938 {
1939         int v;
1940
1941         if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1942                 return;
1943         pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1944
1945         for (v = 0; v < pf->num_alloc_vsi; v++) {
1946                 if (pf->vsi[v] &&
1947                     (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1948                         i40e_sync_vsi_filters(pf->vsi[v]);
1949         }
1950 }
1951
1952 /**
1953  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1954  * @netdev: network interface device structure
1955  * @new_mtu: new value for maximum frame size
1956  *
1957  * Returns 0 on success, negative on failure
1958  **/
1959 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1960 {
1961         struct i40e_netdev_priv *np = netdev_priv(netdev);
1962         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1963         struct i40e_vsi *vsi = np->vsi;
1964
1965         /* MTU < 68 is an error and causes problems on some kernels */
1966         if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1967                 return -EINVAL;
1968
1969         netdev_info(netdev, "changing MTU from %d to %d\n",
1970                     netdev->mtu, new_mtu);
1971         netdev->mtu = new_mtu;
1972         if (netif_running(netdev))
1973                 i40e_vsi_reinit_locked(vsi);
1974
1975         return 0;
1976 }
1977
1978 /**
1979  * i40e_ioctl - Access the hwtstamp interface
1980  * @netdev: network interface device structure
1981  * @ifr: interface request data
1982  * @cmd: ioctl command
1983  **/
1984 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1985 {
1986         struct i40e_netdev_priv *np = netdev_priv(netdev);
1987         struct i40e_pf *pf = np->vsi->back;
1988
1989         switch (cmd) {
1990         case SIOCGHWTSTAMP:
1991                 return i40e_ptp_get_ts_config(pf, ifr);
1992         case SIOCSHWTSTAMP:
1993                 return i40e_ptp_set_ts_config(pf, ifr);
1994         default:
1995                 return -EOPNOTSUPP;
1996         }
1997 }
1998
1999 /**
2000  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2001  * @vsi: the vsi being adjusted
2002  **/
2003 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2004 {
2005         struct i40e_vsi_context ctxt;
2006         i40e_status ret;
2007
2008         if ((vsi->info.valid_sections &
2009              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2010             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2011                 return;  /* already enabled */
2012
2013         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2014         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2015                                     I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2016
2017         ctxt.seid = vsi->seid;
2018         ctxt.info = vsi->info;
2019         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2020         if (ret) {
2021                 dev_info(&vsi->back->pdev->dev,
2022                          "update vlan stripping failed, err %s aq_err %s\n",
2023                          i40e_stat_str(&vsi->back->hw, ret),
2024                          i40e_aq_str(&vsi->back->hw,
2025                                      vsi->back->hw.aq.asq_last_status));
2026         }
2027 }
2028
2029 /**
2030  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2031  * @vsi: the vsi being adjusted
2032  **/
2033 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2034 {
2035         struct i40e_vsi_context ctxt;
2036         i40e_status ret;
2037
2038         if ((vsi->info.valid_sections &
2039              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2040             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2041              I40E_AQ_VSI_PVLAN_EMOD_MASK))
2042                 return;  /* already disabled */
2043
2044         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2045         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2046                                     I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2047
2048         ctxt.seid = vsi->seid;
2049         ctxt.info = vsi->info;
2050         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2051         if (ret) {
2052                 dev_info(&vsi->back->pdev->dev,
2053                          "update vlan stripping failed, err %s aq_err %s\n",
2054                          i40e_stat_str(&vsi->back->hw, ret),
2055                          i40e_aq_str(&vsi->back->hw,
2056                                      vsi->back->hw.aq.asq_last_status));
2057         }
2058 }
2059
2060 /**
2061  * i40e_vlan_rx_register - Setup or shutdown vlan offload
2062  * @netdev: network interface to be adjusted
2063  * @features: netdev features to test if VLAN offload is enabled or not
2064  **/
2065 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2066 {
2067         struct i40e_netdev_priv *np = netdev_priv(netdev);
2068         struct i40e_vsi *vsi = np->vsi;
2069
2070         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2071                 i40e_vlan_stripping_enable(vsi);
2072         else
2073                 i40e_vlan_stripping_disable(vsi);
2074 }
2075
/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 *
 * Adds a filter for @vid for every MAC currently known on the VSI
 * (plus the netdev's own address), then converts any remaining
 * "match any VLAN" (-1) filters to VLAN-0 filters so untagged traffic
 * is still accepted without accepting every tag.
 *
 * Returns 0 on success, -ENOMEM if a filter could not be allocated.
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	/* the netdev's own MAC gets the new VLAN first */
	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			return -ENOMEM;
		}
	}

	/* then every MAC already on the VSI's filter list */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not any taged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			/* replace the netdev MAC's ANY filter with VLAN 0 */
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		/* same ANY -> VLAN-0 replacement for the rest of the list;
		 * skipped when a port VLAN (pvid) is configured
		 */
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					     is_vf, is_netdev)) {
				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
				add_f = i40e_add_filter(vsi, f->macaddr,
							0, is_vf, is_netdev);
				if (!add_f) {
					dev_info(&vsi->back->pdev->dev,
						 "Could not add filter 0 for %pM\n",
						 f->macaddr);
					return -ENOMEM;
				}
			}
		}
	}

	/* during reset/teardown the HW sync is deferred; the list changes
	 * above will be pushed by a later sync
	 */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return 0;

	return i40e_sync_vsi_filters(vsi);
}
2157
/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * Removes the @vid filter for every MAC on the VSI; if that leaves no
 * VLAN-tagged filters at all, the VLAN-0 filters are converted back to
 * "match any VLAN" (-1) filters.
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1. This signifies that we should from now
	 * on accept any traffic (with any tag present, or untagged)
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (is_netdev) {
			/* NOTE(review): a tagged filter on the netdev MAC is
			 * counted here AND again below — presumably a
			 * deliberate extra weight, but worth confirming
			 * against the intended semantics
			 */
			if (f->vlan &&
			    ether_addr_equal(netdev->dev_addr, f->macaddr))
				filter_count++;
		}

		if (f->vlan)
			filter_count++;
	}

	/* no tagged filters left: restore the netdev MAC's ANY filter */
	if (!filter_count && is_netdev) {
		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				    is_vf, is_netdev);
		if (!f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add filter %d for %pM\n",
				 I40E_VLAN_ANY, netdev->dev_addr);
			return -ENOMEM;
		}
	}

	/* and the same VLAN-0 -> ANY conversion for the rest of the list */
	if (!filter_count) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					    is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter %d for %pM\n",
					 I40E_VLAN_ANY, f->macaddr);
				return -ENOMEM;
			}
		}
	}

	/* during reset/teardown the HW sync is deferred to a later pass */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return 0;

	return i40e_sync_vsi_filters(vsi);
}
2229
2230 /**
2231  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2232  * @netdev: network interface to be adjusted
2233  * @vid: vlan id to be added
2234  *
2235  * net_device_ops implementation for adding vlan ids
2236  **/
2237 #ifdef I40E_FCOE
2238 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2239                          __always_unused __be16 proto, u16 vid)
2240 #else
2241 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2242                                 __always_unused __be16 proto, u16 vid)
2243 #endif
2244 {
2245         struct i40e_netdev_priv *np = netdev_priv(netdev);
2246         struct i40e_vsi *vsi = np->vsi;
2247         int ret = 0;
2248
2249         if (vid > 4095)
2250                 return -EINVAL;
2251
2252         netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2253
2254         /* If the network stack called us with vid = 0 then
2255          * it is asking to receive priority tagged packets with
2256          * vlan id 0.  Our HW receives them by default when configured
2257          * to receive untagged packets so there is no need to add an
2258          * extra filter for vlan 0 tagged packets.
2259          */
2260         if (vid)
2261                 ret = i40e_vsi_add_vlan(vsi, vid);
2262
2263         if (!ret && (vid < VLAN_N_VID))
2264                 set_bit(vid, vsi->active_vlans);
2265
2266         return ret;
2267 }
2268
2269 /**
2270  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2271  * @netdev: network interface to be adjusted
2272  * @vid: vlan id to be removed
2273  *
2274  * net_device_ops implementation for removing vlan ids
2275  **/
2276 #ifdef I40E_FCOE
2277 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2278                           __always_unused __be16 proto, u16 vid)
2279 #else
2280 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2281                                  __always_unused __be16 proto, u16 vid)
2282 #endif
2283 {
2284         struct i40e_netdev_priv *np = netdev_priv(netdev);
2285         struct i40e_vsi *vsi = np->vsi;
2286
2287         netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2288
2289         /* return code is ignored as there is nothing a user
2290          * can do about failure to remove and a log message was
2291          * already printed from the other function
2292          */
2293         i40e_vsi_kill_vlan(vsi, vid);
2294
2295         clear_bit(vid, vsi->active_vlans);
2296
2297         return 0;
2298 }
2299
2300 /**
2301  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2302  * @vsi: the vsi being brought back up
2303  **/
2304 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2305 {
2306         u16 vid;
2307
2308         if (!vsi->netdev)
2309                 return;
2310
2311         i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2312
2313         for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2314                 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2315                                      vid);
2316 }
2317
/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 *
 * Programs @vid as the port VLAN of the VSI (tagged mode, insert PVID,
 * strip on receive) via an Update VSI Parameters admin queue command.
 *
 * Returns 0 on success, -ENOENT if the firmware rejects the update.
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	/* mark only the VLAN property section as valid so the firmware
	 * applies just the pvid/port_vlan_flags change
	 */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	/* NOTE(review): ctxt is only partially initialized (seid + info);
	 * the remaining fields are stack garbage, presumably ignored by
	 * the update command -- confirm against the AQ documentation.
	 */
	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}
2348
2349 /**
2350  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2351  * @vsi: the vsi being adjusted
2352  *
2353  * Just use the vlan_rx_register() service to put it back to normal
2354  **/
2355 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2356 {
2357         i40e_vlan_stripping_disable(vsi);
2358
2359         vsi->info.pvid = 0;
2360 }
2361
2362 /**
2363  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2364  * @vsi: ptr to the VSI
2365  *
2366  * If this function returns with an error, then it's possible one or
2367  * more of the rings is populated (while the rest are not).  It is the
2368  * callers duty to clean those orphaned rings.
2369  *
2370  * Return 0 on success, negative on failure
2371  **/
2372 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2373 {
2374         int i, err = 0;
2375
2376         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2377                 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2378
2379         return err;
2380 }
2381
2382 /**
2383  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2384  * @vsi: ptr to the VSI
2385  *
2386  * Free VSI's transmit software resources
2387  **/
2388 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2389 {
2390         int i;
2391
2392         if (!vsi->tx_rings)
2393                 return;
2394
2395         for (i = 0; i < vsi->num_queue_pairs; i++)
2396                 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2397                         i40e_free_tx_resources(vsi->tx_rings[i]);
2398 }
2399
2400 /**
2401  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2402  * @vsi: ptr to the VSI
2403  *
2404  * If this function returns with an error, then it's possible one or
2405  * more of the rings is populated (while the rest are not).  It is the
2406  * callers duty to clean those orphaned rings.
2407  *
2408  * Return 0 on success, negative on failure
2409  **/
2410 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2411 {
2412         int i, err = 0;
2413
2414         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2415                 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2416 #ifdef I40E_FCOE
2417         i40e_fcoe_setup_ddp_resources(vsi);
2418 #endif
2419         return err;
2420 }
2421
2422 /**
2423  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2424  * @vsi: ptr to the VSI
2425  *
2426  * Free all receive software resources
2427  **/
2428 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2429 {
2430         int i;
2431
2432         if (!vsi->rx_rings)
2433                 return;
2434
2435         for (i = 0; i < vsi->num_queue_pairs; i++)
2436                 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2437                         i40e_free_rx_resources(vsi->rx_rings[i]);
2438 #ifdef I40E_FCOE
2439         i40e_fcoe_free_ddp_resources(vsi);
2440 #endif
2441 }
2442
/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	cpumask_var_t mask;

	/* rings without a vector or netdev have no queue to map */
	if (!ring->q_vector || !ring->netdev)
		return;

	/* Single TC mode enable XPS */
	if (vsi->tc_config.numtc <= 1) {
		/* program XPS only once per ring; the test_and_set bit
		 * guards against redoing this on reset/reconfig paths
		 */
		if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
			netif_set_xps_queue(ring->netdev,
					    &ring->q_vector->affinity_mask,
					    ring->queue_index);
	} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
		/* Disable XPS to allow selection based on TC:
		 * an empty cpumask clears the XPS mapping for this queue
		 */
		bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
		free_cpumask_var(mask);
	}
}
2471
2472 /**
2473  * i40e_configure_tx_ring - Configure a transmit ring context and rest
2474  * @ring: The Tx ring to configure
2475  *
2476  * Configure the Tx descriptor ring in the HMC context.
2477  **/
2478 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2479 {
2480         struct i40e_vsi *vsi = ring->vsi;
2481         u16 pf_q = vsi->base_queue + ring->queue_index;
2482         struct i40e_hw *hw = &vsi->back->hw;
2483         struct i40e_hmc_obj_txq tx_ctx;
2484         i40e_status err = 0;
2485         u32 qtx_ctl = 0;
2486
2487         /* some ATR related tx ring init */
2488         if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2489                 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2490                 ring->atr_count = 0;
2491         } else {
2492                 ring->atr_sample_rate = 0;
2493         }
2494
2495         /* configure XPS */
2496         i40e_config_xps_tx_ring(ring);
2497
2498         /* clear the context structure first */
2499         memset(&tx_ctx, 0, sizeof(tx_ctx));
2500
2501         tx_ctx.new_context = 1;
2502         tx_ctx.base = (ring->dma / 128);
2503         tx_ctx.qlen = ring->count;
2504         tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2505                                                I40E_FLAG_FD_ATR_ENABLED));
2506 #ifdef I40E_FCOE
2507         tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2508 #endif
2509         tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2510         /* FDIR VSI tx ring can still use RS bit and writebacks */
2511         if (vsi->type != I40E_VSI_FDIR)
2512                 tx_ctx.head_wb_ena = 1;
2513         tx_ctx.head_wb_addr = ring->dma +
2514                               (ring->count * sizeof(struct i40e_tx_desc));
2515
2516         /* As part of VSI creation/update, FW allocates certain
2517          * Tx arbitration queue sets for each TC enabled for
2518          * the VSI. The FW returns the handles to these queue
2519          * sets as part of the response buffer to Add VSI,
2520          * Update VSI, etc. AQ commands. It is expected that
2521          * these queue set handles be associated with the Tx
2522          * queues by the driver as part of the TX queue context
2523          * initialization. This has to be done regardless of
2524          * DCB as by default everything is mapped to TC0.
2525          */
2526         tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2527         tx_ctx.rdylist_act = 0;
2528
2529         /* clear the context in the HMC */
2530         err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2531         if (err) {
2532                 dev_info(&vsi->back->pdev->dev,
2533                          "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2534                          ring->queue_index, pf_q, err);
2535                 return -ENOMEM;
2536         }
2537
2538         /* set the context in the HMC */
2539         err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2540         if (err) {
2541                 dev_info(&vsi->back->pdev->dev,
2542                          "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2543                          ring->queue_index, pf_q, err);
2544                 return -ENOMEM;
2545         }
2546
2547         /* Now associate this queue with this PCI function */
2548         if (vsi->type == I40E_VSI_VMDQ2) {
2549                 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2550                 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2551                            I40E_QTX_CTL_VFVM_INDX_MASK;
2552         } else {
2553                 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2554         }
2555
2556         qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2557                     I40E_QTX_CTL_PF_INDX_MASK);
2558         wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2559         i40e_flush(hw);
2560
2561         clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2562
2563         /* cache tail off for easier writes later */
2564         ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2565
2566         return 0;
2567 }
2568
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context, clear the tail
 * register, and pre-fill the ring with receive buffers.
 *
 * Returns 0 on success, -ENOMEM if the HMC context could not be
 * cleared or set.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	/* pull the buffer geometry chosen at the VSI level (see
	 * i40e_vsi_configure_rx()) down into this ring
	 */
	ring->rx_buf_len = vsi->rx_buf_len;
	ring->rx_hdr_len = vsi->rx_hdr_len;

	/* buffer sizes are programmed in shifted HW units */
	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;

	/* ring base address is programmed in 128-byte units */
	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
		set_ring_16byte_desc_enabled(ring);
		rx_ctx.dsize = 0;
	} else {
		rx_ctx.dsize = 1;
	}

	/* a non-zero dtype means some flavor of header split is on */
	rx_ctx.dtype = vsi->dtype;
	if (vsi->dtype) {
		set_ring_ps_enabled(ring);
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
	} else {
		rx_ctx.hsplit_0 = 0;
	}

	/* cap the max frame by what a full buffer chain can hold */
	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
				  (chain_len * ring->rx_buf_len));
	/* revision 0 silicon gets lrxqthresh 0 -- presumably the
	 * threshold feature is unusable there; confirm with HW docs
	 */
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 1;
#ifdef I40E_FCOE
	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	/* seed the ring with empty receive buffers */
	if (ring_is_ps_enabled(ring)) {
		i40e_alloc_rx_headers(ring);
		i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
	} else {
		i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
	}

	return 0;
}
2662
2663 /**
2664  * i40e_vsi_configure_tx - Configure the VSI for Tx
2665  * @vsi: VSI structure describing this set of rings and resources
2666  *
2667  * Configure the Tx VSI for operation.
2668  **/
2669 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2670 {
2671         int err = 0;
2672         u16 i;
2673
2674         for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2675                 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2676
2677         return err;
2678 }
2679
/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Works out the maximum frame size and per-buffer geometry for the
 * VSI's configured receive mode (single-buffer, header split, or
 * split-always), then programs each Rx ring.
 *
 * Returns 0 on success, the first ring's error otherwise.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	/* account for VLAN and FCS on top of the MTU when oversized */
	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
			       + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = I40E_RXBUFFER_2048;

	/* figure out correct receive buffer length */
	switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
				    I40E_FLAG_RX_PS_ENABLED)) {
	case I40E_FLAG_RX_1BUF_ENABLED:
		/* single buffer: no header buffer, buffer fits the frame */
		vsi->rx_hdr_len = 0;
		vsi->rx_buf_len = vsi->max_frame;
		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
		break;
	case I40E_FLAG_RX_PS_ENABLED:
		/* packet split: separate header buffer + 2K data buffer */
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
		break;
	default:
		/* neither (or both) flag set: always split */
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
		break;
	}

#ifdef I40E_FCOE
	/* setup rx buffer for FCoE */
	if ((vsi->type == I40E_VSI_FCOE) &&
	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
		vsi->rx_hdr_len = 0;
		vsi->rx_buf_len = I40E_RXBUFFER_3072;
		vsi->max_frame = I40E_RXBUFFER_3072;
		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
	}

#endif /* I40E_FCOE */
	/* round up for the chip's needs */
	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
				BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
2740
2741 /**
2742  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2743  * @vsi: ptr to the VSI
2744  **/
2745 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2746 {
2747         struct i40e_ring *tx_ring, *rx_ring;
2748         u16 qoffset, qcount;
2749         int i, n;
2750
2751         if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2752                 /* Reset the TC information */
2753                 for (i = 0; i < vsi->num_queue_pairs; i++) {
2754                         rx_ring = vsi->rx_rings[i];
2755                         tx_ring = vsi->tx_rings[i];
2756                         rx_ring->dcb_tc = 0;
2757                         tx_ring->dcb_tc = 0;
2758                 }
2759         }
2760
2761         for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2762                 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
2763                         continue;
2764
2765                 qoffset = vsi->tc_config.tc_info[n].qoffset;
2766                 qcount = vsi->tc_config.tc_info[n].qcount;
2767                 for (i = qoffset; i < (qoffset + qcount); i++) {
2768                         rx_ring = vsi->rx_rings[i];
2769                         tx_ring = vsi->tx_rings[i];
2770                         rx_ring->dcb_tc = n;
2771                         tx_ring->dcb_tc = n;
2772                 }
2773         }
2774 }
2775
2776 /**
2777  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2778  * @vsi: ptr to the VSI
2779  **/
2780 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2781 {
2782         if (vsi->netdev)
2783                 i40e_set_rx_mode(vsi->netdev);
2784 }
2785
2786 /**
2787  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2788  * @vsi: Pointer to the targeted VSI
2789  *
2790  * This function replays the hlist on the hw where all the SB Flow Director
2791  * filters were saved.
2792  **/
2793 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2794 {
2795         struct i40e_fdir_filter *filter;
2796         struct i40e_pf *pf = vsi->back;
2797         struct hlist_node *node;
2798
2799         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2800                 return;
2801
2802         hlist_for_each_entry_safe(filter, node,
2803                                   &pf->fdir_filter_list, fdir_node) {
2804                 i40e_add_del_fdir(vsi, filter, true);
2805         }
2806 }
2807
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 *
 * Applies rx mode, vlans, and DCB ring mapping, then programs the Tx
 * and Rx rings.  Returns 0 on success, negative on the first failure.
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int ret;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);

	ret = i40e_vsi_configure_tx(vsi);
	if (ret)
		return ret;

	return i40e_vsi_configure_rx(vsi);
}
2825
/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * Programs the per-vector ITRs and, for each vector, builds the
 * hardware linked list of queue interrupt causes: each Rx cause chains
 * to its pair's Tx cause, each Tx cause chains to the next pair's Rx,
 * and the last pair's Tx cause terminates the list.
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_q_vector *q_vector;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 val;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		q_vector = vsi->q_vectors[i];
		/* seed Rx and Tx ITRs from the VSI-wide settings */
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			/* Rx cause: next list element is this pair's Tx */
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_TX
				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			/* Tx cause: next list element is the next pair's Rx */
			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_RX
				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
2888
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 *
 * Enables the "other" interrupt causes (errors, resets, AdminQ, VFLR,
 * and optionally iWARP and PTP) on ICR0 and points the OTHER cause at
 * ITR index 0.  (Kernel-doc previously said "@hw"; the parameter is
 * the pf.)
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	/* extra causes only when the matching feature is active */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
2926
2927 /**
2928  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2929  * @vsi: the VSI being configured
2930  **/
2931 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2932 {
2933         struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2934         struct i40e_pf *pf = vsi->back;
2935         struct i40e_hw *hw = &pf->hw;
2936         u32 val;
2937
2938         /* set the ITR configuration */
2939         q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2940         q_vector->rx.latency_range = I40E_LOW_LATENCY;
2941         wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2942         q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2943         q_vector->tx.latency_range = I40E_LOW_LATENCY;
2944         wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2945
2946         i40e_enable_misc_int_causes(pf);
2947
2948         /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2949         wr32(hw, I40E_PFINT_LNKLST0, 0);
2950
2951         /* Associate the queue pair to the vector and enable the queue int */
2952         val = I40E_QINT_RQCTL_CAUSE_ENA_MASK                  |
2953               (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2954               (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2955
2956         wr32(hw, I40E_QINT_RQCTL(0), val);
2957
2958         val = I40E_QINT_TQCTL_CAUSE_ENA_MASK                  |
2959               (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2960               (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2961
2962         wr32(hw, I40E_QINT_TQCTL(0), val);
2963         i40e_flush(hw);
2964 }
2965
2966 /**
2967  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2968  * @pf: board private structure
2969  **/
2970 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2971 {
2972         struct i40e_hw *hw = &pf->hw;
2973
2974         wr32(hw, I40E_PFINT_DYN_CTL0,
2975              I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2976         i40e_flush(hw);
2977 }
2978
2979 /**
2980  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2981  * @pf: board private structure
2982  **/
2983 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2984 {
2985         struct i40e_hw *hw = &pf->hw;
2986         u32 val;
2987
2988         val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
2989               I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2990               (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2991
2992         wr32(hw, I40E_PFINT_DYN_CTL0, val);
2993         i40e_flush(hw);
2994 }
2995
2996 /**
2997  * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2998  * @vsi: pointer to a vsi
2999  * @vector: enable a particular Hw Interrupt vector
3000  **/
3001 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
3002 {
3003         struct i40e_pf *pf = vsi->back;
3004         struct i40e_hw *hw = &pf->hw;
3005         u32 val;
3006
3007         val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3008               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3009               (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3010         wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
3011         /* skip the flush */
3012 }
3013
3014 /**
3015  * i40e_irq_dynamic_disable - Disable default interrupt generation settings
3016  * @vsi: pointer to a vsi
3017  * @vector: disable a particular Hw Interrupt vector
3018  **/
3019 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
3020 {
3021         struct i40e_pf *pf = vsi->back;
3022         struct i40e_hw *hw = &pf->hw;
3023         u32 val;
3024
3025         val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3026         wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
3027         i40e_flush(hw);
3028 }
3029
3030 /**
3031  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3032  * @irq: interrupt number
3033  * @data: pointer to a q_vector
3034  **/
3035 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3036 {
3037         struct i40e_q_vector *q_vector = data;
3038
3039         if (!q_vector->tx.ring && !q_vector->rx.ring)
3040                 return IRQ_HANDLED;
3041
3042         napi_schedule(&q_vector->napi);
3043
3044         return IRQ_HANDLED;
3045 }
3046
3047 /**
3048  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3049  * @vsi: the VSI being configured
3050  * @basename: name for the vector
3051  *
3052  * Allocates MSI-X vectors and requests interrupts from the kernel.
3053  **/
3054 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3055 {
3056         int q_vectors = vsi->num_q_vectors;
3057         struct i40e_pf *pf = vsi->back;
3058         int base = vsi->base_vector;
3059         int rx_int_idx = 0;
3060         int tx_int_idx = 0;
3061         int vector, err;
3062
3063         for (vector = 0; vector < q_vectors; vector++) {
3064                 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3065
3066                 if (q_vector->tx.ring && q_vector->rx.ring) {
3067                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3068                                  "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3069                         tx_int_idx++;
3070                 } else if (q_vector->rx.ring) {
3071                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3072                                  "%s-%s-%d", basename, "rx", rx_int_idx++);
3073                 } else if (q_vector->tx.ring) {
3074                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3075                                  "%s-%s-%d", basename, "tx", tx_int_idx++);
3076                 } else {
3077                         /* skip this unused q_vector */
3078                         continue;
3079                 }
3080                 err = request_irq(pf->msix_entries[base + vector].vector,
3081                                   vsi->irq_handler,
3082                                   0,
3083                                   q_vector->name,
3084                                   q_vector);
3085                 if (err) {
3086                         dev_info(&pf->pdev->dev,
3087                                  "%s: request_irq failed, error: %d\n",
3088                                  __func__, err);
3089                         goto free_queue_irqs;
3090                 }
3091                 /* assign the mask for this irq */
3092                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3093                                       &q_vector->affinity_mask);
3094         }
3095
3096         vsi->irqs_ready = true;
3097         return 0;
3098
3099 free_queue_irqs:
3100         while (vector) {
3101                 vector--;
3102                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3103                                       NULL);
3104                 free_irq(pf->msix_entries[base + vector].vector,
3105                          &(vsi->q_vectors[vector]));
3106         }
3107         return err;
3108 }
3109
/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 *
 * Clears the per-queue interrupt cause registers, masks the VSI's
 * interrupt vectors and waits for any in-flight handlers to finish.
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* zero the Tx/Rx queue interrupt cause control registers so the
	 * queues stop raising causes
	 */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* DYN_CTLN is indexed by (vector - 1): vector 0 is the
		 * misc/other-cause vector controlled by DYN_CTL0
		 */
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		/* wait for any running handlers on these vectors to return */
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
3142
3143 /**
3144  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3145  * @vsi: the VSI being configured
3146  **/
3147 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3148 {
3149         struct i40e_pf *pf = vsi->back;
3150         int i;
3151
3152         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3153                 for (i = vsi->base_vector;
3154                      i < (vsi->num_q_vectors + vsi->base_vector); i++)
3155                         i40e_irq_dynamic_enable(vsi, i);
3156         } else {
3157                 i40e_irq_dynamic_enable_icr0(pf);
3158         }
3159
3160         i40e_flush(&pf->hw);
3161         return 0;
3162 }
3163
/**
 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
 * @pf: board private structure
 *
 * Writes 0 to PFINT_ICR0_ENA so no "other cause" events (admin queue,
 * MDD, VFLR, reset, etc. -- see i40e_intr()) raise the misc interrupt,
 * then flushes so the write takes effect before the caller proceeds.
 **/
static void i40e_stop_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);
}
3174
/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: opaque cookie; the PF structure (registered by
 *        i40e_vsi_request_irq() in MSI/legacy mode)
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 *
 * Reads ICR0 to find pending causes.  Queue 0 work is handed to NAPI;
 * most other causes only set a state bit for the service task to act
 * on later.  Every recognized cause is masked out of ena_mask so it
 * stays disabled until the owning code re-enables it.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	/* iWARP critical error: acknowledge and keep the cause masked */
	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {

		/* temporarily disable queue cause for NAPI processing */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(0), qval);

		qval = rd32(hw, I40E_QINT_TQCTL(0));
		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);

		if (!test_bit(__I40E_DOWN, &pf->state))
			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
	}

	/* admin queue event: defer to the service task via state bit */
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	}

	/* malicious driver detection: defer to the service task */
	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	}

	/* VF level reset: defer to the service task */
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	}

	/* global reset request: classify the reset type and count it */
	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
		}
	}

	/* HMC error: log the hardware's error info/data registers */
	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	/* PTP Tx timestamp ready: hand it to the hwtstamp code */
	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, &pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return ret;
}
3305
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Walks the flow-director Tx ring reclaiming completed descriptors.
 * Each iteration consumes a pair of descriptors: the filter descriptor
 * followed by the data descriptor that carries next_to_watch.
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	/* bias the index negative so that wrap-around is detected by the
	 * index reaching zero (see the !i checks below)
	 */
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			/* wrapped: rewind to the start of the ring */
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		/* sideband filters own a raw buffer instead of an skb */
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	/* undo the negative bias before storing the real ring index */
	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		i40e_irq_dynamic_enable(vsi,
				tx_ring->q_vector->v_idx + vsi->base_vector);
	}
	return budget > 0;
}
3391
3392 /**
3393  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3394  * @irq: interrupt number
3395  * @data: pointer to a q_vector
3396  **/
3397 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3398 {
3399         struct i40e_q_vector *q_vector = data;
3400         struct i40e_vsi *vsi;
3401
3402         if (!q_vector->tx.ring)
3403                 return IRQ_HANDLED;
3404
3405         vsi = q_vector->tx.ring->vsi;
3406         i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3407
3408         return IRQ_HANDLED;
3409 }
3410
3411 /**
3412  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3413  * @vsi: the VSI being configured
3414  * @v_idx: vector index
3415  * @qp_idx: queue pair index
3416  **/
3417 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3418 {
3419         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3420         struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3421         struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3422
3423         tx_ring->q_vector = q_vector;
3424         tx_ring->next = q_vector->tx.ring;
3425         q_vector->tx.ring = tx_ring;
3426         q_vector->tx.count++;
3427
3428         rx_ring->q_vector = q_vector;
3429         rx_ring->next = q_vector->rx.ring;
3430         q_vector->rx.ring = rx_ring;
3431         q_vector->rx.count++;
3432 }
3433
3434 /**
3435  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3436  * @vsi: the VSI being configured
3437  *
3438  * This function maps descriptor rings to the queue-specific vectors
3439  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3440  * one vector per queue pair, but on a constrained vector budget, we
3441  * group the queue pairs as "efficiently" as possible.
3442  **/
3443 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3444 {
3445         int qp_remaining = vsi->num_queue_pairs;
3446         int q_vectors = vsi->num_q_vectors;
3447         int num_ringpairs;
3448         int v_start = 0;
3449         int qp_idx = 0;
3450
3451         /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3452          * group them so there are multiple queues per vector.
3453          * It is also important to go through all the vectors available to be
3454          * sure that if we don't use all the vectors, that the remaining vectors
3455          * are cleared. This is especially important when decreasing the
3456          * number of queues in use.
3457          */
3458         for (; v_start < q_vectors; v_start++) {
3459                 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3460
3461                 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3462
3463                 q_vector->num_ringpairs = num_ringpairs;
3464
3465                 q_vector->rx.count = 0;
3466                 q_vector->tx.count = 0;
3467                 q_vector->rx.ring = NULL;
3468                 q_vector->tx.ring = NULL;
3469
3470                 while (num_ringpairs--) {
3471                         map_vector_to_qp(vsi, v_start, qp_idx);
3472                         qp_idx++;
3473                         qp_remaining--;
3474                 }
3475         }
3476 }
3477
3478 /**
3479  * i40e_vsi_request_irq - Request IRQ from the OS
3480  * @vsi: the VSI being configured
3481  * @basename: name for the vector
3482  **/
3483 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3484 {
3485         struct i40e_pf *pf = vsi->back;
3486         int err;
3487
3488         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3489                 err = i40e_vsi_request_irq_msix(vsi, basename);
3490         else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3491                 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3492                                   pf->int_name, pf);
3493         else
3494                 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3495                                   pf->int_name, pf);
3496
3497         if (err)
3498                 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3499
3500         return err;
3501 }
3502
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	pf->flags |= I40E_FLAG_IN_NETPOLL;
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		/* i40e_intr() casts its cookie to struct i40e_pf *, and
		 * i40e_vsi_request_irq() registers the PF as the cookie in
		 * MSI/legacy mode -- so pass pf here, not netdev.
		 */
		i40e_intr(pf->pdev->irq, pf);
	}
	pf->flags &= ~I40E_FLAG_IN_NETPOLL;
}
#endif
3536
3537 /**
3538  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3539  * @pf: the PF being configured
3540  * @pf_q: the PF queue
3541  * @enable: enable or disable state of the queue
3542  *
3543  * This routine will wait for the given Tx queue of the PF to reach the
3544  * enabled or disabled state.
3545  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3546  * multiple retries; else will return 0 in case of success.
3547  **/
3548 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3549 {
3550         int i;
3551         u32 tx_reg;
3552
3553         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3554                 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3555                 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3556                         break;
3557
3558                 usleep_range(10, 20);
3559         }
3560         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3561                 return -ETIMEDOUT;
3562
3563         return 0;
3564 }
3565
/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Walks every Tx queue of the VSI: warns the Tx unit of the coming
 * change, toggles the QENA_REQ bit, then waits for the hardware to
 * report the requested state.  Returns 0, or the error from the first
 * queue that timed out.
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 tx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {

		/* warn the TX unit of coming changes */
		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
		if (!enable)
			usleep_range(10, 20);

		/* wait for any previous request to settle: REQ and STAT
		 * bits must agree before issuing a new request
		 */
		for (j = 0; j < 50; j++) {
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}
		/* Skip if the queue is already in the requested state */
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable) {
			/* reset the queue head before (re)enabling */
			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		} else {
			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		}

		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
		/* No waiting for the Tx queue to disable */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_txq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "%s: VSI seid %d Tx ring %d %sable timeout\n",
				 __func__, vsi->seid, pf_q,
				 (enable ? "en" : "dis"));
			break;
		}
	}

	/* NOTE(review): extra settle time on revision 0 silicon --
	 * presumably an early-hardware workaround; confirm vs datasheet
	 */
	if (hw->revision_id == 0)
		mdelay(50);
	return ret;
}
3625
3626 /**
3627  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3628  * @pf: the PF being configured
3629  * @pf_q: the PF queue
3630  * @enable: enable or disable state of the queue
3631  *
3632  * This routine will wait for the given Rx queue of the PF to reach the
3633  * enabled or disabled state.
3634  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3635  * multiple retries; else will return 0 in case of success.
3636  **/
3637 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3638 {
3639         int i;
3640         u32 rx_reg;
3641
3642         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3643                 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3644                 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3645                         break;
3646
3647                 usleep_range(10, 20);
3648         }
3649         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3650                 return -ETIMEDOUT;
3651
3652         return 0;
3653 }
3654
/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Walks every Rx queue of the VSI, toggles the QENA_REQ bit and waits
 * for the hardware to report the requested state.  Returns 0, or the
 * error from the first queue that timed out.
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* wait for any previous request to settle: REQ and STAT
		 * bits must agree before issuing a new request
		 */
		for (j = 0; j < 50; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "%s: VSI seid %d Rx ring %d %sable timeout\n",
				 __func__, vsi->seid, pf_q,
				 (enable ? "en" : "dis"));
			break;
		}
	}

	return ret;
}
3701
3702 /**
3703  * i40e_vsi_control_rings - Start or stop a VSI's rings
3704  * @vsi: the VSI being configured
3705  * @enable: start or stop the rings
3706  **/
3707 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3708 {
3709         int ret = 0;
3710
3711         /* do rx first for enable and last for disable */
3712         if (request) {
3713                 ret = i40e_vsi_control_rx(vsi, request);
3714                 if (ret)
3715                         return ret;
3716                 ret = i40e_vsi_control_tx(vsi, request);
3717         } else {
3718                 /* Ignore return value, we need to shutdown whatever we can */
3719                 i40e_vsi_control_tx(vsi, request);
3720                 i40e_vsi_control_rx(vsi, request);
3721         }
3722
3723         return ret;
3724 }
3725
/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 *
 * Releases the kernel IRQs requested for the VSI and tears down the
 * per-vector interrupt queue link lists in hardware by writing the
 * end-of-list marker and clearing the cause-enable bits.
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(pf->msix_entries[vector].vector,
					      NULL);
			free_irq(pf->msix_entries[vector].vector,
				 vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.  To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			/* LNKLSTN is indexed by (vector - 1): vector 0 is
			 * the misc vector, controlled by LNKLST0
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			/* walk the queue chain, clearing each queue's cause
			 * enable and marking its next_q as end-of-list
			 */
			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				/* the Tx cause carries the link to the next
				 * queue pair in the chain
				 */
				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		/* MSI/legacy mode: the PF itself was the dev_id cookie */
		free_irq(pf->pdev->irq, pf);

		/* NOTE(review): LNKLSTN FIRSTQ mask used with the LNKLST0
		 * register here -- presumably the bit layouts match; verify
		 * against the register definitions.
		 */
		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		/* clear the single queue pair's cause enables and mark
		 * its next_q as end-of-list
		 */
		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
3840
/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *ring;

	/* nothing to do if this vector slot was never populated */
	if (!q_vector)
		return;

	/* disassociate q_vector from rings */
	i40e_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;

	i40e_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI w/ an associated netdev is set up w/ NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	vsi->q_vectors[v_idx] = NULL;

	/* defer the actual free until any outstanding RCU readers of the
	 * q_vector have finished
	 */
	kfree_rcu(q_vector, rcu);
}
3873
3874 /**
3875  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3876  * @vsi: the VSI being un-configured
3877  *
3878  * This frees the memory allocated to the q_vectors and
3879  * deletes references to the NAPI struct.
3880  **/
3881 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3882 {
3883         int v_idx;
3884
3885         for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3886                 i40e_free_q_vector(vsi, v_idx);
3887 }
3888
/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 *
 * Releases MSI-X or MSI resources held by the PF and clears the
 * corresponding capability flags.
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		/* the vector-tracking pile only exists in MSI-X mode */
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	/* whichever mode we were in, interrupts are now torn down */
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}
3907
/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* vector 0 carries the misc/"other" interrupt; quiesce any
		 * in-flight handler before releasing it
		 */
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	/* return the entire vector range to the IRQ tracking pile */
	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}
3931
3932 /**
3933  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3934  * @vsi: the VSI being configured
3935  **/
3936 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3937 {
3938         int q_idx;
3939
3940         if (!vsi->netdev)
3941                 return;
3942
3943         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3944                 napi_enable(&vsi->q_vectors[q_idx]->napi);
3945 }
3946
3947 /**
3948  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3949  * @vsi: the VSI being configured
3950  **/
3951 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3952 {
3953         int q_idx;
3954
3955         if (!vsi->netdev)
3956                 return;
3957
3958         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3959                 napi_disable(&vsi->q_vectors[q_idx]->napi);
3960 }
3961
/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 *
 * Brings the VSI down (once; __I40E_DOWN guards re-entry) and releases
 * its interrupt and ring resources.
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
}
3974
/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 *
 * Marks the VSI as needing a restart and closes it, either through the
 * netdev stop path (if the stack has it running) or directly.
 **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	/* already down - nothing to pause */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	/* No need to disable FCoE VSI when Tx suspended */
	if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
	    vsi->type == I40E_VSI_FCOE) {
		dev_dbg(&vsi->back->pdev->dev,
			"%s: VSI seid %d skipping FCoE VSI disable\n",
			 __func__, vsi->seid);
		return;
	}

	/* flag the VSI so i40e_unquiesce_vsi() knows to bring it back */
	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev)) {
		/* go through the stack-facing close path */
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	} else {
		i40e_vsi_close(vsi);
	}
}
4000
/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 *
 * Reopens a VSI that i40e_quiesce_vsi() previously paused; VSIs that
 * were not flagged __I40E_NEEDS_RESTART are left untouched.
 **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
		return;

	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
}
4016
4017 /**
4018  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4019  * @pf: the PF
4020  **/
4021 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4022 {
4023         int v;
4024
4025         for (v = 0; v < pf->num_alloc_vsi; v++) {
4026                 if (pf->vsi[v])
4027                         i40e_quiesce_vsi(pf->vsi[v]);
4028         }
4029 }
4030
4031 /**
4032  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4033  * @pf: the PF
4034  **/
4035 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4036 {
4037         int v;
4038
4039         for (v = 0; v < pf->num_alloc_vsi; v++) {
4040                 if (pf->vsi[v])
4041                         i40e_unquiesce_vsi(pf->vsi[v]);
4042         }
4043 }
4044
4045 #ifdef CONFIG_I40E_DCB
/**
 * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * This function waits for the given VSI's Tx queues to be disabled.
 *
 * Returns 0 when every queue reaches the disabled state, otherwise the
 * error returned by i40e_pf_txq_wait() for the queue that timed out.
 **/
static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	/* pf_q is the PF-absolute index of the VSI's first queue pair */
	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the disable status of the queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "%s: VSI seid %d Tx ring %d disable timeout\n",
				 __func__, vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}
4071
4072 /**
4073  * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4074  * @pf: the PF
4075  *
4076  * This function waits for the Tx queues to be in disabled state for all the
4077  * VSIs that are managed by this PF.
4078  **/
4079 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4080 {
4081         int v, ret = 0;
4082
4083         for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4084                 /* No need to wait for FCoE VSI queues */
4085                 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4086                         ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4087                         if (ret)
4088                                 break;
4089                 }
4090         }
4091
4092         return ret;
4093 }
4094
4095 #endif
/**
 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
 * @pf: pointer to PF
 *
 * Get TC map for ISCSI PF type that will include iSCSI TC
 * and LAN TC.
 **/
static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 1; /* TC0 is always enabled */
	u8 tc, i;
	/* Get the iSCSI APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* scan the APP table for the iSCSI protocol entry; its user
	 * priority maps through the ETS priority table to the iSCSI TC
	 */
	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == I40E_APP_SEL_TCPIP &&
		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= BIT_ULL(tc);
			break;
		}
	}

	return enabled_tc;
}
4124
4125 /**
4126  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
4127  * @dcbcfg: the corresponding DCBx configuration structure
4128  *
4129  * Return the number of TCs from given DCBx configuration
4130  **/
4131 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4132 {
4133         u8 num_tc = 0;
4134         int i;
4135
4136         /* Scan the ETS Config Priority Table to find
4137          * traffic class enabled for a given priority
4138          * and use the traffic class index to get the
4139          * number of traffic classes enabled
4140          */
4141         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4142                 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4143                         num_tc = dcbcfg->etscfg.prioritytable[i];
4144         }
4145
4146         /* Traffic class index starts from zero so
4147          * increment to return the actual count
4148          */
4149         return num_tc + 1;
4150 }
4151
4152 /**
4153  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4154  * @dcbcfg: the corresponding DCBx configuration structure
4155  *
4156  * Query the current DCB configuration and return the number of
4157  * traffic classes enabled from the given DCBX config
4158  **/
4159 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4160 {
4161         u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4162         u8 enabled_tc = 1;
4163         u8 i;
4164
4165         for (i = 0; i < num_tc; i++)
4166                 enabled_tc |= BIT(i);
4167
4168         return enabled_tc;
4169 }
4170
4171 /**
4172  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4173  * @pf: PF being queried
4174  *
4175  * Return number of traffic classes enabled for the given PF
4176  **/
4177 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4178 {
4179         struct i40e_hw *hw = &pf->hw;
4180         u8 i, enabled_tc;
4181         u8 num_tc = 0;
4182         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4183
4184         /* If DCB is not enabled then always in single TC */
4185         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4186                 return 1;
4187
4188         /* SFP mode will be enabled for all TCs on port */
4189         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4190                 return i40e_dcb_get_num_tc(dcbcfg);
4191
4192         /* MFP mode return count of enabled TCs for this PF */
4193         if (pf->hw.func_caps.iscsi)
4194                 enabled_tc =  i40e_get_iscsi_tc_map(pf);
4195         else
4196                 return 1; /* Only TC0 */
4197
4198         /* At least have TC0 */
4199         enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4200         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4201                 if (enabled_tc & BIT_ULL(i))
4202                         num_tc++;
4203         }
4204         return num_tc;
4205 }
4206
4207 /**
4208  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4209  * @pf: PF being queried
4210  *
4211  * Return a bitmap for first enabled traffic class for this PF.
4212  **/
4213 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4214 {
4215         u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4216         u8 i = 0;
4217
4218         if (!enabled_tc)
4219                 return 0x1; /* TC0 */
4220
4221         /* Find the first enabled TC */
4222         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4223                 if (enabled_tc & BIT_ULL(i))
4224                         break;
4225         }
4226
4227         return BIT(i);
4228 }
4229
4230 /**
4231  * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4232  * @pf: PF being queried
4233  *
4234  * Return a bitmap for enabled traffic classes for this PF.
4235  **/
4236 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4237 {
4238         /* If DCB is not enabled for this PF then just return default TC */
4239         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4240                 return i40e_pf_get_default_tc(pf);
4241
4242         /* SFP mode we want PF to be enabled for all TCs */
4243         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4244                 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4245
4246         /* MFP enabled and iSCSI PF type */
4247         if (pf->hw.func_caps.iscsi)
4248                 return i40e_get_iscsi_tc_map(pf);
4249         else
4250                 return i40e_pf_get_default_tc(pf);
4251 }
4252
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Queries the VSI-level and per-TC bandwidth configuration from
 * firmware via the admin queue and caches the results in the VSI.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* both queries should agree on which TCs are valid */
	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	/* tc_bw_max arrives as two little-endian 16-bit words */
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
4312
/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
				       NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* cache the queue-set handles firmware assigned per TC */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}
4346
/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 * Mirrors the VSI's TC/queue layout into the netdev so the stack can
 * steer traffic to the proper queues per user priority.
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	/* no TCs enabled: clear any previous netdev TC state */
	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT_ULL(i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
4399
/**
 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 *
 * Copies only the queue-mapping related fields from the returned
 * context into the cached VSI info.
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}
4418
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT_ULL(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
4495
/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 *
 * Returns 0 on success (or when nothing needed to change),
 * negative value on failure.
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT_ULL(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}
4545
4546 #ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would've quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
		 * - For FCoE VSI only enable the TC configured
		 *   as per the APP TLV
#endif
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = i40e_pf_get_default_tc(pf);
#ifdef I40E_FCOE
		if (pf->vsi[v]->type == I40E_VSI_FCOE)
			tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
4610
/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 *
 * Returns the status of the admin queue resume command.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_resume_port_tx(hw, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Resume Port Tx failed, err %s aq_err %s\n",
			  i40e_stat_str(&pf->hw, ret),
			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}
4636
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 *
 * Returns 0 on success (including when DCB setup is skipped for old
 * firmware), otherwise the error from i40e_init_dcb().
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4))
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
4688 #endif /* CONFIG_I40E_DCB */
4689 #define SPEED_SIZE 14
4690 #define FC_SIZE 8
4691 /**
4692  * i40e_print_link_message - print link up or down
4693  * @vsi: the VSI for which link needs a message
4694  */
4695 static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4696 {
4697         char speed[SPEED_SIZE] = "Unknown";
4698         char fc[FC_SIZE] = "RX/TX";
4699
4700         if (!isup) {
4701                 netdev_info(vsi->netdev, "NIC Link is Down\n");
4702                 return;
4703         }
4704
4705         /* Warn user if link speed on NPAR enabled partition is not at
4706          * least 10GB
4707          */
4708         if (vsi->back->hw.func_caps.npar_enable &&
4709             (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
4710              vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
4711                 netdev_warn(vsi->netdev,
4712                             "The partition detected link speed that is less than 10Gbps\n");
4713
4714         switch (vsi->back->hw.phy.link_info.link_speed) {
4715         case I40E_LINK_SPEED_40GB:
4716                 strlcpy(speed, "40 Gbps", SPEED_SIZE);
4717                 break;
4718         case I40E_LINK_SPEED_20GB:
4719                 strncpy(speed, "20 Gbps", SPEED_SIZE);
4720                 break;
4721         case I40E_LINK_SPEED_10GB:
4722                 strlcpy(speed, "10 Gbps", SPEED_SIZE);
4723                 break;
4724         case I40E_LINK_SPEED_1GB:
4725                 strlcpy(speed, "1000 Mbps", SPEED_SIZE);
4726                 break;
4727         case I40E_LINK_SPEED_100MB:
4728                 strncpy(speed, "100 Mbps", SPEED_SIZE);
4729                 break;
4730         default:
4731                 break;
4732         }
4733
4734         switch (vsi->back->hw.fc.current_mode) {
4735         case I40E_FC_FULL:
4736                 strlcpy(fc, "RX/TX", FC_SIZE);
4737                 break;
4738         case I40E_FC_TX_PAUSE:
4739                 strlcpy(fc, "TX", FC_SIZE);
4740                 break;
4741         case I40E_FC_RX_PAUSE:
4742                 strlcpy(fc, "RX", FC_SIZE);
4743                 break;
4744         default:
4745                 strlcpy(fc, "None", FC_SIZE);
4746                 break;
4747         }
4748
4749         netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
4750                     speed, fc);
4751 }
4752
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 *
 * Programs interrupts, starts the rings, enables NAPI and IRQs,
 * reports the current link state to the stack, and replays Flow
 * Director sideband filters for the FDIR VSI.
 *
 * Returns 0 on success, or the error from starting the rings.
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        int err;

        /* Program interrupt cause/enable for whichever mode is in use */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                i40e_vsi_configure_msix(vsi);
        else
                i40e_configure_msi_and_legacy(vsi);

        /* start rings */
        err = i40e_vsi_control_rings(vsi, true);
        if (err)
                return err;

        clear_bit(__I40E_DOWN, &vsi->state);
        i40e_napi_enable_all(vsi);
        i40e_vsi_enable_irq(vsi);

        /* Only start the tx queues / carrier if link is already up */
        if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
            (vsi->netdev)) {
                i40e_print_link_message(vsi, true);
                netif_tx_start_all_queues(vsi->netdev);
                netif_carrier_on(vsi->netdev);
        } else if (vsi->netdev) {
                i40e_print_link_message(vsi, false);
                /* need to check for qualified module here*/
                if ((pf->hw.phy.link_info.link_info &
                        I40E_AQ_MEDIA_AVAILABLE) &&
                    (!(pf->hw.phy.link_info.an_info &
                        I40E_AQ_QUALIFIED_MODULE)))
                        netdev_err(vsi->netdev,
                                   "the driver failed to link because an unqualified module was detected.");
        }

        /* replay FDIR SB filters */
        if (vsi->type == I40E_VSI_FDIR) {
                /* reset fd counters */
                pf->fd_add_err = pf->fd_atr_cnt = 0;
                if (pf->fd_tcp_rule > 0) {
                        /* sideband TCP/IPv4 rules take precedence over ATR */
                        pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
                        pf->fd_tcp_rule = 0;
                }
                i40e_fdir_filter_restore(vsi);
        }
        /* kick the service task so it picks up any pending work */
        i40e_service_event_schedule(pf);

        return 0;
}
4808
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 *
 * Serializes against concurrent reconfiguration via the
 * __I40E_CONFIG_BUSY state bit.  Sleeps, so it must not be called
 * from atomic context (hence the WARN_ON below).
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;

        WARN_ON(in_interrupt());
        /* sleep-poll until we own the config-busy bit */
        while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
                usleep_range(1000, 2000);
        i40e_down(vsi);

        /* Give a VF some time to respond to the reset.  The
         * two second wait is based upon the watchdog cycle in
         * the VF driver.
         */
        if (vsi->type == I40E_VSI_SRIOV)
                msleep(2000);
        i40e_up(vsi);
        clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
4834
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 *
 * Re-programs the VSI and, if that succeeds, finishes the bring-up.
 **/
int i40e_up(struct i40e_vsi *vsi)
{
        int status;

        status = i40e_vsi_configure(vsi);
        if (status)
                return status;

        return i40e_up_complete(vsi);
}
4849
/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 *
 * Stops the stack from transmitting, disables interrupts and rings,
 * quiesces NAPI, and reclaims all buffers left on the rings.
 **/
void i40e_down(struct i40e_vsi *vsi)
{
        int i;

        /* It is assumed that the caller of this function
         * sets the vsi->state __I40E_DOWN bit.
         */
        if (vsi->netdev) {
                /* stop the stack from queueing new transmits first */
                netif_carrier_off(vsi->netdev);
                netif_tx_disable(vsi->netdev);
        }
        i40e_vsi_disable_irq(vsi);
        /* NOTE(review): the return value of i40e_vsi_control_rings() is
         * ignored here — on the down path there is no recovery anyway.
         */
        i40e_vsi_control_rings(vsi, false);
        i40e_napi_disable_all(vsi);

        /* reclaim any buffers still held by the rings */
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                i40e_clean_tx_ring(vsi->tx_rings[i]);
                i40e_clean_rx_ring(vsi->rx_rings[i]);
        }
}
4874
4875 /**
4876  * i40e_setup_tc - configure multiple traffic classes
4877  * @netdev: net device to configure
4878  * @tc: number of traffic classes to enable
4879  **/
4880 #ifdef I40E_FCOE
4881 int i40e_setup_tc(struct net_device *netdev, u8 tc)
4882 #else
4883 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
4884 #endif
4885 {
4886         struct i40e_netdev_priv *np = netdev_priv(netdev);
4887         struct i40e_vsi *vsi = np->vsi;
4888         struct i40e_pf *pf = vsi->back;
4889         u8 enabled_tc = 0;
4890         int ret = -EINVAL;
4891         int i;
4892
4893         /* Check if DCB enabled to continue */
4894         if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
4895                 netdev_info(netdev, "DCB is not enabled for adapter\n");
4896                 goto exit;
4897         }
4898
4899         /* Check if MFP enabled */
4900         if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4901                 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
4902                 goto exit;
4903         }
4904
4905         /* Check whether tc count is within enabled limit */
4906         if (tc > i40e_pf_get_num_tc(pf)) {
4907                 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
4908                 goto exit;
4909         }
4910
4911         /* Generate TC map for number of tc requested */
4912         for (i = 0; i < tc; i++)
4913                 enabled_tc |= BIT_ULL(i);
4914
4915         /* Requesting same TC configuration as already enabled */
4916         if (enabled_tc == vsi->tc_config.enabled_tc)
4917                 return 0;
4918
4919         /* Quiesce VSI queues */
4920         i40e_quiesce_vsi(vsi);
4921
4922         /* Configure VSI for enabled TCs */
4923         ret = i40e_vsi_config_tc(vsi, enabled_tc);
4924         if (ret) {
4925                 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
4926                             vsi->seid);
4927                 goto exit;
4928         }
4929
4930         /* Unquiesce VSI */
4931         i40e_unquiesce_vsi(vsi);
4932
4933 exit:
4934         return ret;
4935 }
4936
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        int err;

        /* disallow open during test or if eeprom is broken */
        if (test_bit(__I40E_TESTING, &pf->state) ||
            test_bit(__I40E_BAD_EEPROM, &pf->state))
                return -EBUSY;

        netif_carrier_off(netdev);

        err = i40e_vsi_open(vsi);
        if (err)
                return err;

        /* configure global TSO hardware offload settings */
        /* NOTE(review): TCP flag masks are byte-swapped and shifted into
         * the register's field position; confirm against the XL710
         * datasheet for GLLAN_TSOMSK_{F,M,L}.
         */
        wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
                                                       TCP_FLAG_FIN) >> 16);
        wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
                                                       TCP_FLAG_FIN |
                                                       TCP_FLAG_CWR) >> 16);
        wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

#ifdef CONFIG_I40E_VXLAN
        /* ask the stack to replay any known VXLAN ports to us */
        vxlan_get_rx_port(netdev);
#endif

        return 0;
}
4981
/**
 * i40e_vsi_open - Finish VSI initialization and bring it up
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Allocates Tx/Rx descriptors, configures the VSI, requests IRQs
 * (netdev VSIs also publish their real queue counts to the stack),
 * and completes the bring-up.  On failure everything is unwound via
 * the goto-cleanup ladder, and a PF reset is scheduled if this is
 * the main LAN VSI.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        char int_name[I40E_INT_NAME_STR_LEN];
        int err;

        /* allocate descriptors */
        err = i40e_vsi_setup_tx_resources(vsi);
        if (err)
                goto err_setup_tx;
        err = i40e_vsi_setup_rx_resources(vsi);
        if (err)
                goto err_setup_rx;

        err = i40e_vsi_configure(vsi);
        if (err)
                goto err_setup_rx;

        if (vsi->netdev) {
                snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
                         dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
                err = i40e_vsi_request_irq(vsi, int_name);
                if (err)
                        goto err_setup_rx;

                /* Notify the stack of the actual queue counts. */
                err = netif_set_real_num_tx_queues(vsi->netdev,
                                                   vsi->num_queue_pairs);
                if (err)
                        goto err_set_queues;

                err = netif_set_real_num_rx_queues(vsi->netdev,
                                                   vsi->num_queue_pairs);
                if (err)
                        goto err_set_queues;

        } else if (vsi->type == I40E_VSI_FDIR) {
                /* FDIR VSI has no netdev; name its IRQ after the device */
                snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
                         dev_driver_string(&pf->pdev->dev),
                         dev_name(&pf->pdev->dev));
                err = i40e_vsi_request_irq(vsi, int_name);

        } else {
                /* no other VSI type may be opened this way */
                err = -EINVAL;
                goto err_setup_rx;
        }

        err = i40e_up_complete(vsi);
        if (err)
                goto err_up_complete;

        return 0;

err_up_complete:
        i40e_down(vsi);
err_set_queues:
        i40e_vsi_free_irq(vsi);
err_setup_rx:
        i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
        i40e_vsi_free_tx_resources(vsi);
        /* failure on the main LAN VSI is unrecoverable here: reset the PF */
        if (vsi == pf->vsi[pf->lan_vsi])
                i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));

        return err;
}
5056
5057 /**
5058  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5059  * @pf: Pointer to PF
5060  *
5061  * This function destroys the hlist where all the Flow Director
5062  * filters were saved.
5063  **/
5064 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5065 {
5066         struct i40e_fdir_filter *filter;
5067         struct hlist_node *node2;
5068
5069         hlist_for_each_entry_safe(filter, node2,
5070                                   &pf->fdir_filter_list, fdir_node) {
5071                 hlist_del(&filter->fdir_node);
5072                 kfree(filter);
5073         }
5074         pf->fdir_pf_active_filters = 0;
5075 }
5076
5077 /**
5078  * i40e_close - Disables a network interface
5079  * @netdev: network interface device structure
5080  *
5081  * The close entry point is called when an interface is de-activated
5082  * by the OS.  The hardware is still under the driver's control, but
5083  * this netdev interface is disabled.
5084  *
5085  * Returns 0, this is not allowed to fail
5086  **/
5087 #ifdef I40E_FCOE
5088 int i40e_close(struct net_device *netdev)
5089 #else
5090 static int i40e_close(struct net_device *netdev)
5091 #endif
5092 {
5093         struct i40e_netdev_priv *np = netdev_priv(netdev);
5094         struct i40e_vsi *vsi = np->vsi;
5095
5096         i40e_vsi_close(vsi);
5097
5098         return 0;
5099 }
5100
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 *
 * Only the biggest reset indicated in @reset_flags is performed;
 * Global > Core > PF > per-VSI reinit > per-VSI down.  VFs are
 * notified first if the admin send queue is still alive.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
        u32 val;

        /* may sleep (via the VSI paths); not for atomic context */
        WARN_ON(in_interrupt());

        if (i40e_check_asq_alive(&pf->hw))
                i40e_vc_notify_reset(pf);

        /* do the biggest reset indicated */
        if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

                /* Request a Global Reset
                 *
                 * This will start the chip's countdown to the actual full
                 * chip reset event, and a warning interrupt to be sent
                 * to all PFs, including the requestor.  Our handler
                 * for the warning interrupt will deal with the shutdown
                 * and recovery of the switch setup.
                 */
                dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
                val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
                val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

        } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

                /* Request a Core Reset
                 *
                 * Same as Global Reset, except does *not* include the MAC/PHY
                 */
                dev_dbg(&pf->pdev->dev, "CoreR requested\n");
                val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
                val |= I40E_GLGEN_RTRIG_CORER_MASK;
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
                i40e_flush(&pf->hw);

        } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {

                /* Request a PF Reset
                 *
                 * Resets only the PF-specific registers
                 *
                 * This goes directly to the tear-down and rebuild of
                 * the switch, since we need to do all the recovery as
                 * for the Core Reset.
                 */
                dev_dbg(&pf->pdev->dev, "PFR requested\n");
                i40e_handle_reset_warning(pf);

        } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
                int v;

                /* Find the VSI(s) that requested a re-init */
                dev_info(&pf->pdev->dev,
                         "VSI reinit requested\n");
                for (v = 0; v < pf->num_alloc_vsi; v++) {
                        struct i40e_vsi *vsi = pf->vsi[v];
                        if (vsi != NULL &&
                            test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
                                i40e_vsi_reinit_locked(pf->vsi[v]);
                                clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
                        }
                }

                /* no further action needed, so return now */
                return;
        } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
                int v;

                /* Find the VSI(s) that needs to be brought down */
                dev_info(&pf->pdev->dev, "VSI down requested\n");
                for (v = 0; v < pf->num_alloc_vsi; v++) {
                        struct i40e_vsi *vsi = pf->vsi[v];
                        if (vsi != NULL &&
                            test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
                                set_bit(__I40E_DOWN, &vsi->state);
                                i40e_down(vsi);
                                clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
                        }
                }

                /* no further action needed, so return now */
                return;
        } else {
                dev_info(&pf->pdev->dev,
                         "bad reset request 0x%08x\n", reset_flags);
                return;
        }
}
5200
5201 #ifdef CONFIG_I40E_DCB
5202 /**
5203  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5204  * @pf: board private structure
5205  * @old_cfg: current DCB config
5206  * @new_cfg: new DCB config
5207  **/
5208 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5209                             struct i40e_dcbx_config *old_cfg,
5210                             struct i40e_dcbx_config *new_cfg)
5211 {
5212         bool need_reconfig = false;
5213
5214         /* Check if ETS configuration has changed */
5215         if (memcmp(&new_cfg->etscfg,
5216                    &old_cfg->etscfg,
5217                    sizeof(new_cfg->etscfg))) {
5218                 /* If Priority Table has changed reconfig is needed */
5219                 if (memcmp(&new_cfg->etscfg.prioritytable,
5220                            &old_cfg->etscfg.prioritytable,
5221                            sizeof(new_cfg->etscfg.prioritytable))) {
5222                         need_reconfig = true;
5223                         dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5224                 }
5225
5226                 if (memcmp(&new_cfg->etscfg.tcbwtable,
5227                            &old_cfg->etscfg.tcbwtable,
5228                            sizeof(new_cfg->etscfg.tcbwtable)))
5229                         dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5230
5231                 if (memcmp(&new_cfg->etscfg.tsatable,
5232                            &old_cfg->etscfg.tsatable,
5233                            sizeof(new_cfg->etscfg.tsatable)))
5234                         dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5235         }
5236
5237         /* Check if PFC configuration has changed */
5238         if (memcmp(&new_cfg->pfc,
5239                    &old_cfg->pfc,
5240                    sizeof(new_cfg->pfc))) {
5241                 need_reconfig = true;
5242                 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5243         }
5244
5245         /* Check if APP Table has changed */
5246         if (memcmp(&new_cfg->app,
5247                    &old_cfg->app,
5248                    sizeof(new_cfg->app))) {
5249                 need_reconfig = true;
5250                 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5251         }
5252
5253         dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
5254                 need_reconfig);
5255         return need_reconfig;
5256 }
5257
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Processes a "LLDP MIB changed" admin-queue event.  Remote MIB
 * updates only refresh the cached remote config; local MIB changes
 * are diffed against the previous config, and when a reconfig is
 * needed the VSIs are quiesced, DCB is re-applied, and port Tx is
 * resumed.  On unrecoverable errors a PF reset is scheduled.
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
                                  struct i40e_arq_event_info *e)
{
        struct i40e_aqc_lldp_get_mib *mib =
                (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_dcbx_config tmp_dcbx_cfg;
        bool need_reconfig = false;
        int ret = 0;
        u8 type;

        /* Not DCB capable or capability disabled */
        if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
                return ret;

        /* Ignore if event is not for Nearest Bridge */
        type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
                & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
        dev_dbg(&pf->pdev->dev,
                "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
        if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
                return ret;

        /* Check MIB Type and return if event for Remote MIB update */
        type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
        dev_dbg(&pf->pdev->dev,
                "%s: LLDP event mib type %s\n", __func__,
                type ? "remote" : "local");
        if (type == I40E_AQ_LLDP_MIB_REMOTE) {
                /* Update the remote cached instance and return */
                ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
                                I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
                                &hw->remote_dcbx_config);
                goto exit;
        }

        /* Store the old configuration so we can diff against it below */
        tmp_dcbx_cfg = hw->local_dcbx_config;

        /* Reset the old DCBx configuration data */
        memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
        /* Get updated DCBX data from firmware */
        ret = i40e_get_dcb_config(&pf->hw);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto exit;
        }

        /* No change detected in DCBX configs */
        if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
                    sizeof(tmp_dcbx_cfg))) {
                dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
                goto exit;
        }

        need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
                                               &hw->local_dcbx_config);

        /* sync dcbnl app entries with the new config */
        i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

        if (!need_reconfig)
                goto exit;

        /* Enable DCB tagging only when more than one TC */
        if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
                pf->flags |= I40E_FLAG_DCB_ENABLED;
        else
                pf->flags &= ~I40E_FLAG_DCB_ENABLED;

        set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
        /* Reconfiguration needed quiesce all VSIs */
        i40e_pf_quiesce_all_vsi(pf);

        /* Changes in configuration update VEB/VSI */
        i40e_dcb_reconfigure(pf);

        ret = i40e_resume_port_tx(pf);

        clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
        /* In case of error no point in resuming VSIs */
        if (ret)
                goto exit;

        /* Wait for the PF's Tx queues to be disabled */
        ret = i40e_pf_wait_txq_disabled(pf);
        if (ret) {
                /* Schedule PF reset to recover */
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                i40e_service_event_schedule(pf);
        } else {
                i40e_pf_unquiesce_all_vsi(pf);
        }

exit:
        return ret;
}
5362 #endif /* CONFIG_I40E_DCB */
5363
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * Wraps i40e_do_reset() in the rtnl lock so that resets triggered
 * from user context are serialized against netdev state changes.
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
        rtnl_lock();
        i40e_do_reset(pf, reset_flags);
        rtnl_unlock();
}
5376
/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 *
 * If the overflowing queue belongs to a VF, the owning VF is looked up
 * from the QTX_CTL VFVM index and reset.  PF-owned queues take no
 * action here beyond the debug log.
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
                                           struct i40e_arq_event_info *e)
{
        struct i40e_aqc_lan_overflow *data =
                (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
        u32 queue = le32_to_cpu(data->prtdcb_rupto);
        u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf;
        u16 vf_id;

        dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
                queue, qtx_ctl);

        /* Queue belongs to VF, find the VF and issue VF reset */
        if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
            >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
                /* convert the absolute VF index into our local array index */
                vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
                         >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
                vf_id -= hw->func_caps.vf_base_id;
                vf = &pf->vf[vf_id];
                i40e_vc_notify_vf_reset(vf);
                /* Allow VF to process pending reset notification */
                msleep(20);
                i40e_reset_vf(vf, false);
        }
}
5412
/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 *
 * Clears the SERVICE_SCHED bit so the next service event can be
 * scheduled; must only be called while the bit is set.
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
        BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

        /* flush memory to make sure state is correct before next watchog */
        smp_mb__before_atomic();
        clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
5425
5426 /**
5427  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5428  * @pf: board private structure
5429  **/
5430 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5431 {
5432         u32 val, fcnt_prog;
5433
5434         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5435         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5436         return fcnt_prog;
5437 }
5438
5439 /**
5440  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5441  * @pf: board private structure
5442  **/
5443 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5444 {
5445         u32 val, fcnt_prog;
5446
5447         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5448         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5449                     ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5450                       I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5451         return fcnt_prog;
5452 }
5453
5454 /**
5455  * i40e_get_global_fd_count - Get total FD filters programmed on device
5456  * @pf: board private structure
5457  **/
5458 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5459 {
5460         u32 val, fcnt_prog;
5461
5462         val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5463         fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5464                     ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5465                      I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5466         return fcnt_prog;
5467 }
5468
/**
 * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
 * @pf: board private structure
 *
 * Re-enables the sideband (SB) and then, with more headroom, the ATR
 * flow-director features if they were auto-disabled and the filter
 * table has space again.  No-op while a filter-table flush is pending.
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
        u32 fcnt_prog, fcnt_avail;

        if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
                return;

        /* Check if, FD SB or ATR was auto disabled and if there is enough room
         * to re-enable
         */
        fcnt_prog = i40e_get_global_fd_count(pf);
        fcnt_avail = pf->fdir_pf_filter_count;
        /* NOTE(review): this is an OR of three conditions — zero recent
         * add-errors or a shrinking ATR count alone is enough to
         * re-enable SB even without headroom; confirm that is intended.
         */
        if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
            (pf->fd_add_err == 0) ||
            (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
                }
        }
        /* Wait for some more space to be available to turn on ATR */
        if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
                }
        }
}
5505
/* minimum seconds between two filter-table flushes */
#define I40E_MIN_FD_FLUSH_INTERVAL 10
/* seconds to keep ATR off after a flush when SB rules crowd the table */
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 *
 * Clears the entire Flow Director filter table in hardware, then replays
 * the sideband (ntuple) filters.  ATR is disabled for the duration of the
 * flush and only re-enabled afterwards if flushes are not happening too
 * frequently while the table is mostly full of SB rules.
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	/* nothing to do if Flow Director is entirely disabled */
	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	/* rate-limit: at most one flush per I40E_MIN_FD_FLUSH_INTERVAL secs */
	if (time_after(jiffies, pf->fd_flush_timestamp +
				(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
		/* If the flush is happening too quick and we have mostly
		 * SB rules we should not re-enable ATR for some time.
		 */
		min_flush_time = pf->fd_flush_timestamp
				+ (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
		fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

		if (!(time_after(jiffies, min_flush_time)) &&
		    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
			disable_atr = true;
		}

		pf->fd_flush_timestamp = jiffies;
		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		/* flush all filters */
		wr32(&pf->hw, I40E_PFQF_CTL_1,
		     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
		i40e_flush(&pf->hw);
		pf->fd_flush_cnt++;
		pf->fd_add_err = 0;
		do {
			/* Check FD flush status every 5-6msec */
			usleep_range(5000, 6000);
			reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
			/* hardware clears the bit when the flush completes */
			if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
				break;
		} while (flush_wait_retry--);
		if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
			dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
		} else {
			/* replay sideband filters */
			i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
			if (!disable_atr)
				pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
		}
	}
}
5567
/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 *
 * ATR filters are the hardware-programmed entries that are not tracked as
 * sideband filters, i.e. the total programmed filter count minus the
 * active sideband filter count.
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}
5576
5577 /* We can see up to 256 filter programming desc in transit if the filters are
5578  * being applied really fast; before we see the first
5579  * filter miss error on Rx queue 0. Accumulating enough error messages before
5580  * reacting will make sure we don't cause flush too often.
5581  */
5582 #define I40E_MAX_FD_PROGRAM_ERROR 256
5583
5584 /**
5585  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5586  * @pf: board private structure
5587  **/
5588 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5589 {
5590
5591         /* if interface is down do nothing */
5592         if (test_bit(__I40E_DOWN, &pf->state))
5593                 return;
5594
5595         if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5596                 return;
5597
5598         if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5599                 i40e_fdir_flush_and_replay(pf);
5600
5601         i40e_fdir_check_and_reenable(pf);
5602
5603 }
5604
5605 /**
5606  * i40e_vsi_link_event - notify VSI of a link event
5607  * @vsi: vsi to be notified
5608  * @link_up: link up or down
5609  **/
5610 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5611 {
5612         if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
5613                 return;
5614
5615         switch (vsi->type) {
5616         case I40E_VSI_MAIN:
5617 #ifdef I40E_FCOE
5618         case I40E_VSI_FCOE:
5619 #endif
5620                 if (!vsi->netdev || !vsi->netdev_registered)
5621                         break;
5622
5623                 if (link_up) {
5624                         netif_carrier_on(vsi->netdev);
5625                         netif_tx_wake_all_queues(vsi->netdev);
5626                 } else {
5627                         netif_carrier_off(vsi->netdev);
5628                         netif_tx_stop_all_queues(vsi->netdev);
5629                 }
5630                 break;
5631
5632         case I40E_VSI_SRIOV:
5633         case I40E_VSI_VMDQ2:
5634         case I40E_VSI_CTRL:
5635         case I40E_VSI_MIRROR:
5636         default:
5637                 /* there is no notification for other VSIs */
5638                 break;
5639         }
5640 }
5641
5642 /**
5643  * i40e_veb_link_event - notify elements on the veb of a link event
5644  * @veb: veb to be notified
5645  * @link_up: link up or down
5646  **/
5647 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
5648 {
5649         struct i40e_pf *pf;
5650         int i;
5651
5652         if (!veb || !veb->pf)
5653                 return;
5654         pf = veb->pf;
5655
5656         /* depth first... */
5657         for (i = 0; i < I40E_MAX_VEB; i++)
5658                 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
5659                         i40e_veb_link_event(pf->veb[i], link_up);
5660
5661         /* ... now the local VSIs */
5662         for (i = 0; i < pf->num_alloc_vsi; i++)
5663                 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
5664                         i40e_vsi_link_event(pf->vsi[i], link_up);
5665 }
5666
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 *
 * Refreshes the link status from hardware and, when the link state or
 * speed changed, propagates the event through the switch tree, notifies
 * the VFs, and updates the PTP clock increment.
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	bool new_link, old_link;
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;

	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
	new_link = i40e_get_link_status(&pf->hw);
	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	/* bail if nothing changed, unless the carrier state has fallen out
	 * of sync with the reported link while the VSI is up
	 */
	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_DOWN, &vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	if (!test_bit(__I40E_DOWN, &vsi->state))
		i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	/* a link speed change affects the PTP clock increment value */
	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
5708
/**
 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
 * @pf: board private structure
 *
 * Set the per-queue flags to request a check for stuck queues in the irq
 * clean functions, then force interrupts to be sure the irq clean is called.
 **/
static void i40e_check_hang_subtask(struct i40e_pf *pf)
{
	int i, v;

	/* If we're down or resetting, just bail */
	if (test_bit(__I40E_DOWN, &pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* for each VSI/netdev
	 *     for each Tx queue
	 *         set the check flag
	 *     for each q_vector
	 *         force an interrupt
	 */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];
		int armed = 0;

		/* skip unallocated, down, or carrier-off VSIs */
		if (!pf->vsi[v] ||
		    test_bit(__I40E_DOWN, &vsi->state) ||
		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
			continue;

		/* arm the hang check on every Tx ring; count how many rings
		 * were already armed from a previous pass
		 */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			set_check_for_tx_hang(vsi->tx_rings[i]);
			if (test_bit(__I40E_HANG_CHECK_ARMED,
				     &vsi->tx_rings[i]->state))
				armed++;
		}

		/* if any ring stayed armed, force a software interrupt so
		 * the irq clean routine runs even if the device stopped
		 * generating interrupts
		 */
		if (armed) {
			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
				/* legacy/MSI: one shared interrupt cause */
				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
				      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
			} else {
				/* MSI-X: trigger each of the VSI's vectors */
				u16 vec = vsi->base_vector - 1;
				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
				      I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
				      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
				      I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
				      I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
					wr32(&vsi->back->hw,
					     I40E_PFINT_DYN_CTLN(vec), val);
			}
			i40e_flush(&vsi->back->hw);
		}
	}
}
5770
5771 /**
5772  * i40e_watchdog_subtask - periodic checks not using event driven response
5773  * @pf: board private structure
5774  **/
5775 static void i40e_watchdog_subtask(struct i40e_pf *pf)
5776 {
5777         int i;
5778
5779         /* if interface is down do nothing */
5780         if (test_bit(__I40E_DOWN, &pf->state) ||
5781             test_bit(__I40E_CONFIG_BUSY, &pf->state))
5782                 return;
5783
5784         /* make sure we don't do these things too often */
5785         if (time_before(jiffies, (pf->service_timer_previous +
5786                                   pf->service_timer_period)))
5787                 return;
5788         pf->service_timer_previous = jiffies;
5789
5790         i40e_check_hang_subtask(pf);
5791         i40e_link_event(pf);
5792
5793         /* Update the stats for active netdevs so the network stack
5794          * can look at updated numbers whenever it cares to
5795          */
5796         for (i = 0; i < pf->num_alloc_vsi; i++)
5797                 if (pf->vsi[i] && pf->vsi[i]->netdev)
5798                         i40e_update_stats(pf->vsi[i]);
5799
5800         /* Update the stats for the active switching components */
5801         for (i = 0; i < I40E_MAX_VEB; i++)
5802                 if (pf->veb[i])
5803                         i40e_update_veb_stats(pf->veb[i]);
5804
5805         i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
5806 }
5807
5808 /**
5809  * i40e_reset_subtask - Set up for resetting the device and driver
5810  * @pf: board private structure
5811  **/
5812 static void i40e_reset_subtask(struct i40e_pf *pf)
5813 {
5814         u32 reset_flags = 0;
5815
5816         rtnl_lock();
5817         if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
5818                 reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
5819                 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
5820         }
5821         if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
5822                 reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
5823                 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5824         }
5825         if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
5826                 reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
5827                 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
5828         }
5829         if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
5830                 reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
5831                 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
5832         }
5833         if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
5834                 reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
5835                 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
5836         }
5837
5838         /* If there's a recovery already waiting, it takes
5839          * precedence before starting a new reset sequence.
5840          */
5841         if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
5842                 i40e_handle_reset_warning(pf);
5843                 goto unlock;
5844         }
5845
5846         /* If we're already down or resetting, just bail */
5847         if (reset_flags &&
5848             !test_bit(__I40E_DOWN, &pf->state) &&
5849             !test_bit(__I40E_CONFIG_BUSY, &pf->state))
5850                 i40e_do_reset(pf, reset_flags);
5851
5852 unlock:
5853         rtnl_unlock();
5854 }
5855
5856 /**
5857  * i40e_handle_link_event - Handle link event
5858  * @pf: board private structure
5859  * @e: event info posted on ARQ
5860  **/
5861 static void i40e_handle_link_event(struct i40e_pf *pf,
5862                                    struct i40e_arq_event_info *e)
5863 {
5864         struct i40e_hw *hw = &pf->hw;
5865         struct i40e_aqc_get_link_status *status =
5866                 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5867
5868         /* save off old link status information */
5869         hw->phy.link_info_old = hw->phy.link_info;
5870
5871         /* Do a new status request to re-enable LSE reporting
5872          * and load new status information into the hw struct
5873          * This completely ignores any state information
5874          * in the ARQ event info, instead choosing to always
5875          * issue the AQ update link status command.
5876          */
5877         i40e_link_event(pf);
5878
5879         /* check for unqualified module, if link is down */
5880         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
5881             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
5882             (!(status->link_info & I40E_AQ_LINK_UP)))
5883                 dev_err(&pf->pdev->dev,
5884                         "The driver failed to link because an unqualified module was detected.\n");
5885 }
5886
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 *
 * Clears any latched error indications in the ARQ/ASQ length registers,
 * drains pending admin receive queue events (bounded by the per-pass
 * work limit) dispatching each to its handler, then re-enables the
 * AdminQ interrupt cause.
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, &pf->state))
		return;

	/* check for error indications on the receive queue */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	/* write back only if an error bit was actually cleared */
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	/* same check for the send queue */
	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	/* drain the ARQ, at most adminq_work_limit events per pass */
	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ Error: Unknown event 0x%04x received\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}
6004
6005 /**
6006  * i40e_verify_eeprom - make sure eeprom is good to use
6007  * @pf: board private structure
6008  **/
6009 static void i40e_verify_eeprom(struct i40e_pf *pf)
6010 {
6011         int err;
6012
6013         err = i40e_diag_eeprom_test(&pf->hw);
6014         if (err) {
6015                 /* retry in case of garbage read */
6016                 err = i40e_diag_eeprom_test(&pf->hw);
6017                 if (err) {
6018                         dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6019                                  err);
6020                         set_bit(__I40E_BAD_EEPROM, &pf->state);
6021                 }
6022         }
6023
6024         if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6025                 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6026                 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6027         }
6028 }
6029
6030 /**
6031  * i40e_enable_pf_switch_lb
6032  * @pf: pointer to the PF structure
6033  *
6034  * enable switch loop back or die - no point in a return value
6035  **/
6036 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6037 {
6038         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6039         struct i40e_vsi_context ctxt;
6040         int ret;
6041
6042         ctxt.seid = pf->main_vsi_seid;
6043         ctxt.pf_num = pf->hw.pf_id;
6044         ctxt.vf_num = 0;
6045         ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6046         if (ret) {
6047                 dev_info(&pf->pdev->dev,
6048                          "couldn't get PF vsi config, err %s aq_err %s\n",
6049                          i40e_stat_str(&pf->hw, ret),
6050                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6051                 return;
6052         }
6053         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6054         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6055         ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6056
6057         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6058         if (ret) {
6059                 dev_info(&pf->pdev->dev,
6060                          "update vsi switch failed, err %s aq_err %s\n",
6061                          i40e_stat_str(&pf->hw, ret),
6062                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6063         }
6064 }
6065
6066 /**
6067  * i40e_disable_pf_switch_lb
6068  * @pf: pointer to the PF structure
6069  *
6070  * disable switch loop back or die - no point in a return value
6071  **/
6072 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6073 {
6074         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6075         struct i40e_vsi_context ctxt;
6076         int ret;
6077
6078         ctxt.seid = pf->main_vsi_seid;
6079         ctxt.pf_num = pf->hw.pf_id;
6080         ctxt.vf_num = 0;
6081         ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6082         if (ret) {
6083                 dev_info(&pf->pdev->dev,
6084                          "couldn't get PF vsi config, err %s aq_err %s\n",
6085                          i40e_stat_str(&pf->hw, ret),
6086                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6087                 return;
6088         }
6089         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6090         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6091         ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6092
6093         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6094         if (ret) {
6095                 dev_info(&pf->pdev->dev,
6096                          "update vsi switch failed, err %s aq_err %s\n",
6097                          i40e_stat_str(&pf->hw, ret),
6098                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6099         }
6100 }
6101
6102 /**
6103  * i40e_config_bridge_mode - Configure the HW bridge mode
6104  * @veb: pointer to the bridge instance
6105  *
6106  * Configure the loop back mode for the LAN VSI that is downlink to the
6107  * specified HW bridge instance. It is expected this function is called
6108  * when a new HW bridge is instantiated.
6109  **/
6110 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6111 {
6112         struct i40e_pf *pf = veb->pf;
6113
6114         dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6115                  veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6116         if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6117                 i40e_disable_pf_switch_lb(pf);
6118         else
6119                 i40e_enable_pf_switch_lb(pf);
6120 }
6121
/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	/* a non-LAN owner VSI borrows the LAN VSI's uplink until the VEB
	 * exists and it can be moved onto it below
	 */
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	/* restore the bridge mode requested before the reset */
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];
			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
6207
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 *
 * Discovers the function capabilities via the admin queue, growing the
 * response buffer and retrying when firmware reports it was too small.
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENODEV when
 * the discovery command fails for any other reason.
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	/* start with room for 40 capability elements */
	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
	} while (err);

	/* NOTE(review): firmware older than 2.22 appears to under-report the
	 * usable MSI-X vector counts by one; compensated here — confirm
	 * against firmware release notes
	 */
	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
	    (pf->hw.aq.fw_maj_ver < 2)) {
		pf->hw.func_caps.num_msix_vectors++;
		pf->hw.func_caps.num_msix_vectors_vf++;
	}

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

/* minimum VSIs needed: one per PF, plus one per VF, plus one for FCoE */
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}
6274
6275 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6276
6277 /**
6278  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6279  * @pf: board private structure
6280  **/
6281 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6282 {
6283         struct i40e_vsi *vsi;
6284         int i;
6285
6286         /* quick workaround for an NVM issue that leaves a critical register
6287          * uninitialized
6288          */
6289         if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6290                 static const u32 hkey[] = {
6291                         0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6292                         0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6293                         0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6294                         0x95b3a76d};
6295
6296                 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6297                         wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6298         }
6299
6300         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6301                 return;
6302
6303         /* find existing VSI and see if it needs configuring */
6304         vsi = NULL;
6305         for (i = 0; i < pf->num_alloc_vsi; i++) {
6306                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6307                         vsi = pf->vsi[i];
6308                         break;
6309                 }
6310         }
6311
6312         /* create a new VSI if none exists */
6313         if (!vsi) {
6314                 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6315                                      pf->vsi[pf->lan_vsi]->seid, 0);
6316                 if (!vsi) {
6317                         dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6318                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6319                         return;
6320                 }
6321         }
6322
6323         i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6324 }
6325
6326 /**
6327  * i40e_fdir_teardown - release the Flow Director resources
6328  * @pf: board private structure
6329  **/
6330 static void i40e_fdir_teardown(struct i40e_pf *pf)
6331 {
6332         int i;
6333
6334         i40e_fdir_filter_exit(pf);
6335         for (i = 0; i < pf->num_alloc_vsi; i++) {
6336                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6337                         i40e_vsi_release(pf->vsi[i]);
6338                         break;
6339                 }
6340         }
6341 }
6342
6343 /**
6344  * i40e_prep_for_reset - prep for the core to reset
6345  * @pf: board private structure
6346  *
6347  * Close up the VFs and other things in prep for PF Reset.
6348   **/
6349 static void i40e_prep_for_reset(struct i40e_pf *pf)
6350 {
6351         struct i40e_hw *hw = &pf->hw;
6352         i40e_status ret = 0;
6353         u32 v;
6354
6355         clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6356         if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6357                 return;
6358
6359         dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6360
6361         /* quiesce the VSIs and their queues that are not already DOWN */
6362         i40e_pf_quiesce_all_vsi(pf);
6363
6364         for (v = 0; v < pf->num_alloc_vsi; v++) {
6365                 if (pf->vsi[v])
6366                         pf->vsi[v]->seid = 0;
6367         }
6368
6369         i40e_shutdown_adminq(&pf->hw);
6370
6371         /* call shutdown HMC */
6372         if (hw->hmc.hmc_obj) {
6373                 ret = i40e_shutdown_lan_hmc(hw);
6374                 if (ret)
6375                         dev_warn(&pf->pdev->dev,
6376                                  "shutdown_lan_hmc failed: %d\n", ret);
6377         }
6378 }
6379
6380 /**
6381  * i40e_send_version - update firmware with driver version
6382  * @pf: PF struct
6383  */
6384 static void i40e_send_version(struct i40e_pf *pf)
6385 {
6386         struct i40e_driver_version dv;
6387
6388         dv.major_version = DRV_VERSION_MAJOR;
6389         dv.minor_version = DRV_VERSION_MINOR;
6390         dv.build_version = DRV_VERSION_BUILD;
6391         dv.subbuild_version = 0;
6392         strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6393         i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6394 }
6395
6396 /**
6397  * i40e_reset_and_rebuild - reset and rebuild using a saved config
6398  * @pf: board private structure
6399  * @reinit: if the Main VSI needs to re-initialized.
6400  **/
6401 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6402 {
6403         struct i40e_hw *hw = &pf->hw;
6404         u8 set_fc_aq_fail = 0;
6405         i40e_status ret;
6406         u32 v;
6407
6408         /* Now we wait for GRST to settle out.
6409          * We don't have to delete the VEBs or VSIs from the hw switch
6410          * because the reset will make them disappear.
6411          */
6412         ret = i40e_pf_reset(hw);
6413         if (ret) {
6414                 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6415                 set_bit(__I40E_RESET_FAILED, &pf->state);
6416                 goto clear_recovery;
6417         }
6418         pf->pfr_count++;
6419
6420         if (test_bit(__I40E_DOWN, &pf->state))
6421                 goto clear_recovery;
6422         dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6423
6424         /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6425         ret = i40e_init_adminq(&pf->hw);
6426         if (ret) {
6427                 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6428                          i40e_stat_str(&pf->hw, ret),
6429                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6430                 goto clear_recovery;
6431         }
6432
6433         /* re-verify the eeprom if we just had an EMP reset */
6434         if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6435                 i40e_verify_eeprom(pf);
6436
6437         i40e_clear_pxe_mode(hw);
6438         ret = i40e_get_capabilities(pf);
6439         if (ret)
6440                 goto end_core_reset;
6441
6442         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6443                                 hw->func_caps.num_rx_qp,
6444                                 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6445         if (ret) {
6446                 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6447                 goto end_core_reset;
6448         }
6449         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6450         if (ret) {
6451                 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6452                 goto end_core_reset;
6453         }
6454
6455 #ifdef CONFIG_I40E_DCB
6456         ret = i40e_init_pf_dcb(pf);
6457         if (ret) {
6458                 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6459                 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6460                 /* Continue without DCB enabled */
6461         }
6462 #endif /* CONFIG_I40E_DCB */
6463 #ifdef I40E_FCOE
6464         ret = i40e_init_pf_fcoe(pf);
6465         if (ret)
6466                 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
6467
6468 #endif
6469         /* do basic switch setup */
6470         ret = i40e_setup_pf_switch(pf, reinit);
6471         if (ret)
6472                 goto end_core_reset;
6473
6474         /* driver is only interested in link up/down and module qualification
6475          * reports from firmware
6476          */
6477         ret = i40e_aq_set_phy_int_mask(&pf->hw,
6478                                        I40E_AQ_EVENT_LINK_UPDOWN |
6479                                        I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
6480         if (ret)
6481                 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6482                          i40e_stat_str(&pf->hw, ret),
6483                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6484
6485         /* make sure our flow control settings are restored */
6486         ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6487         if (ret)
6488                 dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
6489                          i40e_stat_str(&pf->hw, ret),
6490                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6491
6492         /* Rebuild the VSIs and VEBs that existed before reset.
6493          * They are still in our local switch element arrays, so only
6494          * need to rebuild the switch model in the HW.
6495          *
6496          * If there were VEBs but the reconstitution failed, we'll try
6497          * try to recover minimal use by getting the basic PF VSI working.
6498          */
6499         if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6500                 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6501                 /* find the one VEB connected to the MAC, and find orphans */
6502                 for (v = 0; v < I40E_MAX_VEB; v++) {
6503                         if (!pf->veb[v])
6504                                 continue;
6505
6506                         if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6507                             pf->veb[v]->uplink_seid == 0) {
6508                                 ret = i40e_reconstitute_veb(pf->veb[v]);
6509
6510                                 if (!ret)
6511                                         continue;
6512
6513                                 /* If Main VEB failed, we're in deep doodoo,
6514                                  * so give up rebuilding the switch and set up
6515                                  * for minimal rebuild of PF VSI.
6516                                  * If orphan failed, we'll report the error
6517                                  * but try to keep going.
6518                                  */
6519                                 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6520                                         dev_info(&pf->pdev->dev,
6521                                                  "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6522                                                  ret);
6523                                         pf->vsi[pf->lan_vsi]->uplink_seid
6524                                                                 = pf->mac_seid;
6525                                         break;
6526                                 } else if (pf->veb[v]->uplink_seid == 0) {
6527                                         dev_info(&pf->pdev->dev,
6528                                                  "rebuild of orphan VEB failed: %d\n",
6529                                                  ret);
6530                                 }
6531                         }
6532                 }
6533         }
6534
6535         if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6536                 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6537                 /* no VEB, so rebuild only the Main VSI */
6538                 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6539                 if (ret) {
6540                         dev_info(&pf->pdev->dev,
6541                                  "rebuild of Main VSI failed: %d\n", ret);
6542                         goto end_core_reset;
6543                 }
6544         }
6545
6546         if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
6547             (pf->hw.aq.fw_maj_ver < 4)) {
6548                 msleep(75);
6549                 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6550                 if (ret)
6551                         dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
6552                                  i40e_stat_str(&pf->hw, ret),
6553                                  i40e_aq_str(&pf->hw,
6554                                              pf->hw.aq.asq_last_status));
6555         }
6556         /* reinit the misc interrupt */
6557         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6558                 ret = i40e_setup_misc_vector(pf);
6559
6560         /* restart the VSIs that were rebuilt and running before the reset */
6561         i40e_pf_unquiesce_all_vsi(pf);
6562
6563         if (pf->num_alloc_vfs) {
6564                 for (v = 0; v < pf->num_alloc_vfs; v++)
6565                         i40e_reset_vf(&pf->vf[v], true);
6566         }
6567
6568         /* tell the firmware that we're starting */
6569         i40e_send_version(pf);
6570
6571 end_core_reset:
6572         clear_bit(__I40E_RESET_FAILED, &pf->state);
6573 clear_recovery:
6574         clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6575 }
6576
6577 /**
6578  * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
6579  * @pf: board private structure
6580  *
6581  * Close up the VFs and other things in prep for a Core Reset,
6582  * then get ready to rebuild the world.
6583  **/
6584 static void i40e_handle_reset_warning(struct i40e_pf *pf)
6585 {
6586         i40e_prep_for_reset(pf);
6587         i40e_reset_and_rebuild(pf, false);
6588 }
6589
6590 /**
6591  * i40e_handle_mdd_event
6592  * @pf: pointer to the PF structure
6593  *
6594  * Called from the MDD irq handler to identify possibly malicious vfs
6595  **/
6596 static void i40e_handle_mdd_event(struct i40e_pf *pf)
6597 {
6598         struct i40e_hw *hw = &pf->hw;
6599         bool mdd_detected = false;
6600         bool pf_mdd_detected = false;
6601         struct i40e_vf *vf;
6602         u32 reg;
6603         int i;
6604
6605         if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
6606                 return;
6607
6608         /* find what triggered the MDD event */
6609         reg = rd32(hw, I40E_GL_MDET_TX);
6610         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6611                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6612                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6613                 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6614                                 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6615                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6616                                 I40E_GL_MDET_TX_EVENT_SHIFT;
6617                 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6618                                 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6619                                 pf->hw.func_caps.base_queue;
6620                 if (netif_msg_tx_err(pf))
6621                         dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
6622                                  event, queue, pf_num, vf_num);
6623                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6624                 mdd_detected = true;
6625         }
6626         reg = rd32(hw, I40E_GL_MDET_RX);
6627         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6628                 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6629                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6630                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6631                                 I40E_GL_MDET_RX_EVENT_SHIFT;
6632                 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6633                                 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6634                                 pf->hw.func_caps.base_queue;
6635                 if (netif_msg_rx_err(pf))
6636                         dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6637                                  event, queue, func);
6638                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6639                 mdd_detected = true;
6640         }
6641
6642         if (mdd_detected) {
6643                 reg = rd32(hw, I40E_PF_MDET_TX);
6644                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6645                         wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
6646                         dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
6647                         pf_mdd_detected = true;
6648                 }
6649                 reg = rd32(hw, I40E_PF_MDET_RX);
6650                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6651                         wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
6652                         dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
6653                         pf_mdd_detected = true;
6654                 }
6655                 /* Queue belongs to the PF, initiate a reset */
6656                 if (pf_mdd_detected) {
6657                         set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6658                         i40e_service_event_schedule(pf);
6659                 }
6660         }
6661
6662         /* see if one of the VFs needs its hand slapped */
6663         for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
6664                 vf = &(pf->vf[i]);
6665                 reg = rd32(hw, I40E_VP_MDET_TX(i));
6666                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6667                         wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
6668                         vf->num_mdd_events++;
6669                         dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
6670                                  i);
6671                 }
6672
6673                 reg = rd32(hw, I40E_VP_MDET_RX(i));
6674                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6675                         wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
6676                         vf->num_mdd_events++;
6677                         dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
6678                                  i);
6679                 }
6680
6681                 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
6682                         dev_info(&pf->pdev->dev,
6683                                  "Too many MDD events on VF %d, disabled\n", i);
6684                         dev_info(&pf->pdev->dev,
6685                                  "Use PF Control I/F to re-enable the VF\n");
6686                         set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
6687                 }
6688         }
6689
6690         /* re-enable mdd interrupt cause */
6691         clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
6692         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
6693         reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
6694         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
6695         i40e_flush(hw);
6696 }
6697
6698 #ifdef CONFIG_I40E_VXLAN
6699 /**
6700  * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
6701  * @pf: board private structure
6702  **/
6703 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6704 {
6705         struct i40e_hw *hw = &pf->hw;
6706         i40e_status ret;
6707         __be16 port;
6708         int i;
6709
6710         if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
6711                 return;
6712
6713         pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
6714
6715         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6716                 if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
6717                         pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
6718                         port = pf->vxlan_ports[i];
6719                         if (port)
6720                                 ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
6721                                                      I40E_AQC_TUNNEL_TYPE_VXLAN,
6722                                                      NULL, NULL);
6723                         else
6724                                 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
6725
6726                         if (ret) {
6727                                 dev_info(&pf->pdev->dev,
6728                                          "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
6729                                          port ? "add" : "delete",
6730                                          ntohs(port), i,
6731                                          i40e_stat_str(&pf->hw, ret),
6732                                          i40e_aq_str(&pf->hw,
6733                                                     pf->hw.aq.asq_last_status));
6734                                 pf->vxlan_ports[i] = 0;
6735                         }
6736                 }
6737         }
6738 }
6739
6740 #endif
6741 /**
6742  * i40e_service_task - Run the driver's async subtasks
6743  * @work: pointer to work_struct containing our data
6744  **/
6745 static void i40e_service_task(struct work_struct *work)
6746 {
6747         struct i40e_pf *pf = container_of(work,
6748                                           struct i40e_pf,
6749                                           service_task);
6750         unsigned long start_time = jiffies;
6751
6752         /* don't bother with service tasks if a reset is in progress */
6753         if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6754                 i40e_service_event_complete(pf);
6755                 return;
6756         }
6757
6758         i40e_reset_subtask(pf);
6759         i40e_handle_mdd_event(pf);
6760         i40e_vc_process_vflr_event(pf);
6761         i40e_watchdog_subtask(pf);
6762         i40e_fdir_reinit_subtask(pf);
6763         i40e_sync_filters_subtask(pf);
6764 #ifdef CONFIG_I40E_VXLAN
6765         i40e_sync_vxlan_filters_subtask(pf);
6766 #endif
6767         i40e_clean_adminq_subtask(pf);
6768
6769         i40e_service_event_complete(pf);
6770
6771         /* If the tasks have taken longer than one timer cycle or there
6772          * is more work to be done, reschedule the service task now
6773          * rather than wait for the timer to tick again.
6774          */
6775         if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
6776             test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)            ||
6777             test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)               ||
6778             test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
6779                 i40e_service_event_schedule(pf);
6780 }
6781
6782 /**
6783  * i40e_service_timer - timer callback
6784  * @data: pointer to PF struct
6785  **/
6786 static void i40e_service_timer(unsigned long data)
6787 {
6788         struct i40e_pf *pf = (struct i40e_pf *)data;
6789
6790         mod_timer(&pf->service_timer,
6791                   round_jiffies(jiffies + pf->service_timer_period));
6792         i40e_service_event_schedule(pf);
6793 }
6794
6795 /**
6796  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
6797  * @vsi: the VSI being configured
6798  **/
6799 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6800 {
6801         struct i40e_pf *pf = vsi->back;
6802
6803         switch (vsi->type) {
6804         case I40E_VSI_MAIN:
6805                 vsi->alloc_queue_pairs = pf->num_lan_qps;
6806                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6807                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
6808                 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6809                         vsi->num_q_vectors = pf->num_lan_msix;
6810                 else
6811                         vsi->num_q_vectors = 1;
6812
6813                 break;
6814
6815         case I40E_VSI_FDIR:
6816                 vsi->alloc_queue_pairs = 1;
6817                 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
6818                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
6819                 vsi->num_q_vectors = 1;
6820                 break;
6821
6822         case I40E_VSI_VMDQ2:
6823                 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
6824                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6825                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
6826                 vsi->num_q_vectors = pf->num_vmdq_msix;
6827                 break;
6828
6829         case I40E_VSI_SRIOV:
6830                 vsi->alloc_queue_pairs = pf->num_vf_qps;
6831                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6832                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
6833                 break;
6834
6835 #ifdef I40E_FCOE
6836         case I40E_VSI_FCOE:
6837                 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6838                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6839                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
6840                 vsi->num_q_vectors = pf->num_fcoe_msix;
6841                 break;
6842
6843 #endif /* I40E_FCOE */
6844         default:
6845                 WARN_ON(1);
6846                 return -ENODATA;
6847         }
6848
6849         return 0;
6850 }
6851
6852 /**
6853  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
6854  * @type: VSI pointer
6855  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
6856  *
6857  * On error: returns error code (negative)
6858  * On success: returns 0
6859  **/
6860 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
6861 {
6862         int size;
6863         int ret = 0;
6864
6865         /* allocate memory for both Tx and Rx ring pointers */
6866         size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
6867         vsi->tx_rings = kzalloc(size, GFP_KERNEL);
6868         if (!vsi->tx_rings)
6869                 return -ENOMEM;
6870         vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
6871
6872         if (alloc_qvectors) {
6873                 /* allocate memory for q_vector pointers */
6874                 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
6875                 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
6876                 if (!vsi->q_vectors) {
6877                         ret = -ENOMEM;
6878                         goto err_vectors;
6879                 }
6880         }
6881         return ret;
6882
6883 err_vectors:
6884         kfree(vsi->tx_rings);
6885         return ret;
6886 }
6887
6888 /**
6889  * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
6890  * @pf: board private structure
6891  * @type: type of VSI
6892  *
6893  * On error: returns error code (negative)
6894  * On success: returns vsi index in PF (positive)
6895  **/
6896 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
6897 {
6898         int ret = -ENODEV;
6899         struct i40e_vsi *vsi;
6900         int vsi_idx;
6901         int i;
6902
6903         /* Need to protect the allocation of the VSIs at the PF level */
6904         mutex_lock(&pf->switch_mutex);
6905
6906         /* VSI list may be fragmented if VSI creation/destruction has
6907          * been happening.  We can afford to do a quick scan to look
6908          * for any free VSIs in the list.
6909          *
6910          * find next empty vsi slot, looping back around if necessary
6911          */
6912         i = pf->next_vsi;
6913         while (i < pf->num_alloc_vsi && pf->vsi[i])
6914                 i++;
6915         if (i >= pf->num_alloc_vsi) {
6916                 i = 0;
6917                 while (i < pf->next_vsi && pf->vsi[i])
6918                         i++;
6919         }
6920
6921         if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
6922                 vsi_idx = i;             /* Found one! */
6923         } else {
6924                 ret = -ENODEV;
6925                 goto unlock_pf;  /* out of VSI slots! */
6926         }
6927         pf->next_vsi = ++i;
6928
6929         vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
6930         if (!vsi) {
6931                 ret = -ENOMEM;
6932                 goto unlock_pf;
6933         }
6934         vsi->type = type;
6935         vsi->back = pf;
6936         set_bit(__I40E_DOWN, &vsi->state);
6937         vsi->flags = 0;
6938         vsi->idx = vsi_idx;
6939         vsi->rx_itr_setting = pf->rx_itr_default;
6940         vsi->tx_itr_setting = pf->tx_itr_default;
6941         vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
6942                                 pf->rss_table_size : 64;
6943         vsi->netdev_registered = false;
6944         vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
6945         INIT_LIST_HEAD(&vsi->mac_filter_list);
6946         vsi->irqs_ready = false;
6947
6948         ret = i40e_set_num_rings_in_vsi(vsi);
6949         if (ret)
6950                 goto err_rings;
6951
6952         ret = i40e_vsi_alloc_arrays(vsi, true);
6953         if (ret)
6954                 goto err_rings;
6955
6956         /* Setup default MSIX irq handler for VSI */
6957         i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
6958
6959         pf->vsi[vsi_idx] = vsi;
6960         ret = vsi_idx;
6961         goto unlock_pf;
6962
6963 err_rings:
6964         pf->next_vsi = i - 1;
6965         kfree(vsi);
6966 unlock_pf:
6967         mutex_unlock(&pf->switch_mutex);
6968         return ret;
6969 }
6970
6971 /**
6972  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
6973  * @type: VSI pointer
6974  * @free_qvectors: a bool to specify if q_vectors need to be freed.
6975  *
6976  * On error: returns error code (negative)
6977  * On success: returns 0
6978  **/
6979 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
6980 {
6981         /* free the ring and vector containers */
6982         if (free_qvectors) {
6983                 kfree(vsi->q_vectors);
6984                 vsi->q_vectors = NULL;
6985         }
6986         kfree(vsi->tx_rings);
6987         vsi->tx_rings = NULL;
6988         vsi->rx_rings = NULL;
6989 }
6990
6991 /**
6992  * i40e_vsi_clear - Deallocate the VSI provided
6993  * @vsi: the VSI being un-configured
6994  **/
6995 static int i40e_vsi_clear(struct i40e_vsi *vsi)
6996 {
6997         struct i40e_pf *pf;
6998
6999         if (!vsi)
7000                 return 0;
7001
7002         if (!vsi->back)
7003                 goto free_vsi;
7004         pf = vsi->back;
7005
7006         mutex_lock(&pf->switch_mutex);
7007         if (!pf->vsi[vsi->idx]) {
7008                 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7009                         vsi->idx, vsi->idx, vsi, vsi->type);
7010                 goto unlock_vsi;
7011         }
7012
7013         if (pf->vsi[vsi->idx] != vsi) {
7014                 dev_err(&pf->pdev->dev,
7015                         "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7016                         pf->vsi[vsi->idx]->idx,
7017                         pf->vsi[vsi->idx],
7018                         pf->vsi[vsi->idx]->type,
7019                         vsi->idx, vsi, vsi->type);
7020                 goto unlock_vsi;
7021         }
7022
7023         /* updates the PF for this cleared vsi */
7024         i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7025         i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7026
7027         i40e_vsi_free_arrays(vsi, true);
7028
7029         pf->vsi[vsi->idx] = NULL;
7030         if (vsi->idx < pf->next_vsi)
7031                 pf->next_vsi = vsi->idx;
7032
7033 unlock_vsi:
7034         mutex_unlock(&pf->switch_mutex);
7035 free_vsi:
7036         kfree(vsi);
7037
7038         return 0;
7039 }
7040
7041 /**
7042  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7043  * @vsi: the VSI being cleaned
7044  **/
7045 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7046 {
7047         int i;
7048
7049         if (vsi->tx_rings && vsi->tx_rings[0]) {
7050                 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7051                         kfree_rcu(vsi->tx_rings[i], rcu);
7052                         vsi->tx_rings[i] = NULL;
7053                         vsi->rx_rings[i] = NULL;
7054                 }
7055         }
7056 }
7057
7058 /**
7059  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7060  * @vsi: the VSI being configured
7061  **/
7062 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7063 {
7064         struct i40e_ring *tx_ring, *rx_ring;
7065         struct i40e_pf *pf = vsi->back;
7066         int i;
7067
7068         /* Set basic values in the rings to be used later during open() */
7069         for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7070                 /* allocate space for both Tx and Rx in one shot */
7071                 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7072                 if (!tx_ring)
7073                         goto err_out;
7074
7075                 tx_ring->queue_index = i;
7076                 tx_ring->reg_idx = vsi->base_queue + i;
7077                 tx_ring->ring_active = false;
7078                 tx_ring->vsi = vsi;
7079                 tx_ring->netdev = vsi->netdev;
7080                 tx_ring->dev = &pf->pdev->dev;
7081                 tx_ring->count = vsi->num_desc;
7082                 tx_ring->size = 0;
7083                 tx_ring->dcb_tc = 0;
7084                 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7085                         tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7086                 if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
7087                         tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
7088                 vsi->tx_rings[i] = tx_ring;
7089
7090                 rx_ring = &tx_ring[1];
7091                 rx_ring->queue_index = i;
7092                 rx_ring->reg_idx = vsi->base_queue + i;
7093                 rx_ring->ring_active = false;
7094                 rx_ring->vsi = vsi;
7095                 rx_ring->netdev = vsi->netdev;
7096                 rx_ring->dev = &pf->pdev->dev;
7097                 rx_ring->count = vsi->num_desc;
7098                 rx_ring->size = 0;
7099                 rx_ring->dcb_tc = 0;
7100                 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
7101                         set_ring_16byte_desc_enabled(rx_ring);
7102                 else
7103                         clear_ring_16byte_desc_enabled(rx_ring);
7104                 vsi->rx_rings[i] = rx_ring;
7105         }
7106
7107         return 0;
7108
7109 err_out:
7110         i40e_vsi_clear_rings(vsi);
7111         return -ENOMEM;
7112 }
7113
7114 /**
7115  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7116  * @pf: board private structure
7117  * @vectors: the number of MSI-X vectors to request
7118  *
7119  * Returns the number of vectors reserved, or error
7120  **/
7121 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7122 {
7123         vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7124                                         I40E_MIN_MSIX, vectors);
7125         if (vectors < 0) {
7126                 dev_info(&pf->pdev->dev,
7127                          "MSI-X vector reservation failed: %d\n", vectors);
7128                 vectors = 0;
7129         }
7130
7131         return vectors;
7132 }
7133
7134 /**
7135  * i40e_init_msix - Setup the MSIX capability
7136  * @pf: board private structure
7137  *
7138  * Work with the OS to set up the MSIX vectors needed.
7139  *
7140  * Returns the number of vectors reserved or negative on failure
7141  **/
7142 static int i40e_init_msix(struct i40e_pf *pf)
7143 {
7144         struct i40e_hw *hw = &pf->hw;
7145         int vectors_left;
7146         int v_budget, i;
7147         int v_actual;
7148
7149         if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7150                 return -ENODEV;
7151
7152         /* The number of vectors we'll request will be comprised of:
7153          *   - Add 1 for "other" cause for Admin Queue events, etc.
7154          *   - The number of LAN queue pairs
7155          *      - Queues being used for RSS.
7156          *              We don't need as many as max_rss_size vectors.
7157          *              use rss_size instead in the calculation since that
7158          *              is governed by number of cpus in the system.
7159          *      - assumes symmetric Tx/Rx pairing
7160          *   - The number of VMDq pairs
7161 #ifdef I40E_FCOE
7162          *   - The number of FCOE qps.
7163 #endif
7164          * Once we count this up, try the request.
7165          *
7166          * If we can't get what we want, we'll simplify to nearly nothing
7167          * and try again.  If that still fails, we punt.
7168          */
7169         vectors_left = hw->func_caps.num_msix_vectors;
7170         v_budget = 0;
7171
7172         /* reserve one vector for miscellaneous handler */
7173         if (vectors_left) {
7174                 v_budget++;
7175                 vectors_left--;
7176         }
7177
7178         /* reserve vectors for the main PF traffic queues */
7179         pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
7180         vectors_left -= pf->num_lan_msix;
7181         v_budget += pf->num_lan_msix;
7182
7183         /* reserve one vector for sideband flow director */
7184         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7185                 if (vectors_left) {
7186                         v_budget++;
7187                         vectors_left--;
7188                 } else {
7189                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7190                 }
7191         }
7192
7193 #ifdef I40E_FCOE
7194         /* can we reserve enough for FCoE? */
7195         if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7196                 if (!vectors_left)
7197                         pf->num_fcoe_msix = 0;
7198                 else if (vectors_left >= pf->num_fcoe_qps)
7199                         pf->num_fcoe_msix = pf->num_fcoe_qps;
7200                 else
7201                         pf->num_fcoe_msix = 1;
7202                 v_budget += pf->num_fcoe_msix;
7203                 vectors_left -= pf->num_fcoe_msix;
7204         }
7205
7206 #endif
7207         /* any vectors left over go for VMDq support */
7208         if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7209                 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7210                 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7211
7212                 /* if we're short on vectors for what's desired, we limit
7213                  * the queues per vmdq.  If this is still more than are
7214                  * available, the user will need to change the number of
7215                  * queues/vectors used by the PF later with the ethtool
7216                  * channels command
7217                  */
7218                 if (vmdq_vecs < vmdq_vecs_wanted)
7219                         pf->num_vmdq_qps = 1;
7220                 pf->num_vmdq_msix = pf->num_vmdq_qps;
7221
7222                 v_budget += vmdq_vecs;
7223                 vectors_left -= vmdq_vecs;
7224         }
7225
7226         pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7227                                    GFP_KERNEL);
7228         if (!pf->msix_entries)
7229                 return -ENOMEM;
7230
7231         for (i = 0; i < v_budget; i++)
7232                 pf->msix_entries[i].entry = i;
7233         v_actual = i40e_reserve_msix_vectors(pf, v_budget);
7234
7235         if (v_actual != v_budget) {
7236                 /* If we have limited resources, we will start with no vectors
7237                  * for the special features and then allocate vectors to some
7238                  * of these features based on the policy and at the end disable
7239                  * the features that did not get any vectors.
7240                  */
7241 #ifdef I40E_FCOE
7242                 pf->num_fcoe_qps = 0;
7243                 pf->num_fcoe_msix = 0;
7244 #endif
7245                 pf->num_vmdq_msix = 0;
7246         }
7247
7248         if (v_actual < I40E_MIN_MSIX) {
7249                 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7250                 kfree(pf->msix_entries);
7251                 pf->msix_entries = NULL;
7252                 return -ENODEV;
7253
7254         } else if (v_actual == I40E_MIN_MSIX) {
7255                 /* Adjust for minimal MSIX use */
7256                 pf->num_vmdq_vsis = 0;
7257                 pf->num_vmdq_qps = 0;
7258                 pf->num_lan_qps = 1;
7259                 pf->num_lan_msix = 1;
7260
7261         } else if (v_actual != v_budget) {
7262                 int vec;
7263
7264                 /* reserve the misc vector */
7265                 vec = v_actual - 1;
7266
7267                 /* Scale vector usage down */
7268                 pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
7269                 pf->num_vmdq_vsis = 1;
7270                 pf->num_vmdq_qps = 1;
7271                 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7272
7273                 /* partition out the remaining vectors */
7274                 switch (vec) {
7275                 case 2:
7276                         pf->num_lan_msix = 1;
7277                         break;
7278                 case 3:
7279 #ifdef I40E_FCOE
7280                         /* give one vector to FCoE */
7281                         if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7282                                 pf->num_lan_msix = 1;
7283                                 pf->num_fcoe_msix = 1;
7284                         }
7285 #else
7286                         pf->num_lan_msix = 2;
7287 #endif
7288                         break;
7289                 default:
7290 #ifdef I40E_FCOE
7291                         /* give one vector to FCoE */
7292                         if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7293                                 pf->num_fcoe_msix = 1;
7294                                 vec--;
7295                         }
7296 #endif
7297                         /* give the rest to the PF */
7298                         pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
7299                         break;
7300                 }
7301         }
7302
7303         if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7304             (pf->num_vmdq_msix == 0)) {
7305                 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7306                 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7307         }
7308 #ifdef I40E_FCOE
7309
7310         if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7311                 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7312                 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7313         }
7314 #endif
7315         return v_actual;
7316 }
7317
7318 /**
7319  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7320  * @vsi: the VSI being configured
7321  * @v_idx: index of the vector in the vsi struct
7322  *
7323  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
7324  **/
7325 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
7326 {
7327         struct i40e_q_vector *q_vector;
7328
7329         /* allocate q_vector */
7330         q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7331         if (!q_vector)
7332                 return -ENOMEM;
7333
7334         q_vector->vsi = vsi;
7335         q_vector->v_idx = v_idx;
7336         cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7337         if (vsi->netdev)
7338                 netif_napi_add(vsi->netdev, &q_vector->napi,
7339                                i40e_napi_poll, NAPI_POLL_WEIGHT);
7340
7341         q_vector->rx.latency_range = I40E_LOW_LATENCY;
7342         q_vector->tx.latency_range = I40E_LOW_LATENCY;
7343
7344         /* tie q_vector and vsi together */
7345         vsi->q_vectors[v_idx] = q_vector;
7346
7347         return 0;
7348 }
7349
7350 /**
7351  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7352  * @vsi: the VSI being configured
7353  *
7354  * We allocate one q_vector per queue interrupt.  If allocation fails we
7355  * return -ENOMEM.
7356  **/
7357 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7358 {
7359         struct i40e_pf *pf = vsi->back;
7360         int v_idx, num_q_vectors;
7361         int err;
7362
7363         /* if not MSIX, give the one vector only to the LAN VSI */
7364         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7365                 num_q_vectors = vsi->num_q_vectors;
7366         else if (vsi == pf->vsi[pf->lan_vsi])
7367                 num_q_vectors = 1;
7368         else
7369                 return -EINVAL;
7370
7371         for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7372                 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
7373                 if (err)
7374                         goto err_out;
7375         }
7376
7377         return 0;
7378
7379 err_out:
7380         while (v_idx--)
7381                 i40e_free_q_vector(vsi, v_idx);
7382
7383         return err;
7384 }
7385
7386 /**
7387  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7388  * @pf: board private structure to initialize
7389  **/
7390 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
7391 {
7392         int vectors = 0;
7393         ssize_t size;
7394
7395         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7396                 vectors = i40e_init_msix(pf);
7397                 if (vectors < 0) {
7398                         pf->flags &= ~(I40E_FLAG_MSIX_ENABLED   |
7399 #ifdef I40E_FCOE
7400                                        I40E_FLAG_FCOE_ENABLED   |
7401 #endif
7402                                        I40E_FLAG_RSS_ENABLED    |
7403                                        I40E_FLAG_DCB_CAPABLE    |
7404                                        I40E_FLAG_SRIOV_ENABLED  |
7405                                        I40E_FLAG_FD_SB_ENABLED  |
7406                                        I40E_FLAG_FD_ATR_ENABLED |
7407                                        I40E_FLAG_VMDQ_ENABLED);
7408
7409                         /* rework the queue expectations without MSIX */
7410                         i40e_determine_queue_usage(pf);
7411                 }
7412         }
7413
7414         if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7415             (pf->flags & I40E_FLAG_MSI_ENABLED)) {
7416                 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
7417                 vectors = pci_enable_msi(pf->pdev);
7418                 if (vectors < 0) {
7419                         dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
7420                                  vectors);
7421                         pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7422                 }
7423                 vectors = 1;  /* one MSI or Legacy vector */
7424         }
7425
7426         if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
7427                 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
7428
7429         /* set up vector assignment tracking */
7430         size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
7431         pf->irq_pile = kzalloc(size, GFP_KERNEL);
7432         if (!pf->irq_pile) {
7433                 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
7434                 return -ENOMEM;
7435         }
7436         pf->irq_pile->num_entries = vectors;
7437         pf->irq_pile->search_hint = 0;
7438
7439         /* track first vector for misc interrupts, ignore return */
7440         (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
7441
7442         return 0;
7443 }
7444
7445 /**
7446  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7447  * @pf: board private structure
7448  *
7449  * This sets up the handler for MSIX 0, which is used to manage the
7450  * non-queue interrupts, e.g. AdminQ and errors.  This is not used
7451  * when in MSI or Legacy interrupt mode.
7452  **/
7453 static int i40e_setup_misc_vector(struct i40e_pf *pf)
7454 {
7455         struct i40e_hw *hw = &pf->hw;
7456         int err = 0;
7457
7458         /* Only request the irq if this is the first time through, and
7459          * not when we're rebuilding after a Reset
7460          */
7461         if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7462                 err = request_irq(pf->msix_entries[0].vector,
7463                                   i40e_intr, 0, pf->int_name, pf);
7464                 if (err) {
7465                         dev_info(&pf->pdev->dev,
7466                                  "request_irq for %s failed: %d\n",
7467                                  pf->int_name, err);
7468                         return -EFAULT;
7469                 }
7470         }
7471
7472         i40e_enable_misc_int_causes(pf);
7473
7474         /* associate no queues to the misc vector */
7475         wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7476         wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7477
7478         i40e_flush(hw);
7479
7480         i40e_irq_dynamic_enable_icr0(pf);
7481
7482         return err;
7483 }
7484
7485 /**
7486  * i40e_config_rss_aq - Prepare for RSS using AQ commands
7487  * @vsi: vsi structure
7488  * @seed: RSS hash seed
7489  **/
7490 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
7491 {
7492         struct i40e_aqc_get_set_rss_key_data rss_key;
7493         struct i40e_pf *pf = vsi->back;
7494         struct i40e_hw *hw = &pf->hw;
7495         bool pf_lut = false;
7496         u8 *rss_lut;
7497         int ret, i;
7498
7499         memset(&rss_key, 0, sizeof(rss_key));
7500         memcpy(&rss_key, seed, sizeof(rss_key));
7501
7502         rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
7503         if (!rss_lut)
7504                 return -ENOMEM;
7505
7506         /* Populate the LUT with max no. of queues in round robin fashion */
7507         for (i = 0; i < vsi->rss_table_size; i++)
7508                 rss_lut[i] = i % vsi->rss_size;
7509
7510         ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
7511         if (ret) {
7512                 dev_info(&pf->pdev->dev,
7513                          "Cannot set RSS key, err %s aq_err %s\n",
7514                          i40e_stat_str(&pf->hw, ret),
7515                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7516                 return ret;
7517         }
7518
7519         if (vsi->type == I40E_VSI_MAIN)
7520                 pf_lut = true;
7521
7522         ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
7523                                   vsi->rss_table_size);
7524         if (ret)
7525                 dev_info(&pf->pdev->dev,
7526                          "Cannot set RSS lut, err %s aq_err %s\n",
7527                          i40e_stat_str(&pf->hw, ret),
7528                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7529
7530         return ret;
7531 }
7532
7533 /**
7534  * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
7535  * @vsi: VSI structure
7536  **/
7537 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
7538 {
7539         u8 seed[I40E_HKEY_ARRAY_SIZE];
7540         struct i40e_pf *pf = vsi->back;
7541
7542         netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7543         vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7544
7545         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
7546                 return i40e_config_rss_aq(vsi, seed);
7547
7548         return 0;
7549 }
7550
7551 /**
7552  * i40e_config_rss_reg - Prepare for RSS if used
7553  * @pf: board private structure
7554  * @seed: RSS hash seed
7555  **/
7556 static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
7557 {
7558         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7559         struct i40e_hw *hw = &pf->hw;
7560         u32 *seed_dw = (u32 *)seed;
7561         u32 current_queue = 0;
7562         u32 lut = 0;
7563         int i, j;
7564
7565         /* Fill out hash function seed */
7566         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7567                 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
7568
7569         for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
7570                 lut = 0;
7571                 for (j = 0; j < 4; j++) {
7572                         if (current_queue == vsi->rss_size)
7573                                 current_queue = 0;
7574                         lut |= ((current_queue) << (8 * j));
7575                         current_queue++;
7576                 }
7577                 wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
7578         }
7579         i40e_flush(hw);
7580
7581         return 0;
7582 }
7583
7584 /**
7585  * i40e_config_rss - Prepare for RSS if used
7586  * @pf: board private structure
7587  **/
7588 static int i40e_config_rss(struct i40e_pf *pf)
7589 {
7590         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7591         u8 seed[I40E_HKEY_ARRAY_SIZE];
7592         struct i40e_hw *hw = &pf->hw;
7593         u32 reg_val;
7594         u64 hena;
7595
7596         netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7597
7598         /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
7599         hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
7600                 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
7601         hena |= i40e_pf_get_default_rss_hena(pf);
7602
7603         wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
7604         wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
7605
7606         vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7607
7608         /* Determine the RSS table size based on the hardware capabilities */
7609         reg_val = rd32(hw, I40E_PFQF_CTL_0);
7610         reg_val = (pf->rss_table_size == 512) ?
7611                         (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
7612                         (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
7613         wr32(hw, I40E_PFQF_CTL_0, reg_val);
7614
7615         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
7616                 return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
7617         else
7618                 return i40e_config_rss_reg(pf, seed);
7619 }
7620
7621 /**
7622  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
7623  * @pf: board private structure
7624  * @queue_count: the requested queue count for rss.
7625  *
7626  * returns 0 if rss is not enabled, if enabled returns the final rss queue
7627  * count which may be different from the requested queue count.
7628  **/
7629 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
7630 {
7631         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7632         int new_rss_size;
7633
7634         if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
7635                 return 0;
7636
7637         new_rss_size = min_t(int, queue_count, pf->rss_size_max);
7638
7639         if (queue_count != vsi->num_queue_pairs) {
7640                 vsi->req_queue_pairs = queue_count;
7641                 i40e_prep_for_reset(pf);
7642
7643                 pf->rss_size = new_rss_size;
7644
7645                 i40e_reset_and_rebuild(pf, true);
7646                 i40e_config_rss(pf);
7647         }
7648         dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size);
7649         return pf->rss_size;
7650 }
7651
7652 /**
7653  * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
7654  * @pf: board private structure
7655  **/
7656 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
7657 {
7658         i40e_status status;
7659         bool min_valid, max_valid;
7660         u32 max_bw, min_bw;
7661
7662         status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
7663                                            &min_valid, &max_valid);
7664
7665         if (!status) {
7666                 if (min_valid)
7667                         pf->npar_min_bw = min_bw;
7668                 if (max_valid)
7669                         pf->npar_max_bw = max_bw;
7670         }
7671
7672         return status;
7673 }
7674
7675 /**
7676  * i40e_set_npar_bw_setting - Set BW settings for this PF partition
7677  * @pf: board private structure
7678  **/
7679 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
7680 {
7681         struct i40e_aqc_configure_partition_bw_data bw_data;
7682         i40e_status status;
7683
7684         /* Set the valid bit for this PF */
7685         bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
7686         bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
7687         bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
7688
7689         /* Set the new bandwidths */
7690         status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
7691
7692         return status;
7693 }
7694
7695 /**
7696  * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
7697  * @pf: board private structure
7698  **/
7699 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
7700 {
7701         /* Commit temporary BW setting to permanent NVM image */
7702         enum i40e_admin_queue_err last_aq_status;
7703         i40e_status ret;
7704         u16 nvm_word;
7705
7706         if (pf->hw.partition_id != 1) {
7707                 dev_info(&pf->pdev->dev,
7708                          "Commit BW only works on partition 1! This is partition %d",
7709                          pf->hw.partition_id);
7710                 ret = I40E_NOT_SUPPORTED;
7711                 goto bw_commit_out;
7712         }
7713
7714         /* Acquire NVM for read access */
7715         ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
7716         last_aq_status = pf->hw.aq.asq_last_status;
7717         if (ret) {
7718                 dev_info(&pf->pdev->dev,
7719                          "Cannot acquire NVM for read access, err %s aq_err %s\n",
7720                          i40e_stat_str(&pf->hw, ret),
7721                          i40e_aq_str(&pf->hw, last_aq_status));
7722                 goto bw_commit_out;
7723         }
7724
7725         /* Read word 0x10 of NVM - SW compatibility word 1 */
7726         ret = i40e_aq_read_nvm(&pf->hw,
7727                                I40E_SR_NVM_CONTROL_WORD,
7728                                0x10, sizeof(nvm_word), &nvm_word,
7729                                false, NULL);
7730         /* Save off last admin queue command status before releasing
7731          * the NVM
7732          */
7733         last_aq_status = pf->hw.aq.asq_last_status;
7734         i40e_release_nvm(&pf->hw);
7735         if (ret) {
7736                 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
7737                          i40e_stat_str(&pf->hw, ret),
7738                          i40e_aq_str(&pf->hw, last_aq_status));
7739                 goto bw_commit_out;
7740         }
7741
7742         /* Wait a bit for NVM release to complete */
7743         msleep(50);
7744
7745         /* Acquire NVM for write access */
7746         ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
7747         last_aq_status = pf->hw.aq.asq_last_status;
7748         if (ret) {
7749                 dev_info(&pf->pdev->dev,
7750                          "Cannot acquire NVM for write access, err %s aq_err %s\n",
7751                          i40e_stat_str(&pf->hw, ret),
7752                          i40e_aq_str(&pf->hw, last_aq_status));
7753                 goto bw_commit_out;
7754         }
7755         /* Write it back out unchanged to initiate update NVM,
7756          * which will force a write of the shadow (alt) RAM to
7757          * the NVM - thus storing the bandwidth values permanently.
7758          */
7759         ret = i40e_aq_update_nvm(&pf->hw,
7760                                  I40E_SR_NVM_CONTROL_WORD,
7761                                  0x10, sizeof(nvm_word),
7762                                  &nvm_word, true, NULL);
7763         /* Save off last admin queue command status before releasing
7764          * the NVM
7765          */
7766         last_aq_status = pf->hw.aq.asq_last_status;
7767         i40e_release_nvm(&pf->hw);
7768         if (ret)
7769                 dev_info(&pf->pdev->dev,
7770                          "BW settings NOT SAVED, err %s aq_err %s\n",
7771                          i40e_stat_str(&pf->hw, ret),
7772                          i40e_aq_str(&pf->hw, last_aq_status));
7773 bw_commit_out:
7774
7775         return ret;
7776 }
7777
/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Returns 0 on success, -ENOMEM if the queue-pile tracking allocation fails.
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
        int err = 0;
        int size;

        /* Default message level; the module "debug" parameter, when set,
         * overrides both the netif message level and the HW debug mask.
         */
        pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
                                (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
        pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
        if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
                if (I40E_DEBUG_USER & debug)
                        pf->hw.debug_mask = debug;
                pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
                                                I40E_DEFAULT_MSG_ENABLE);
        }

        /* Set default capability flags */
        pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
                    I40E_FLAG_MSI_ENABLED     |
                    I40E_FLAG_MSIX_ENABLED;

        /* Packet-split Rx only when an IOMMU can protect the split buffers;
         * otherwise fall back to single-buffer Rx.
         */
        if (iommu_present(&pci_bus_type))
                pf->flags |= I40E_FLAG_RX_PS_ENABLED;
        else
                pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;

        /* Set default ITR (interrupt throttling) with dynamic moderation */
        pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
        pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

        /* Depending on PF configurations, it is possible that the RSS
         * maximum might end up larger than the available queues
         */
        pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
        pf->rss_size = 1;
        pf->rss_table_size = pf->hw.func_caps.rss_table_size;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
                                 pf->hw.func_caps.num_tx_qp);
        if (pf->hw.func_caps.rss) {
                pf->flags |= I40E_FLAG_RSS_ENABLED;
                pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
        }

        /* MFP mode enabled */
        if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
                pf->flags |= I40E_FLAG_MFP_ENABLED;
                dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
                if (i40e_get_npar_bw_setting(pf))
                        dev_warn(&pf->pdev->dev,
                                 "Could not get NPAR bw settings\n");
                else
                        dev_info(&pf->pdev->dev,
                                 "Min BW = %8.8x, Max BW = %8.8x\n",
                                 pf->npar_min_bw, pf->npar_max_bw);
        }

        /* FW/NVM is not yet fixed in this regard */
        if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
            (pf->hw.func_caps.fd_filters_best_effort > 0)) {
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
                /* Flow Director sideband filters are incompatible with MFP */
                if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
                        pf->flags |= I40E_FLAG_FD_SB_ENABLED;
                } else {
                        dev_info(&pf->pdev->dev,
                                 "Flow Director Sideband mode Disabled in MFP mode\n");
                }
                pf->fdir_pf_filter_count =
                                 pf->hw.func_caps.fd_filters_guaranteed;
                pf->hw.fdir_shared_filter_count =
                                 pf->hw.func_caps.fd_filters_best_effort;
        }

        if (pf->hw.func_caps.vmdq) {
                pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
                pf->flags |= I40E_FLAG_VMDQ_ENABLED;
        }

#ifdef I40E_FCOE
        err = i40e_init_pf_fcoe(pf);
        if (err)
                dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);

#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
        /* SR-IOV is only offered by partition 1 of an MFP device */
        if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
                pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
                pf->flags |= I40E_FLAG_SRIOV_ENABLED;
                pf->num_req_vfs = min_t(int,
                                        pf->hw.func_caps.num_vfs,
                                        I40E_MAX_VF_COUNT);
        }
#endif /* CONFIG_PCI_IOV */
        /* X722 devices advertise additional RSS/ATR/offload capabilities */
        if (pf->hw.mac.type == I40E_MAC_X722) {
                pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
                             I40E_FLAG_128_QP_RSS_CAPABLE |
                             I40E_FLAG_HW_ATR_EVICT_CAPABLE |
                             I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
                             I40E_FLAG_WB_ON_ITR_CAPABLE |
                             I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
        }
        /* sentinel value; real EEPROM version is read later */
        pf->eeprom_version = 0xDEAD;
        pf->lan_veb = I40E_NO_VEB;
        pf->lan_vsi = I40E_NO_VSI;

        /* set up queue assignment tracking: one u16 slot per Tx queue pair,
         * allocated as a single lump together with the tracking header
         */
        size = sizeof(struct i40e_lump_tracking)
                + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
        pf->qp_pile = kzalloc(size, GFP_KERNEL);
        if (!pf->qp_pile) {
                err = -ENOMEM;
                goto sw_init_done;
        }
        pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
        pf->qp_pile->search_hint = 0;

        pf->tx_timeout_recovery_level = 1;

        mutex_init(&pf->switch_mutex);

        /* If NPAR is enabled nudge the Tx scheduler */
        if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
                i40e_set_npar_bw_setting(pf);

sw_init_done:
        return err;
}
7912
7913 /**
7914  * i40e_set_ntuple - set the ntuple feature flag and take action
7915  * @pf: board private structure to initialize
7916  * @features: the feature set that the stack is suggesting
7917  *
7918  * returns a bool to indicate if reset needs to happen
7919  **/
7920 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
7921 {
7922         bool need_reset = false;
7923
7924         /* Check if Flow Director n-tuple support was enabled or disabled.  If
7925          * the state changed, we need to reset.
7926          */
7927         if (features & NETIF_F_NTUPLE) {
7928                 /* Enable filters and mark for reset */
7929                 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
7930                         need_reset = true;
7931                 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7932         } else {
7933                 /* turn off filters, mark for reset and clear SW filter list */
7934                 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7935                         need_reset = true;
7936                         i40e_fdir_filter_exit(pf);
7937                 }
7938                 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7939                 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
7940                 /* reset fd counters */
7941                 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
7942                 pf->fdir_pf_active_filters = 0;
7943                 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7944                 if (I40E_DEBUG_FD & pf->hw.debug_mask)
7945                         dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
7946                 /* if ATR was auto disabled it can be re-enabled. */
7947                 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
7948                     (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
7949                         pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
7950         }
7951         return need_reset;
7952 }
7953
7954 /**
7955  * i40e_set_features - set the netdev feature flags
7956  * @netdev: ptr to the netdev being adjusted
7957  * @features: the feature set that the stack is suggesting
7958  **/
7959 static int i40e_set_features(struct net_device *netdev,
7960                              netdev_features_t features)
7961 {
7962         struct i40e_netdev_priv *np = netdev_priv(netdev);
7963         struct i40e_vsi *vsi = np->vsi;
7964         struct i40e_pf *pf = vsi->back;
7965         bool need_reset;
7966
7967         if (features & NETIF_F_HW_VLAN_CTAG_RX)
7968                 i40e_vlan_stripping_enable(vsi);
7969         else
7970                 i40e_vlan_stripping_disable(vsi);
7971
7972         need_reset = i40e_set_ntuple(pf, features);
7973
7974         if (need_reset)
7975                 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
7976
7977         return 0;
7978 }
7979
7980 #ifdef CONFIG_I40E_VXLAN
7981 /**
7982  * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port
7983  * @pf: board private structure
7984  * @port: The UDP port to look up
7985  *
7986  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
7987  **/
7988 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
7989 {
7990         u8 i;
7991
7992         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7993                 if (pf->vxlan_ports[i] == port)
7994                         return i;
7995         }
7996
7997         return i;
7998 }
7999
8000 /**
8001  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
8002  * @netdev: This physical port's netdev
8003  * @sa_family: Socket Family that VXLAN is notifying us about
8004  * @port: New UDP port number that VXLAN started listening to
8005  **/
8006 static void i40e_add_vxlan_port(struct net_device *netdev,
8007                                 sa_family_t sa_family, __be16 port)
8008 {
8009         struct i40e_netdev_priv *np = netdev_priv(netdev);
8010         struct i40e_vsi *vsi = np->vsi;
8011         struct i40e_pf *pf = vsi->back;
8012         u8 next_idx;
8013         u8 idx;
8014
8015         if (sa_family == AF_INET6)
8016                 return;
8017
8018         idx = i40e_get_vxlan_port_idx(pf, port);
8019
8020         /* Check if port already exists */
8021         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8022                 netdev_info(netdev, "vxlan port %d already offloaded\n",
8023                             ntohs(port));
8024                 return;
8025         }
8026
8027         /* Now check if there is space to add the new port */
8028         next_idx = i40e_get_vxlan_port_idx(pf, 0);
8029
8030         if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8031                 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
8032                             ntohs(port));
8033                 return;
8034         }
8035
8036         /* New port: add it and mark its index in the bitmap */
8037         pf->vxlan_ports[next_idx] = port;
8038         pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
8039         pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
8040
8041         dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
8042 }
8043
8044 /**
8045  * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
8046  * @netdev: This physical port's netdev
8047  * @sa_family: Socket Family that VXLAN is notifying us about
8048  * @port: UDP port number that VXLAN stopped listening to
8049  **/
8050 static void i40e_del_vxlan_port(struct net_device *netdev,
8051                                 sa_family_t sa_family, __be16 port)
8052 {
8053         struct i40e_netdev_priv *np = netdev_priv(netdev);
8054         struct i40e_vsi *vsi = np->vsi;
8055         struct i40e_pf *pf = vsi->back;
8056         u8 idx;
8057
8058         if (sa_family == AF_INET6)
8059                 return;
8060
8061         idx = i40e_get_vxlan_port_idx(pf, port);
8062
8063         /* Check if port already exists */
8064         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8065                 /* if port exists, set it to 0 (mark for deletion)
8066                  * and make it pending
8067                  */
8068                 pf->vxlan_ports[idx] = 0;
8069                 pf->pending_vxlan_bitmap |= BIT_ULL(idx);
8070                 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
8071
8072                 dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
8073                          ntohs(port));
8074         } else {
8075                 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
8076                             ntohs(port));
8077         }
8078 }
8079
8080 #endif
8081 static int i40e_get_phys_port_id(struct net_device *netdev,
8082                                  struct netdev_phys_item_id *ppid)
8083 {
8084         struct i40e_netdev_priv *np = netdev_priv(netdev);
8085         struct i40e_pf *pf = np->vsi->back;
8086         struct i40e_hw *hw = &pf->hw;
8087
8088         if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8089                 return -EOPNOTSUPP;
8090
8091         ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8092         memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8093
8094         return 0;
8095 }
8096
8097 /**
8098  * i40e_ndo_fdb_add - add an entry to the hardware database
8099  * @ndm: the input from the stack
8100  * @tb: pointer to array of nladdr (unused)
8101  * @dev: the net device pointer
8102  * @addr: the MAC address entry being added
8103  * @flags: instructions from stack about fdb operation
8104  */
8105 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8106                             struct net_device *dev,
8107                             const unsigned char *addr, u16 vid,
8108                             u16 flags)
8109 {
8110         struct i40e_netdev_priv *np = netdev_priv(dev);
8111         struct i40e_pf *pf = np->vsi->back;
8112         int err = 0;
8113
8114         if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8115                 return -EOPNOTSUPP;
8116
8117         if (vid) {
8118                 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8119                 return -EINVAL;
8120         }
8121
8122         /* Hardware does not support aging addresses so if a
8123          * ndm_state is given only allow permanent addresses
8124          */
8125         if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8126                 netdev_info(dev, "FDB only supports static addresses\n");
8127                 return -EINVAL;
8128         }
8129
8130         if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8131                 err = dev_uc_add_excl(dev, addr);
8132         else if (is_multicast_ether_addr(addr))
8133                 err = dev_mc_add_excl(dev, addr);
8134         else
8135                 err = -EINVAL;
8136
8137         /* Only return duplicate errors if NLM_F_EXCL is set */
8138         if (err == -EEXIST && !(flags & NLM_F_EXCL))
8139                 err = 0;
8140
8141         return err;
8142 }
8143
8144 /**
8145  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8146  * @dev: the netdev being configured
8147  * @nlh: RTNL message
8148  *
8149  * Inserts a new hardware bridge if not already created and
8150  * enables the bridging mode requested (VEB or VEPA). If the
8151  * hardware bridge has already been inserted and the request
8152  * is to change the mode then that requires a PF reset to
8153  * allow rebuild of the components with required hardware
8154  * bridge mode enabled.
8155  **/
8156 static int i40e_ndo_bridge_setlink(struct net_device *dev,
8157                                    struct nlmsghdr *nlh,
8158                                    u16 flags)
8159 {
8160         struct i40e_netdev_priv *np = netdev_priv(dev);
8161         struct i40e_vsi *vsi = np->vsi;
8162         struct i40e_pf *pf = vsi->back;
8163         struct i40e_veb *veb = NULL;
8164         struct nlattr *attr, *br_spec;
8165         int i, rem;
8166
8167         /* Only for PF VSI for now */
8168         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8169                 return -EOPNOTSUPP;
8170
8171         /* Find the HW bridge for PF VSI */
8172         for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8173                 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8174                         veb = pf->veb[i];
8175         }
8176
8177         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8178
8179         nla_for_each_nested(attr, br_spec, rem) {
8180                 __u16 mode;
8181
8182                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8183                         continue;
8184
8185                 mode = nla_get_u16(attr);
8186                 if ((mode != BRIDGE_MODE_VEPA) &&
8187                     (mode != BRIDGE_MODE_VEB))
8188                         return -EINVAL;
8189
8190                 /* Insert a new HW bridge */
8191                 if (!veb) {
8192                         veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8193                                              vsi->tc_config.enabled_tc);
8194                         if (veb) {
8195                                 veb->bridge_mode = mode;
8196                                 i40e_config_bridge_mode(veb);
8197                         } else {
8198                                 /* No Bridge HW offload available */
8199                                 return -ENOENT;
8200                         }
8201                         break;
8202                 } else if (mode != veb->bridge_mode) {
8203                         /* Existing HW bridge but different mode needs reset */
8204                         veb->bridge_mode = mode;
8205                         /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8206                         if (mode == BRIDGE_MODE_VEB)
8207                                 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8208                         else
8209                                 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8210                         i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8211                         break;
8212                 }
8213         }
8214
8215         return 0;
8216 }
8217
8218 /**
8219  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8220  * @skb: skb buff
8221  * @pid: process id
8222  * @seq: RTNL message seq #
8223  * @dev: the netdev being configured
8224  * @filter_mask: unused
8225  *
8226  * Return the mode in which the hardware bridge is operating in
8227  * i.e VEB or VEPA.
8228  **/
8229 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8230                                    struct net_device *dev,
8231                                    u32 filter_mask, int nlflags)
8232 {
8233         struct i40e_netdev_priv *np = netdev_priv(dev);
8234         struct i40e_vsi *vsi = np->vsi;
8235         struct i40e_pf *pf = vsi->back;
8236         struct i40e_veb *veb = NULL;
8237         int i;
8238
8239         /* Only for PF VSI for now */
8240         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8241                 return -EOPNOTSUPP;
8242
8243         /* Find the HW bridge for the PF VSI */
8244         for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8245                 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8246                         veb = pf->veb[i];
8247         }
8248
8249         if (!veb)
8250                 return 0;
8251
8252         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
8253                                        nlflags, 0, 0, filter_mask, NULL);
8254 }
8255
8256 #define I40E_MAX_TUNNEL_HDR_LEN 80
8257 /**
8258  * i40e_features_check - Validate encapsulated packet conforms to limits
8259  * @skb: skb buff
8260  * @netdev: This physical port's netdev
8261  * @features: Offload features that the stack believes apply
8262  **/
8263 static netdev_features_t i40e_features_check(struct sk_buff *skb,
8264                                              struct net_device *dev,
8265                                              netdev_features_t features)
8266 {
8267         if (skb->encapsulation &&
8268             (skb_inner_mac_header(skb) - skb_transport_header(skb) >
8269              I40E_MAX_TUNNEL_HDR_LEN))
8270                 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
8271
8272         return features;
8273 }
8274
/* Net device operations table wiring the stack's callbacks to the
 * driver's LAN VSI implementations.
 */
static const struct net_device_ops i40e_netdev_ops = {
        .ndo_open               = i40e_open,
        .ndo_stop               = i40e_close,
        .ndo_start_xmit         = i40e_lan_xmit_frame,
        .ndo_get_stats64        = i40e_get_netdev_stats_struct,
        .ndo_set_rx_mode        = i40e_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = i40e_set_mac,
        .ndo_change_mtu         = i40e_change_mtu,
        .ndo_do_ioctl           = i40e_ioctl,
        .ndo_tx_timeout         = i40e_tx_timeout,
        .ndo_vlan_rx_add_vid    = i40e_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = i40e_netpoll,
#endif
        .ndo_setup_tc           = i40e_setup_tc,
#ifdef I40E_FCOE
        .ndo_fcoe_enable        = i40e_fcoe_enable,
        .ndo_fcoe_disable       = i40e_fcoe_disable,
#endif
        .ndo_set_features       = i40e_set_features,
        /* SR-IOV VF management callbacks */
        .ndo_set_vf_mac         = i40e_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
        .ndo_set_vf_rate        = i40e_ndo_set_vf_bw,
        .ndo_get_vf_config      = i40e_ndo_get_vf_config,
        .ndo_set_vf_link_state  = i40e_ndo_set_vf_link_state,
        .ndo_set_vf_spoofchk    = i40e_ndo_set_vf_spoofchk,
#ifdef CONFIG_I40E_VXLAN
        .ndo_add_vxlan_port     = i40e_add_vxlan_port,
        .ndo_del_vxlan_port     = i40e_del_vxlan_port,
#endif
        .ndo_get_phys_port_id   = i40e_get_phys_port_id,
        .ndo_fdb_add            = i40e_ndo_fdb_add,
        .ndo_features_check     = i40e_features_check,
        /* HW bridge (VEB/VEPA) mode configuration */
        .ndo_bridge_getlink     = i40e_ndo_bridge_getlink,
        .ndo_bridge_setlink     = i40e_ndo_bridge_setlink,
};
8313
/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Allocates the net_device for the VSI, configures its feature flags,
 * MAC address and filters, and hooks up the netdev ops and ethtool ops.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
        u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_netdev_priv *np;
        struct net_device *netdev;
        u8 mac_addr[ETH_ALEN];
        int etherdev_size;

        etherdev_size = sizeof(struct i40e_netdev_priv);
        netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
        if (!netdev)
                return -ENOMEM;

        /* link the netdev and the VSI to each other */
        vsi->netdev = netdev;
        np = netdev_priv(netdev);
        np->vsi = vsi;

        /* offloads applied to encapsulated (tunneled) packets */
        netdev->hw_enc_features |= NETIF_F_IP_CSUM       |
                                  NETIF_F_GSO_UDP_TUNNEL |
                                  NETIF_F_TSO;

        netdev->features = NETIF_F_SG                  |
                           NETIF_F_IP_CSUM             |
                           NETIF_F_SCTP_CSUM           |
                           NETIF_F_HIGHDMA             |
                           NETIF_F_GSO_UDP_TUNNEL      |
                           NETIF_F_HW_VLAN_CTAG_TX     |
                           NETIF_F_HW_VLAN_CTAG_RX     |
                           NETIF_F_HW_VLAN_CTAG_FILTER |
                           NETIF_F_IPV6_CSUM           |
                           NETIF_F_TSO                 |
                           NETIF_F_TSO_ECN             |
                           NETIF_F_TSO6                |
                           NETIF_F_RXCSUM              |
                           NETIF_F_RXHASH              |
                           0;

        /* Flow Director sideband (ntuple) is unavailable in MFP mode */
        if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
                netdev->features |= NETIF_F_NTUPLE;

        /* copy netdev features into list of user selectable features */
        netdev->hw_features |= netdev->features;

        if (vsi->type == I40E_VSI_MAIN) {
                SET_NETDEV_DEV(netdev, &pf->pdev->dev);
                ether_addr_copy(mac_addr, hw->mac.perm_addr);
                /* The following steps are necessary to prevent reception
                 * of tagged packets - some older NVM configurations load a
                 * default a MAC-VLAN filter that accepts any tagged packet
                 * which must be replaced by a normal filter.
                 */
                if (!i40e_rm_default_mac_filter(vsi, mac_addr))
                        i40e_add_filter(vsi, mac_addr,
                                        I40E_VLAN_ANY, false, true);
        } else {
                /* relate the VSI_VMDQ name to the VSI_MAIN name */
                snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
                         pf->vsi[pf->lan_vsi]->netdev->name);
                random_ether_addr(mac_addr);
                i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
        }
        /* always accept broadcast frames */
        i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);

        ether_addr_copy(netdev->dev_addr, mac_addr);
        ether_addr_copy(netdev->perm_addr, mac_addr);
        /* vlan gets same features (except vlan offload)
         * after any tweaks for specific VSI types
         */
        netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
                                                     NETIF_F_HW_VLAN_CTAG_RX |
                                                   NETIF_F_HW_VLAN_CTAG_FILTER);
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->priv_flags |= IFF_SUPP_NOFCS;
        /* Setup netdev TC information */
        i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

        netdev->netdev_ops = &i40e_netdev_ops;
        netdev->watchdog_timeo = 5 * HZ;
        i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
        i40e_fcoe_config_netdev(netdev, vsi);
#endif

        return 0;
}
8407
8408 /**
8409  * i40e_vsi_delete - Delete a VSI from the switch
8410  * @vsi: the VSI being removed
8411  *
8412  * Returns 0 on success, negative value on failure
8413  **/
8414 static void i40e_vsi_delete(struct i40e_vsi *vsi)
8415 {
8416         /* remove default VSI is not allowed */
8417         if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
8418                 return;
8419
8420         i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
8421 }
8422
8423 /**
8424  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
8425  * @vsi: the VSI being queried
8426  *
8427  * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
8428  **/
8429 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
8430 {
8431         struct i40e_veb *veb;
8432         struct i40e_pf *pf = vsi->back;
8433
8434         /* Uplink is not a bridge so default to VEB */
8435         if (vsi->veb_idx == I40E_NO_VEB)
8436                 return 1;
8437
8438         veb = pf->veb[vsi->veb_idx];
8439         /* Uplink is a bridge in VEPA mode */
8440         if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
8441                 return 0;
8442
8443         /* Uplink is a bridge in VEB mode */
8444         return 1;
8445 }
8446
8447 /**
8448  * i40e_add_vsi - Add a VSI to the switch
8449  * @vsi: the VSI being configured
8450  *
8451  * This initializes a VSI context depending on the VSI type to be added and
8452  * passes it down to the add_vsi aq command.
8453  **/
8454 static int i40e_add_vsi(struct i40e_vsi *vsi)
8455 {
8456         int ret = -ENODEV;
8457         struct i40e_mac_filter *f, *ftmp;
8458         struct i40e_pf *pf = vsi->back;
8459         struct i40e_hw *hw = &pf->hw;
8460         struct i40e_vsi_context ctxt;
8461         u8 enabled_tc = 0x1; /* TC0 enabled */
8462         int f_count = 0;
8463
8464         memset(&ctxt, 0, sizeof(ctxt));
8465         switch (vsi->type) {
8466         case I40E_VSI_MAIN:
8467                 /* The PF's main VSI is already setup as part of the
8468                  * device initialization, so we'll not bother with
8469                  * the add_vsi call, but we will retrieve the current
8470                  * VSI context.
8471                  */
8472                 ctxt.seid = pf->main_vsi_seid;
8473                 ctxt.pf_num = pf->hw.pf_id;
8474                 ctxt.vf_num = 0;
8475                 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8476                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8477                 if (ret) {
8478                         dev_info(&pf->pdev->dev,
8479                                  "couldn't get PF vsi config, err %s aq_err %s\n",
8480                                  i40e_stat_str(&pf->hw, ret),
8481                                  i40e_aq_str(&pf->hw,
8482                                              pf->hw.aq.asq_last_status));
8483                         return -ENOENT;
8484                 }
8485                 vsi->info = ctxt.info;
8486                 vsi->info.valid_sections = 0;
8487
8488                 vsi->seid = ctxt.seid;
8489                 vsi->id = ctxt.vsi_number;
8490
8491                 enabled_tc = i40e_pf_get_tc_map(pf);
8492
8493                 /* MFP mode setup queue map and update VSI */
8494                 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
8495                     !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
8496                         memset(&ctxt, 0, sizeof(ctxt));
8497                         ctxt.seid = pf->main_vsi_seid;
8498                         ctxt.pf_num = pf->hw.pf_id;
8499                         ctxt.vf_num = 0;
8500                         i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
8501                         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
8502                         if (ret) {
8503                                 dev_info(&pf->pdev->dev,
8504                                          "update vsi failed, err %s aq_err %s\n",
8505                                          i40e_stat_str(&pf->hw, ret),
8506                                          i40e_aq_str(&pf->hw,
8507                                                     pf->hw.aq.asq_last_status));
8508                                 ret = -ENOENT;
8509                                 goto err;
8510                         }
8511                         /* update the local VSI info queue map */
8512                         i40e_vsi_update_queue_map(vsi, &ctxt);
8513                         vsi->info.valid_sections = 0;
8514                 } else {
8515                         /* Default/Main VSI is only enabled for TC0
8516                          * reconfigure it to enable all TCs that are
8517                          * available on the port in SFP mode.
8518                          * For MFP case the iSCSI PF would use this
8519                          * flow to enable LAN+iSCSI TC.
8520                          */
8521                         ret = i40e_vsi_config_tc(vsi, enabled_tc);
8522                         if (ret) {
8523                                 dev_info(&pf->pdev->dev,
8524                                          "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
8525                                          enabled_tc,
8526                                          i40e_stat_str(&pf->hw, ret),
8527                                          i40e_aq_str(&pf->hw,
8528                                                     pf->hw.aq.asq_last_status));
8529                                 ret = -ENOENT;
8530                         }
8531                 }
8532                 break;
8533
8534         case I40E_VSI_FDIR:
8535                 ctxt.pf_num = hw->pf_id;
8536                 ctxt.vf_num = 0;
8537                 ctxt.uplink_seid = vsi->uplink_seid;
8538                 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8539                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8540                 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
8541                     (i40e_is_vsi_uplink_mode_veb(vsi))) {
8542                         ctxt.info.valid_sections |=
8543                              cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8544                         ctxt.info.switch_id =
8545                            cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8546                 }
8547                 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8548                 break;
8549
8550         case I40E_VSI_VMDQ2:
8551                 ctxt.pf_num = hw->pf_id;
8552                 ctxt.vf_num = 0;
8553                 ctxt.uplink_seid = vsi->uplink_seid;
8554                 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8555                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
8556
8557                 /* This VSI is connected to VEB so the switch_id
8558                  * should be set to zero by default.
8559                  */
8560                 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
8561                         ctxt.info.valid_sections |=
8562                                 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8563                         ctxt.info.switch_id =
8564                                 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8565                 }
8566
8567                 /* Setup the VSI tx/rx queue map for TC0 only for now */
8568                 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8569                 break;
8570
8571         case I40E_VSI_SRIOV:
8572                 ctxt.pf_num = hw->pf_id;
8573                 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
8574                 ctxt.uplink_seid = vsi->uplink_seid;
8575                 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8576                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
8577
8578                 /* This VSI is connected to VEB so the switch_id
8579                  * should be set to zero by default.
8580                  */
8581                 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
8582                         ctxt.info.valid_sections |=
8583                                 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8584                         ctxt.info.switch_id =
8585                                 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8586                 }
8587
8588                 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
8589                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
8590                 if (pf->vf[vsi->vf_id].spoofchk) {
8591                         ctxt.info.valid_sections |=
8592                                 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
8593                         ctxt.info.sec_flags |=
8594                                 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
8595                                  I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
8596                 }
8597                 /* Setup the VSI tx/rx queue map for TC0 only for now */
8598                 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8599                 break;
8600
8601 #ifdef I40E_FCOE
8602         case I40E_VSI_FCOE:
8603                 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
8604                 if (ret) {
8605                         dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
8606                         return ret;
8607                 }
8608                 break;
8609
8610 #endif /* I40E_FCOE */
8611         default:
8612                 return -ENODEV;
8613         }
8614
8615         if (vsi->type != I40E_VSI_MAIN) {
8616                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
8617                 if (ret) {
8618                         dev_info(&vsi->back->pdev->dev,
8619                                  "add vsi failed, err %s aq_err %s\n",
8620                                  i40e_stat_str(&pf->hw, ret),
8621                                  i40e_aq_str(&pf->hw,
8622                                              pf->hw.aq.asq_last_status));
8623                         ret = -ENOENT;
8624                         goto err;
8625                 }
8626                 vsi->info = ctxt.info;
8627                 vsi->info.valid_sections = 0;
8628                 vsi->seid = ctxt.seid;
8629                 vsi->id = ctxt.vsi_number;
8630         }
8631
8632         /* If macvlan filters already exist, force them to get loaded */
8633         list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
8634                 f->changed = true;
8635                 f_count++;
8636
8637                 if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
8638                         struct i40e_aqc_remove_macvlan_element_data element;
8639
8640                         memset(&element, 0, sizeof(element));
8641                         ether_addr_copy(element.mac_addr, f->macaddr);
8642                         element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
8643                         ret = i40e_aq_remove_macvlan(hw, vsi->seid,
8644                                                      &element, 1, NULL);
8645                         if (ret) {
8646                                 /* some older FW has a different default */
8647                                 element.flags |=
8648                                                I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
8649                                 i40e_aq_remove_macvlan(hw, vsi->seid,
8650                                                        &element, 1, NULL);
8651                         }
8652
8653                         i40e_aq_mac_address_write(hw,
8654                                                   I40E_AQC_WRITE_TYPE_LAA_WOL,
8655                                                   f->macaddr, NULL);
8656                 }
8657         }
8658         if (f_count) {
8659                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
8660                 pf->flags |= I40E_FLAG_FILTER_SYNC;
8661         }
8662
8663         /* Update VSI BW information */
8664         ret = i40e_vsi_get_bw_info(vsi);
8665         if (ret) {
8666                 dev_info(&pf->pdev->dev,
8667                          "couldn't get vsi bw info, err %s aq_err %s\n",
8668                          i40e_stat_str(&pf->hw, ret),
8669                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8670                 /* VSI is already added so not tearing that up */
8671                 ret = 0;
8672         }
8673
8674 err:
8675         return ret;
8676 }
8677
8678 /**
8679  * i40e_vsi_release - Delete a VSI and free its resources
8680  * @vsi: the VSI being removed
8681  *
8682  * Returns 0 on success or < 0 on error
8683  **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	/* The PF's LAN VSI may only be removed as part of full teardown,
	 * i.e. once __I40E_DOWN has been set on the PF.
	 */
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, &pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	/* Remember the uplink before the VSI struct is freed below; it is
	 * needed afterwards to decide whether the parent VEB is now empty.
	 */
	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	/* Drop all MAC/VLAN filters and push the empty list to the HW */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, f->vlan,
				f->is_vf, f->is_netdev);
	i40e_sync_vsi_filters(vsi);

	/* Remove the VSI from the HW switch, then free SW resources:
	 * vectors, netdev, rings, and finally the VSI struct itself.
	 */
	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	/* veb->uplink_seid == 0 identifies a floating (orphan) VEB; those
	 * are deliberately left in place per the note above.
	 */
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
8762
8763 /**
8764  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
8765  * @vsi: ptr to the VSI
8766  *
8767  * This should only be called after i40e_vsi_mem_alloc() which allocates the
8768  * corresponding SW VSI structure and initializes num_queue_pairs for the
8769  * newly allocated VSI.
8770  *
8771  * Returns 0 on success or negative on failure
8772  **/
8773 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
8774 {
8775         int ret = -ENOENT;
8776         struct i40e_pf *pf = vsi->back;
8777
8778         if (vsi->q_vectors[0]) {
8779                 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
8780                          vsi->seid);
8781                 return -EEXIST;
8782         }
8783
8784         if (vsi->base_vector) {
8785                 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
8786                          vsi->seid, vsi->base_vector);
8787                 return -EEXIST;
8788         }
8789
8790         ret = i40e_vsi_alloc_q_vectors(vsi);
8791         if (ret) {
8792                 dev_info(&pf->pdev->dev,
8793                          "failed to allocate %d q_vector for VSI %d, ret=%d\n",
8794                          vsi->num_q_vectors, vsi->seid, ret);
8795                 vsi->num_q_vectors = 0;
8796                 goto vector_setup_out;
8797         }
8798
8799         if (vsi->num_q_vectors)
8800                 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
8801                                                  vsi->num_q_vectors, vsi->idx);
8802         if (vsi->base_vector < 0) {
8803                 dev_info(&pf->pdev->dev,
8804                          "failed to get tracking for %d vectors for VSI %d, err=%d\n",
8805                          vsi->num_q_vectors, vsi->seid, vsi->base_vector);
8806                 i40e_vsi_free_q_vectors(vsi);
8807                 ret = -ENOENT;
8808                 goto vector_setup_out;
8809         }
8810
8811 vector_setup_out:
8812         return ret;
8813 }
8814
8815 /**
8816  * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
8817  * @vsi: pointer to the vsi.
8818  *
8819  * This re-allocates a vsi's queue resources.
8820  *
8821  * Returns pointer to the successfully allocated and configured VSI sw struct
8822  * on success, otherwise returns NULL on failure.
8823  **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc;
	int ret;

	/* Give back the queue range this VSI held, then drop its rings so
	 * both can be re-sized/re-allocated below.
	 */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	/* Re-size the ring/vector arrays (false: keep the q_vector array) */
	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	/* Zeroing enabled_tc first makes i40e_vsi_config_tc() treat the
	 * same TC map as a change and reprogram it.  The seid is restored
	 * from the saved main_vsi_seid — NOTE(review): presumably because
	 * this path runs after a switch reset invalidated it; confirm.
	 */
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	/* Unwind in reverse order: vectors, netdev, HW switch element,
	 * then the SW VSI struct itself.
	 */
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
8878
8879 /**
8880  * i40e_vsi_setup - Set up a VSI by a given type
8881  * @pf: board private structure
8882  * @type: VSI type
8883  * @uplink_seid: the switch element to link to
8884  * @param1: usage depends upon VSI type. For VF types, indicates VF id
8885  *
8886  * This allocates the sw VSI structure and its queue resources, then add a VSI
8887  * to the identified VEB.
8888  *
8889  * Returns pointer to the successfully allocated and configure VSI sw struct on
8890  * success, otherwise returns NULL on failure.
8891  **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		/* uplink must then be an existing VSI's seid; find it */
		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		/* Create a VEB under that VSI (unless it already owns one) */
		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			/* only the LAN VSI may anchor a newly created VEB */
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
					 __func__);
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		/* i40e_veb_setup() may have returned NULL while still
		 * registering the VEB in pf->veb[]; re-scan by uplink.
		 */
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
				vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_FCOE:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

	/* Error unwind ladder, deepest failure first */
err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
9063
9064 /**
9065  * i40e_veb_get_bw_info - Query VEB BW information
9066  * @veb: the veb to query
9067  *
9068  * Query the Tx scheduler BW configuration data for given VEB
9069  **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	/* Query per-TC BW shares/limits for this switching component */
	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	/* Query the ETS (port-level) configuration */
	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	/* Unpack both responses into the SW VEB struct */
	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	/* tc_bw_max arrives as two LE 16-bit words holding 4-bit
	 * max-quanta fields, one nibble per TC; recombine then slice.
	 */
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
	}

out:
	return ret;
}
9116
9117 /**
9118  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9119  * @pf: board private structure
9120  *
9121  * On error: returns error code (negative)
9122  * On success: returns vsi index in PF (positive)
9123  **/
9124 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9125 {
9126         int ret = -ENOENT;
9127         struct i40e_veb *veb;
9128         int i;
9129
9130         /* Need to protect the allocation of switch elements at the PF level */
9131         mutex_lock(&pf->switch_mutex);
9132
9133         /* VEB list may be fragmented if VEB creation/destruction has
9134          * been happening.  We can afford to do a quick scan to look
9135          * for any free slots in the list.
9136          *
9137          * find next empty veb slot, looping back around if necessary
9138          */
9139         i = 0;
9140         while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9141                 i++;
9142         if (i >= I40E_MAX_VEB) {
9143                 ret = -ENOMEM;
9144                 goto err_alloc_veb;  /* out of VEB slots! */
9145         }
9146
9147         veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9148         if (!veb) {
9149                 ret = -ENOMEM;
9150                 goto err_alloc_veb;
9151         }
9152         veb->pf = pf;
9153         veb->idx = i;
9154         veb->enabled_tc = 1;
9155
9156         pf->veb[i] = veb;
9157         ret = i;
9158 err_alloc_veb:
9159         mutex_unlock(&pf->switch_mutex);
9160         return ret;
9161 }
9162
9163 /**
9164  * i40e_switch_branch_release - Delete a branch of the switch tree
9165  * @branch: where to start deleting
9166  *
9167  * This uses recursion to find the tips of the branch to be
9168  * removed, deleting until we get back to and can delete this VEB.
9169  **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	/* Cache seid/idx locally: *branch may be freed as a side effect
	 * of releasing the last VSI below.
	 */
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
9207
9208 /**
9209  * i40e_veb_clear - remove veb struct
9210  * @veb: the veb to remove
9211  **/
9212 static void i40e_veb_clear(struct i40e_veb *veb)
9213 {
9214         if (!veb)
9215                 return;
9216
9217         if (veb->pf) {
9218                 struct i40e_pf *pf = veb->pf;
9219
9220                 mutex_lock(&pf->switch_mutex);
9221                 if (pf->veb[veb->idx] == veb)
9222                         pf->veb[veb->idx] = NULL;
9223                 mutex_unlock(&pf->switch_mutex);
9224         }
9225
9226         kfree(veb);
9227 }
9228
9229 /**
9230  * i40e_veb_release - Delete a VEB and free its resources
9231  * @veb: the VEB being removed
9232  **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	/* a VEB may only be deleted once its sole remaining VSI is the
	 * controlling one; more (or fewer) means the caller must clean
	 * up first
	 */
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	/* delete the HW switch element, then free the SW struct */
	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}
9272
9273 /**
9274  * i40e_add_veb - create the VEB in the switch
9275  * @veb: the VEB to be instantiated
9276  * @vsi: the controlling VSI
9277  **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool is_default = false;
	bool is_cloud = false;
	int ret;

	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, is_default,
			      is_cloud, &veb->seid, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* NOTE(review): unlike the bw-info failure below, this path
		 * returns without deleting the just-added HW VEB — confirm
		 * whether that leak is intentional.
		 */
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	/* success: hang the controlling VSI off the new VEB */
	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
9323
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}
	/* NOTE(review): in the floating-VEB case (vsi_seid == 0) the loop
	 * above runs off the end, leaving vsi_idx == pf->num_alloc_vsi, yet
	 * pf->vsi[vsi_idx] is still handed to i40e_add_veb() below — that
	 * index is one past the array. Confirm callers never actually
	 * request a floating VEB, or guard this path.
	 */

	/* an uplink of the MAC itself needs no VEB lookup; anything else
	 * must name an already-known VEB
	 */
	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	/* default to TC0 only if the caller gave no TC map */
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	/* remember which sw VEB fronts the LAN VSI */
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
9406
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values
 * (mac_seid, pf_seid, main_vsi_seid, lan_veb) from one element of the
 * firmware's get-switch-config response.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	/* SEID fields arrive little-endian from the AdminQ response */
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? only track the VEB uplinked to the MAC */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					/* no slot available; leave lan_veb
					 * unset rather than half-filled
					 */
					break;
				pf->lan_veb = v;
			}
		}

		/* (re)populate the sw struct for the LAN VEB */
		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
9489
9490 /**
9491  * i40e_fetch_switch_configuration - Get switch config from firmware
9492  * @pf: board private structure
9493  * @printconfig: should we print the contents
9494  *
9495  * Get the current switch configuration from the device and
9496  * extract a few useful SEID values.
9497  **/
9498 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
9499 {
9500         struct i40e_aqc_get_switch_config_resp *sw_config;
9501         u16 next_seid = 0;
9502         int ret = 0;
9503         u8 *aq_buf;
9504         int i;
9505
9506         aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
9507         if (!aq_buf)
9508                 return -ENOMEM;
9509
9510         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
9511         do {
9512                 u16 num_reported, num_total;
9513
9514                 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
9515                                                 I40E_AQ_LARGE_BUF,
9516                                                 &next_seid, NULL);
9517                 if (ret) {
9518                         dev_info(&pf->pdev->dev,
9519                                  "get switch config failed err %s aq_err %s\n",
9520                                  i40e_stat_str(&pf->hw, ret),
9521                                  i40e_aq_str(&pf->hw,
9522                                              pf->hw.aq.asq_last_status));
9523                         kfree(aq_buf);
9524                         return -ENOENT;
9525                 }
9526
9527                 num_reported = le16_to_cpu(sw_config->header.num_reported);
9528                 num_total = le16_to_cpu(sw_config->header.num_total);
9529
9530                 if (printconfig)
9531                         dev_info(&pf->pdev->dev,
9532                                  "header: %d reported %d total\n",
9533                                  num_reported, num_total);
9534
9535                 for (i = 0; i < num_reported; i++) {
9536                         struct i40e_aqc_switch_config_element_resp *ele =
9537                                 &sw_config->element[i];
9538
9539                         i40e_setup_pf_switch_element(pf, ele, num_reported,
9540                                                      printconfig);
9541                 }
9542         } while (next_seid != 0);
9543
9544         kfree(aq_buf);
9545         return ret;
9546 }
9547
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Fetches the current switch layout from firmware, then either creates
 * the main LAN VSI (first-time setup / reinit) or re-applies the TC and
 * queue configuration to the existing one, before bringing up flow
 * director, filter control, RSS, link state and PTP.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations by
		 * clearing the cached TC map so config_tc sees a change
		 */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	/* ret may carry the (non-fatal) filter-control result here */
	return ret;
}
9628
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 *
 * Partitions the device's queue pairs between the PF LAN VSI, FCoE,
 * the flow-director sideband queue, SR-IOV VFs and VMDq VSIs, disabling
 * features for which no queues remain. The leftover count is stashed in
 * pf->queues_left.
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;

	pf->num_lan_qps = 0;
#ifdef I40E_FCOE
	pf->num_fcoe_qps = 0;
#endif

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED	|
#endif
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_CAPABLE	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		/* NOTE(review): this branch clears I40E_FLAG_DCB_ENABLED
		 * where the branch above clears I40E_FLAG_DCB_CAPABLE;
		 * DCB_CAPABLE is already known clear here from the else-if
		 * condition, but confirm the asymmetry is intentional.
		 */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED	|
#endif
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}
		/* LAN gets one qp per online CPU (at least rss_size_max),
		 * capped by what the function actually supports
		 */
		pf->num_lan_qps = max_t(int, pf->rss_size_max,
					num_online_cpus());
		pf->num_lan_qps = min_t(int, pf->num_lan_qps,
					pf->hw.func_caps.num_tx_qp);

		queues_left -= pf->num_lan_qps;
	}

#ifdef I40E_FCOE
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		/* take the default FCoE allotment if it fits, fall back to
		 * the minimum, otherwise turn FCoE off entirely
		 */
		if (I40E_DEFAULT_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
		} else if (I40E_MINIMUM_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
		} else {
			pf->num_fcoe_qps = 0;
			pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
		}

		queues_left -= pf->num_fcoe_qps;
	}

#endif
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	/* shrink the requested VF count to what the remaining queues allow */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	/* likewise for VMDq VSIs */
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
#ifdef I40E_FCOE
	dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
}
9739
9740 /**
9741  * i40e_setup_pf_filter_control - Setup PF static filter control
9742  * @pf: PF to be setup
9743  *
9744  * i40e_setup_pf_filter_control sets up a PF's initial filter control
9745  * settings. If PE/FCoE are enabled then it will also set the per PF
9746  * based filter sizes required for them. It also enables Flow director,
9747  * ethertype and macvlan type filter settings for the pf.
9748  *
9749  * Returns 0 on success, negative on failure
9750  **/
9751 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
9752 {
9753         struct i40e_filter_control_settings *settings = &pf->filter_settings;
9754
9755         settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
9756
9757         /* Flow Director is enabled */
9758         if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
9759                 settings->enable_fdir = true;
9760
9761         /* Ethtype and MACVLAN filters enabled for PF */
9762         settings->enable_ethtype = true;
9763         settings->enable_macvlan = true;
9764
9765         if (i40e_set_filter_control(&pf->hw, settings))
9766                 return -ENOENT;
9767
9768         return 0;
9769 }
9770
9771 #define INFO_STRING_LEN 255
9772 static void i40e_print_features(struct i40e_pf *pf)
9773 {
9774         struct i40e_hw *hw = &pf->hw;
9775         char *buf, *string;
9776
9777         string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
9778         if (!string) {
9779                 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
9780                 return;
9781         }
9782
9783         buf = string;
9784
9785         buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
9786 #ifdef CONFIG_PCI_IOV
9787         buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
9788 #endif
9789         buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
9790                        pf->hw.func_caps.num_vsis,
9791                        pf->vsi[pf->lan_vsi]->num_queue_pairs,
9792                        pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
9793
9794         if (pf->flags & I40E_FLAG_RSS_ENABLED)
9795                 buf += sprintf(buf, "RSS ");
9796         if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
9797                 buf += sprintf(buf, "FD_ATR ");
9798         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9799                 buf += sprintf(buf, "FD_SB ");
9800                 buf += sprintf(buf, "NTUPLE ");
9801         }
9802         if (pf->flags & I40E_FLAG_DCB_CAPABLE)
9803                 buf += sprintf(buf, "DCB ");
9804         if (pf->flags & I40E_FLAG_PTP)
9805                 buf += sprintf(buf, "PTP ");
9806 #ifdef I40E_FCOE
9807         if (pf->flags & I40E_FLAG_FCOE_ENABLED)
9808                 buf += sprintf(buf, "FCOE ");
9809 #endif
9810
9811         BUG_ON(buf > (string + INFO_STRING_LEN));
9812         dev_info(&pf->pdev->dev, "%s\n", string);
9813         kfree(string);
9814 }
9815
9816 /**
9817  * i40e_probe - Device initialization routine
9818  * @pdev: PCI device information struct
9819  * @ent: entry in i40e_pci_tbl
9820  *
9821  * i40e_probe initializes a PF identified by a pci_dev structure.
9822  * The OS initialization, configuring of the PF private structure,
9823  * and a hardware reset occur.
9824  *
9825  * Returns 0 on success, negative on failure
9826  **/
9827 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9828 {
9829         struct i40e_aq_get_phy_abilities_resp abilities;
9830         unsigned long ioremap_len;
9831         struct i40e_pf *pf;
9832         struct i40e_hw *hw;
9833         static u16 pfs_found;
9834         u16 link_status;
9835         int err = 0;
9836         u32 len;
9837         u32 i;
9838
9839         err = pci_enable_device_mem(pdev);
9840         if (err)
9841                 return err;
9842
9843         /* set up for high or low dma */
9844         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9845         if (err) {
9846                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9847                 if (err) {
9848                         dev_err(&pdev->dev,
9849                                 "DMA configuration failed: 0x%x\n", err);
9850                         goto err_dma;
9851                 }
9852         }
9853
9854         /* set up pci connections */
9855         err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
9856                                            IORESOURCE_MEM), i40e_driver_name);
9857         if (err) {
9858                 dev_info(&pdev->dev,
9859                          "pci_request_selected_regions failed %d\n", err);
9860                 goto err_pci_reg;
9861         }
9862
9863         pci_enable_pcie_error_reporting(pdev);
9864         pci_set_master(pdev);
9865
9866         /* Now that we have a PCI connection, we need to do the
9867          * low level device setup.  This is primarily setting up
9868          * the Admin Queue structures and then querying for the
9869          * device's current profile information.
9870          */
9871         pf = kzalloc(sizeof(*pf), GFP_KERNEL);
9872         if (!pf) {
9873                 err = -ENOMEM;
9874                 goto err_pf_alloc;
9875         }
9876         pf->next_vsi = 0;
9877         pf->pdev = pdev;
9878         set_bit(__I40E_DOWN, &pf->state);
9879
9880         hw = &pf->hw;
9881         hw->back = pf;
9882
9883         ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
9884                             I40E_MAX_CSR_SPACE);
9885
9886         hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
9887         if (!hw->hw_addr) {
9888                 err = -EIO;
9889                 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
9890                          (unsigned int)pci_resource_start(pdev, 0),
9891                          (unsigned int)pci_resource_len(pdev, 0), err);
9892                 goto err_ioremap;
9893         }
9894         hw->vendor_id = pdev->vendor;
9895         hw->device_id = pdev->device;
9896         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
9897         hw->subsystem_vendor_id = pdev->subsystem_vendor;
9898         hw->subsystem_device_id = pdev->subsystem_device;
9899         hw->bus.device = PCI_SLOT(pdev->devfn);
9900         hw->bus.func = PCI_FUNC(pdev->devfn);
9901         pf->instance = pfs_found;
9902
9903         if (debug != -1) {
9904                 pf->msg_enable = pf->hw.debug_mask;
9905                 pf->msg_enable = debug;
9906         }
9907
9908         /* do a special CORER for clearing PXE mode once at init */
9909         if (hw->revision_id == 0 &&
9910             (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
9911                 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
9912                 i40e_flush(hw);
9913                 msleep(200);
9914                 pf->corer_count++;
9915
9916                 i40e_clear_pxe_mode(hw);
9917         }
9918
9919         /* Reset here to make sure all is clean and to define PF 'n' */
9920         i40e_clear_hw(hw);
9921         err = i40e_pf_reset(hw);
9922         if (err) {
9923                 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
9924                 goto err_pf_reset;
9925         }
9926         pf->pfr_count++;
9927
9928         hw->aq.num_arq_entries = I40E_AQ_LEN;
9929         hw->aq.num_asq_entries = I40E_AQ_LEN;
9930         hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9931         hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9932         pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
9933
9934         snprintf(pf->int_name, sizeof(pf->int_name) - 1,
9935                  "%s-%s:misc",
9936                  dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
9937
9938         err = i40e_init_shared_code(hw);
9939         if (err) {
9940                 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
9941                          err);
9942                 goto err_pf_reset;
9943         }
9944
9945         /* set up a default setting for link flow control */
9946         pf->hw.fc.requested_mode = I40E_FC_NONE;
9947
9948         err = i40e_init_adminq(hw);
9949         dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
9950         if (err) {
9951                 dev_info(&pdev->dev,
9952                          "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
9953                 goto err_pf_reset;
9954         }
9955
9956         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
9957             hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
9958                 dev_info(&pdev->dev,
9959                          "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
9960         else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
9961                  hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
9962                 dev_info(&pdev->dev,
9963                          "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
9964
9965         i40e_verify_eeprom(pf);
9966
9967         /* Rev 0 hardware was never productized */
9968         if (hw->revision_id < 1)
9969                 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
9970
9971         i40e_clear_pxe_mode(hw);
9972         err = i40e_get_capabilities(pf);
9973         if (err)
9974                 goto err_adminq_setup;
9975
9976         err = i40e_sw_init(pf);
9977         if (err) {
9978                 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
9979                 goto err_sw_init;
9980         }
9981
9982         err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9983                                 hw->func_caps.num_rx_qp,
9984                                 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
9985         if (err) {
9986                 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
9987                 goto err_init_lan_hmc;
9988         }
9989
9990         err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9991         if (err) {
9992                 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
9993                 err = -ENOENT;
9994                 goto err_configure_lan_hmc;
9995         }
9996
9997         /* Disable LLDP for NICs that have firmware versions lower than v4.3.
9998          * Ignore error return codes because if it was already disabled via
9999          * hardware settings this will fail
10000          */
10001         if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
10002             (pf->hw.aq.fw_maj_ver < 4)) {
10003                 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10004                 i40e_aq_stop_lldp(hw, true, NULL);
10005         }
10006
10007         i40e_get_mac_addr(hw, hw->mac.addr);
10008         if (!is_valid_ether_addr(hw->mac.addr)) {
10009                 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10010                 err = -EIO;
10011                 goto err_mac_addr;
10012         }
10013         dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
10014         ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
10015         i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10016         if (is_valid_ether_addr(hw->mac.port_addr))
10017                 pf->flags |= I40E_FLAG_PORT_ID_VALID;
10018 #ifdef I40E_FCOE
10019         err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10020         if (err)
10021                 dev_info(&pdev->dev,
10022                          "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10023         if (!is_valid_ether_addr(hw->mac.san_addr)) {
10024                 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10025                          hw->mac.san_addr);
10026                 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10027         }
10028         dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10029 #endif /* I40E_FCOE */
10030
10031         pci_set_drvdata(pdev, pf);
10032         pci_save_state(pdev);
10033 #ifdef CONFIG_I40E_DCB
10034         err = i40e_init_pf_dcb(pf);
10035         if (err) {
10036                 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10037                 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10038                 /* Continue without DCB enabled */
10039         }
10040 #endif /* CONFIG_I40E_DCB */
10041
10042         /* set up periodic task facility */
10043         setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10044         pf->service_timer_period = HZ;
10045
10046         INIT_WORK(&pf->service_task, i40e_service_task);
10047         clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10048         pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
10049         pf->link_check_timeout = jiffies;
10050
10051         /* WoL defaults to disabled */
10052         pf->wol_en = false;
10053         device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10054
10055         /* set up the main switch operations */
10056         i40e_determine_queue_usage(pf);
10057         err = i40e_init_interrupt_scheme(pf);
10058         if (err)
10059                 goto err_switch_setup;
10060
10061         /* The number of VSIs reported by the FW is the minimum guaranteed
10062          * to us; HW supports far more and we share the remaining pool with
10063          * the other PFs. We allocate space for more than the guarantee with
10064          * the understanding that we might not get them all later.
10065          */
10066         if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
10067                 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
10068         else
10069                 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
10070
10071         /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
10072         len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
10073         pf->vsi = kzalloc(len, GFP_KERNEL);
10074         if (!pf->vsi) {
10075                 err = -ENOMEM;
10076                 goto err_switch_setup;
10077         }
10078
10079 #ifdef CONFIG_PCI_IOV
10080         /* prep for VF support */
10081         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10082             (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10083             !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10084                 if (pci_num_vf(pdev))
10085                         pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10086         }
10087 #endif
10088         err = i40e_setup_pf_switch(pf, false);
10089         if (err) {
10090                 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
10091                 goto err_vsis;
10092         }
10093         /* if FDIR VSI was set up, start it now */
10094         for (i = 0; i < pf->num_alloc_vsi; i++) {
10095                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
10096                         i40e_vsi_open(pf->vsi[i]);
10097                         break;
10098                 }
10099         }
10100
10101         /* driver is only interested in link up/down and module qualification
10102          * reports from firmware
10103          */
10104         err = i40e_aq_set_phy_int_mask(&pf->hw,
10105                                        I40E_AQ_EVENT_LINK_UPDOWN |
10106                                        I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
10107         if (err)
10108                 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10109                          i40e_stat_str(&pf->hw, err),
10110                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10111
10112         if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
10113             (pf->hw.aq.fw_maj_ver < 4)) {
10114                 msleep(75);
10115                 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10116                 if (err)
10117                         dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10118                                  i40e_stat_str(&pf->hw, err),
10119                                  i40e_aq_str(&pf->hw,
10120                                              pf->hw.aq.asq_last_status));
10121         }
10122         /* The main driver is (mostly) up and happy. We need to set this state
10123          * before setting up the misc vector or we get a race and the vector
10124          * ends up disabled forever.
10125          */
10126         clear_bit(__I40E_DOWN, &pf->state);
10127
10128         /* In case of MSIX we are going to setup the misc vector right here
10129          * to handle admin queue events etc. In case of legacy and MSI
10130          * the misc functionality and queue processing is combined in
10131          * the same vector and that gets setup at open.
10132          */
10133         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10134                 err = i40e_setup_misc_vector(pf);
10135                 if (err) {
10136                         dev_info(&pdev->dev,
10137                                  "setup of misc vector failed: %d\n", err);
10138                         goto err_vsis;
10139                 }
10140         }
10141
10142 #ifdef CONFIG_PCI_IOV
10143         /* prep for VF support */
10144         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10145             (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10146             !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10147                 u32 val;
10148
10149                 /* disable link interrupts for VFs */
10150                 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
10151                 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
10152                 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
10153                 i40e_flush(hw);
10154
10155                 if (pci_num_vf(pdev)) {
10156                         dev_info(&pdev->dev,
10157                                  "Active VFs found, allocating resources.\n");
10158                         err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
10159                         if (err)
10160                                 dev_info(&pdev->dev,
10161                                          "Error %d allocating resources for existing VFs\n",
10162                                          err);
10163                 }
10164         }
10165 #endif /* CONFIG_PCI_IOV */
10166
10167         pfs_found++;
10168
10169         i40e_dbg_pf_init(pf);
10170
10171         /* tell the firmware that we're starting */
10172         i40e_send_version(pf);
10173
10174         /* since everything's happy, start the service_task timer */
10175         mod_timer(&pf->service_timer,
10176                   round_jiffies(jiffies + pf->service_timer_period));
10177
10178 #ifdef I40E_FCOE
10179         /* create FCoE interface */
10180         i40e_fcoe_vsi_setup(pf);
10181
10182 #endif
10183         /* Get the negotiated link width and speed from PCI config space */
10184         pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
10185
10186         i40e_set_pci_config_data(hw, link_status);
10187
10188         dev_info(&pdev->dev, "PCI-Express: %s %s\n",
10189                 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
10190                  hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
10191                  hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
10192                  "Unknown"),
10193                 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
10194                  hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
10195                  hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
10196                  hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
10197                  "Unknown"));
10198
10199         if (hw->bus.width < i40e_bus_width_pcie_x8 ||
10200             hw->bus.speed < i40e_bus_speed_8000) {
10201                 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
10202                 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
10203         }
10204
10205         /* get the requested speeds from the fw */
10206         err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
10207         if (err)
10208                 dev_info(&pf->pdev->dev,
10209                          "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
10210                          i40e_stat_str(&pf->hw, err),
10211                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10212         pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
10213
10214         /* print a string summarizing features */
10215         i40e_print_features(pf);
10216
10217         return 0;
10218
10219         /* Unwind what we've done if something failed in the setup */
10220 err_vsis:
10221         set_bit(__I40E_DOWN, &pf->state);
10222         i40e_clear_interrupt_scheme(pf);
10223         kfree(pf->vsi);
10224 err_switch_setup:
10225         i40e_reset_interrupt_capability(pf);
10226         del_timer_sync(&pf->service_timer);
10227 err_mac_addr:
10228 err_configure_lan_hmc:
10229         (void)i40e_shutdown_lan_hmc(hw);
10230 err_init_lan_hmc:
10231         kfree(pf->qp_pile);
10232 err_sw_init:
10233 err_adminq_setup:
10234         (void)i40e_shutdown_adminq(hw);
10235 err_pf_reset:
10236         iounmap(hw->hw_addr);
10237 err_ioremap:
10238         kfree(pf);
10239 err_pf_alloc:
10240         pci_disable_pcie_error_reporting(pdev);
10241         pci_release_selected_regions(pdev,
10242                                      pci_select_bars(pdev, IORESOURCE_MEM));
10243 err_pci_reg:
10244 err_dma:
10245         pci_disable_device(pdev);
10246         return err;
10247 }
10248
10249 /**
10250  * i40e_remove - Device removal routine
10251  * @pdev: PCI device information struct
10252  *
10253  * i40e_remove is called by the PCI subsystem to alert the driver
10254  * that is should release a PCI device.  This could be caused by a
10255  * Hot-Plug event, or because the driver is going to be removed from
10256  * memory.
10257  **/
10258 static void i40e_remove(struct pci_dev *pdev)
10259 {
10260         struct i40e_pf *pf = pci_get_drvdata(pdev);
10261         i40e_status ret_code;
10262         int i;
10263
10264         i40e_dbg_pf_exit(pf);
10265
10266         i40e_ptp_stop(pf);
10267
10268         /* no more scheduling of any task */
10269         set_bit(__I40E_DOWN, &pf->state);
10270         del_timer_sync(&pf->service_timer);
10271         cancel_work_sync(&pf->service_task);
10272         i40e_fdir_teardown(pf);
10273
10274         if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
10275                 i40e_free_vfs(pf);
10276                 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
10277         }
10278
10279         i40e_fdir_teardown(pf);
10280
10281         /* If there is a switch structure or any orphans, remove them.
10282          * This will leave only the PF's VSI remaining.
10283          */
10284         for (i = 0; i < I40E_MAX_VEB; i++) {
10285                 if (!pf->veb[i])
10286                         continue;
10287
10288                 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
10289                     pf->veb[i]->uplink_seid == 0)
10290                         i40e_switch_branch_release(pf->veb[i]);
10291         }
10292
10293         /* Now we can shutdown the PF's VSI, just before we kill
10294          * adminq and hmc.
10295          */
10296         if (pf->vsi[pf->lan_vsi])
10297                 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
10298
10299         /* shutdown and destroy the HMC */
10300         if (pf->hw.hmc.hmc_obj) {
10301                 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
10302                 if (ret_code)
10303                         dev_warn(&pdev->dev,
10304                                  "Failed to destroy the HMC resources: %d\n",
10305                                  ret_code);
10306         }
10307
10308         /* shutdown the adminq */
10309         ret_code = i40e_shutdown_adminq(&pf->hw);
10310         if (ret_code)
10311                 dev_warn(&pdev->dev,
10312                          "Failed to destroy the Admin Queue resources: %d\n",
10313                          ret_code);
10314
10315         /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
10316         i40e_clear_interrupt_scheme(pf);
10317         for (i = 0; i < pf->num_alloc_vsi; i++) {
10318                 if (pf->vsi[i]) {
10319                         i40e_vsi_clear_rings(pf->vsi[i]);
10320                         i40e_vsi_clear(pf->vsi[i]);
10321                         pf->vsi[i] = NULL;
10322                 }
10323         }
10324
10325         for (i = 0; i < I40E_MAX_VEB; i++) {
10326                 kfree(pf->veb[i]);
10327                 pf->veb[i] = NULL;
10328         }
10329
10330         kfree(pf->qp_pile);
10331         kfree(pf->vsi);
10332
10333         iounmap(pf->hw.hw_addr);
10334         kfree(pf);
10335         pci_release_selected_regions(pdev,
10336                                      pci_select_bars(pdev, IORESOURCE_MEM));
10337
10338         pci_disable_pcie_error_reporting(pdev);
10339         pci_disable_device(pdev);
10340 }
10341
10342 /**
10343  * i40e_pci_error_detected - warning that something funky happened in PCI land
10344  * @pdev: PCI device information struct
10345  *
10346  * Called to warn that something happened and the error handling steps
10347  * are in progress.  Allows the driver to quiesce things, be ready for
10348  * remediation.
10349  **/
10350 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
10351                                                 enum pci_channel_state error)
10352 {
10353         struct i40e_pf *pf = pci_get_drvdata(pdev);
10354
10355         dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
10356
10357         /* shutdown all operations */
10358         if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
10359                 rtnl_lock();
10360                 i40e_prep_for_reset(pf);
10361                 rtnl_unlock();
10362         }
10363
10364         /* Request a slot reset */
10365         return PCI_ERS_RESULT_NEED_RESET;
10366 }
10367
10368 /**
10369  * i40e_pci_error_slot_reset - a PCI slot reset just happened
10370  * @pdev: PCI device information struct
10371  *
10372  * Called to find if the driver can work with the device now that
10373  * the pci slot has been reset.  If a basic connection seems good
10374  * (registers are readable and have sane content) then return a
10375  * happy little PCI_ERS_RESULT_xxx.
10376  **/
10377 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
10378 {
10379         struct i40e_pf *pf = pci_get_drvdata(pdev);
10380         pci_ers_result_t result;
10381         int err;
10382         u32 reg;
10383
10384         dev_info(&pdev->dev, "%s\n", __func__);
10385         if (pci_enable_device_mem(pdev)) {
10386                 dev_info(&pdev->dev,
10387                          "Cannot re-enable PCI device after reset.\n");
10388                 result = PCI_ERS_RESULT_DISCONNECT;
10389         } else {
10390                 pci_set_master(pdev);
10391                 pci_restore_state(pdev);
10392                 pci_save_state(pdev);
10393                 pci_wake_from_d3(pdev, false);
10394
10395                 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
10396                 if (reg == 0)
10397                         result = PCI_ERS_RESULT_RECOVERED;
10398                 else
10399                         result = PCI_ERS_RESULT_DISCONNECT;
10400         }
10401
10402         err = pci_cleanup_aer_uncorrect_error_status(pdev);
10403         if (err) {
10404                 dev_info(&pdev->dev,
10405                          "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
10406                          err);
10407                 /* non-fatal, continue */
10408         }
10409
10410         return result;
10411 }
10412
10413 /**
10414  * i40e_pci_error_resume - restart operations after PCI error recovery
10415  * @pdev: PCI device information struct
10416  *
10417  * Called to allow the driver to bring things back up after PCI error
10418  * and/or reset recovery has finished.
10419  **/
10420 static void i40e_pci_error_resume(struct pci_dev *pdev)
10421 {
10422         struct i40e_pf *pf = pci_get_drvdata(pdev);
10423
10424         dev_info(&pdev->dev, "%s\n", __func__);
10425         if (test_bit(__I40E_SUSPENDED, &pf->state))
10426                 return;
10427
10428         rtnl_lock();
10429         i40e_handle_reset_warning(pf);
10430         rtnl_lock();
10431 }
10432
10433 /**
10434  * i40e_shutdown - PCI callback for shutting down
10435  * @pdev: PCI device information struct
10436  **/
10437 static void i40e_shutdown(struct pci_dev *pdev)
10438 {
10439         struct i40e_pf *pf = pci_get_drvdata(pdev);
10440         struct i40e_hw *hw = &pf->hw;
10441
10442         set_bit(__I40E_SUSPENDED, &pf->state);
10443         set_bit(__I40E_DOWN, &pf->state);
10444         rtnl_lock();
10445         i40e_prep_for_reset(pf);
10446         rtnl_unlock();
10447
10448         wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10449         wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10450
10451         i40e_clear_interrupt_scheme(pf);
10452
10453         if (system_state == SYSTEM_POWER_OFF) {
10454                 pci_wake_from_d3(pdev, pf->wol_en);
10455                 pci_set_power_state(pdev, PCI_D3hot);
10456         }
10457 }
10458
10459 #ifdef CONFIG_PM
10460 /**
10461  * i40e_suspend - PCI callback for moving to D3
10462  * @pdev: PCI device information struct
10463  **/
10464 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
10465 {
10466         struct i40e_pf *pf = pci_get_drvdata(pdev);
10467         struct i40e_hw *hw = &pf->hw;
10468
10469         set_bit(__I40E_SUSPENDED, &pf->state);
10470         set_bit(__I40E_DOWN, &pf->state);
10471         del_timer_sync(&pf->service_timer);
10472         cancel_work_sync(&pf->service_task);
10473         i40e_fdir_teardown(pf);
10474
10475         rtnl_lock();
10476         i40e_prep_for_reset(pf);
10477         rtnl_unlock();
10478
10479         wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10480         wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10481
10482         pci_wake_from_d3(pdev, pf->wol_en);
10483         pci_set_power_state(pdev, PCI_D3hot);
10484
10485         return 0;
10486 }
10487
10488 /**
10489  * i40e_resume - PCI callback for waking up from D3
10490  * @pdev: PCI device information struct
10491  **/
10492 static int i40e_resume(struct pci_dev *pdev)
10493 {
10494         struct i40e_pf *pf = pci_get_drvdata(pdev);
10495         u32 err;
10496
10497         pci_set_power_state(pdev, PCI_D0);
10498         pci_restore_state(pdev);
10499         /* pci_restore_state() clears dev->state_saves, so
10500          * call pci_save_state() again to restore it.
10501          */
10502         pci_save_state(pdev);
10503
10504         err = pci_enable_device_mem(pdev);
10505         if (err) {
10506                 dev_err(&pdev->dev,
10507                         "%s: Cannot enable PCI device from suspend\n",
10508                         __func__);
10509                 return err;
10510         }
10511         pci_set_master(pdev);
10512
10513         /* no wakeup events while running */
10514         pci_wake_from_d3(pdev, false);
10515
10516         /* handling the reset will rebuild the device state */
10517         if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
10518                 clear_bit(__I40E_DOWN, &pf->state);
10519                 rtnl_lock();
10520                 i40e_reset_and_rebuild(pf, false);
10521                 rtnl_unlock();
10522         }
10523
10524         return 0;
10525 }
10526
10527 #endif
/* PCI AER (Advanced Error Reporting) recovery callbacks */
static const struct pci_error_handlers i40e_err_handler = {
        .error_detected = i40e_pci_error_detected,
        .slot_reset = i40e_pci_error_slot_reset,
        .resume = i40e_pci_error_resume,
};
10533
/* PCI driver entry points registered with the PCI core */
static struct pci_driver i40e_driver = {
        .name     = i40e_driver_name,
        .id_table = i40e_pci_tbl,
        .probe    = i40e_probe,
        .remove   = i40e_remove,
#ifdef CONFIG_PM
        .suspend  = i40e_suspend,
        .resume   = i40e_resume,
#endif
        .shutdown = i40e_shutdown,
        .err_handler = &i40e_err_handler,
        .sriov_configure = i40e_pci_sriov_configure,
};
10547
10548 /**
10549  * i40e_init_module - Driver registration routine
10550  *
10551  * i40e_init_module is the first routine called when the driver is
10552  * loaded. All it does is register with the PCI subsystem.
10553  **/
10554 static int __init i40e_init_module(void)
10555 {
10556         pr_info("%s: %s - version %s\n", i40e_driver_name,
10557                 i40e_driver_string, i40e_driver_version_str);
10558         pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
10559
10560         i40e_dbg_init();
10561         return pci_register_driver(&i40e_driver);
10562 }
10563 module_init(i40e_init_module);
10564
10565 /**
10566  * i40e_exit_module - Driver exit cleanup routine
10567  *
10568  * i40e_exit_module is called just before the driver is removed
10569  * from memory.
10570  **/
10571 static void __exit i40e_exit_module(void)
10572 {
10573         pci_unregister_driver(&i40e_driver);
10574         i40e_dbg_exit();
10575 }
10576 module_exit(i40e_exit_module);