/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

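/* Free a queue's DMA-coherent ring memory, if any was allocated */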
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

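/* Zero the queue struct and allocate its ring as zeroed DMA-coherent memory */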
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

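/* Toggle the host interrupt enable bit in PCI config space; write back
 * only if the requested state differs from the current one.
 */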
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

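/* Enable/disable interrupts via a FW cmd; fall back to the membar control
 * register if the cmd fails.
 */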
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

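/* Ring the RQ doorbell; the wmb() ensures posted descriptors are visible
 * to HW before the doorbell write.
 */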
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

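/* ndo_set_mac_address handler: program the new primary MAC and remove the
 * old one, with special handling for BE VFs and Lancer VFs.
 */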
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For a BE VF, the MAC address is already activated by the PF.
         * Hence the only operation left is updating netdev->dev_addr.
         * Update it only if the user passes the same MAC that was used
         * while configuring the VF MAC from the PF (hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

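/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit accumulator */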
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

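/* Copy stats from the version-specific FW response into adapter->drv_stats */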
void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* this erx HW counter wraps around after 65535;
                         * the driver accumulates it into a 32-bit value
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                             (u16)erx->rx_drops_no_fragments[rxo->q.id]);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even number */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

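/* Fill a WRB with the DMA address and length of one tx fragment */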
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid;
}

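/* Build the header WRB: checksum/LSO/VLAN offload flags, WRB count and
 * total frame length.
 */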
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

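/* DMA-unmap a tx fragment described by a (LE-formatted) WRB */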
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

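/* Map the skb's head and frags and post one WRB per fragment (plus the
 * header WRB and an optional dummy); on a mapping error, unmap everything
 * posted so far and return 0.
 */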
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

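/* Insert the VLAN tag into the packet data itself (used by the HW csum bug
 * workaround in be_xmit()).
 */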
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (skb)
                        skb->vlan_tci = 0;
        }

        return skb;
}

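/* ndo_start_xmit handler */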
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* HW has a bug that considers padding bytes as legal
         * and modifies the IPv4 hdr's 'tot_len' field
         */
        if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
                        is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate the checksum for VLAN
         * pkts even though checksumming is disabled.
         * Manually insert the VLAN tag in the pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        be_vlan_tag_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit, which will wake up the
                 * queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

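/* ndo_set_rx_mode handler: program promisc/multicast/unicast filters,
 * falling back to promiscuous modes when HW filter slots run out.
 */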
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

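/* ndo_set_vf_mac handler: replace a VF's MAC, removing the old pmac entry */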
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is a new value, program it; else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset transparent VLAN tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

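/* Count this PF's VFs: all of them, or only those assigned to a guest */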
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

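/* Adaptive interrupt coalescing: recompute the EQ delay from the rx pkt
 * rate and update it via a FW cmd when it changes.
 */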
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

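/* Return the page_info for a completed rx frag; unmap the page on its
 * last use and decrement the RQ's used count.
 */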
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
                             struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(napi);
        if (!skb) {
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        napi_gro_frags(napi);
}

1449 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1450                                  struct be_rx_compl_info *rxcp)
1451 {
1452         rxcp->pkt_size =
1453                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1454         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1455         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1456         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1457         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1458         rxcp->ip_csum =
1459                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1460         rxcp->l4_csum =
1461                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1462         rxcp->ipv6 =
1463                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1464         rxcp->rxq_idx =
1465                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1466         rxcp->num_rcvd =
1467                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1468         rxcp->pkt_type =
1469                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1470         rxcp->rss_hash =
1471                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1472         if (rxcp->vlanf) {
1473                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1474                                           compl);
1475                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1476                                                compl);
1477         }
1478         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1479 }
1480
1481 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1482                                  struct be_rx_compl_info *rxcp)
1483 {
1484         rxcp->pkt_size =
1485                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1486         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1487         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1488         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1489         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1490         rxcp->ip_csum =
1491                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1492         rxcp->l4_csum =
1493                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1494         rxcp->ipv6 =
1495                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1496         rxcp->rxq_idx =
1497                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1498         rxcp->num_rcvd =
1499                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1500         rxcp->pkt_type =
1501                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1502         rxcp->rss_hash =
1503                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1504         if (rxcp->vlanf) {
1505                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1506                                           compl);
1507                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1508                                                compl);
1509         }
1510         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1511 }
1512
1513 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1514 {
1515         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1516         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1517         struct be_adapter *adapter = rxo->adapter;
1518
1519         /* When checking the valid bit, it is OK to use either definition, as
1520          * the valid bit is at the same position in both v0 and v1 Rx compl */
1521         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1522                 return NULL;
1523
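        /* Read the rest of the compl only after the valid bit has been
         * seen; rmb() orders the loads accordingly.
         */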
1524         rmb();
1525         be_dws_le_to_cpu(compl, sizeof(*compl));
1526
1527         if (adapter->be3_native)
1528                 be_parse_rx_compl_v1(compl, rxcp);
1529         else
1530                 be_parse_rx_compl_v0(compl, rxcp);
1531
1532         if (rxcp->vlanf) {
1533                 /* vlanf could be wrongly set in some cards;
1534                  * ignore it if vtm is not set */
1535                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1536                         rxcp->vlanf = 0;
1537
1538                 if (!lancer_chip(adapter))
1539                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1540
1541                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1542                     !adapter->vlan_tag[rxcp->vlan_tag])
1543                         rxcp->vlanf = 0;
1544         }
1545
1546         /* As the compl has been parsed, reset it; we won't touch it again */
1547         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1548
1549         queue_tail_inc(&rxo->cq);
1550         return rxcp;
1551 }
1552
1553 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1554 {
1555         u32 order = get_order(size);
1556
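        /* A multi-page allocation must be a compound page so that the
         * per-frag get_page()/put_page() refcounting covers the whole
         * allocation.
         */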
1557         if (order > 0)
1558                 gfp |= __GFP_COMP;
1559         return  alloc_pages(gfp, order);
1560 }
1561
1562 /*
1563  * Allocate a page, split it to fragments of size rx_frag_size and post as
1564  * receive buffers to BE
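 *
 * For example, assuming 4 KiB pages and the default 2 KiB frag size,
 * big_page_size is 4 KiB and each page yields two frags at offsets 0
 * and 2048; each frag after the first takes an extra page reference
 * via get_page(), and a page's final frag is flagged last_page_user.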
1565  */
1566 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1567 {
1568         struct be_adapter *adapter = rxo->adapter;
1569         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1570         struct be_queue_info *rxq = &rxo->q;
1571         struct page *pagep = NULL;
1572         struct be_eth_rx_d *rxd;
1573         u64 page_dmaaddr = 0, frag_dmaaddr;
1574         u32 posted, page_offset = 0;
1575
1576         page_info = &rxo->page_info_tbl[rxq->head];
1577         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1578                 if (!pagep) {
1579                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1580                         if (unlikely(!pagep)) {
1581                                 rx_stats(rxo)->rx_post_fail++;
1582                                 break;
1583                         }
1584                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1585                                                     0, adapter->big_page_size,
1586                                                     DMA_FROM_DEVICE);
1587                         page_info->page_offset = 0;
1588                 } else {
1589                         get_page(pagep);
1590                         page_info->page_offset = page_offset + rx_frag_size;
1591                 }
1592                 page_offset = page_info->page_offset;
1593                 page_info->page = pagep;
1594                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1595                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1596
1597                 rxd = queue_head_node(rxq);
1598                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1599                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1600
1601                 /* Any space left in the current big page for another frag? */
1602                 if ((page_offset + rx_frag_size + rx_frag_size) >
1603                                         adapter->big_page_size) {
1604                         pagep = NULL;
1605                         page_info->last_page_user = true;
1606                 }
1607
1608                 prev_page_info = page_info;
1609                 queue_head_inc(rxq);
1610                 page_info = &rxo->page_info_tbl[rxq->head];
1611         }
1612         if (pagep)
1613                 prev_page_info->last_page_user = true;
1614
1615         if (posted) {
1616                 atomic_add(posted, &rxq->used);
1617                 be_rxq_notify(adapter, rxq->id, posted);
1618         } else if (atomic_read(&rxq->used) == 0) {
1619                 /* Let be_worker replenish when memory is available */
1620                 rxo->rx_post_starved = true;
1621         }
1622 }
1623
1624 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1625 {
1626         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1627
1628         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1629                 return NULL;
1630
1631         rmb();
1632         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1633
1634         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1635
1636         queue_tail_inc(tx_cq);
1637         return txcp;
1638 }
1639
1640 static u16 be_tx_compl_process(struct be_adapter *adapter,
1641                 struct be_tx_obj *txo, u16 last_index)
1642 {
1643         struct be_queue_info *txq = &txo->q;
1644         struct be_eth_wrb *wrb;
1645         struct sk_buff **sent_skbs = txo->sent_skb_list;
1646         struct sk_buff *sent_skb;
1647         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1648         bool unmap_skb_hdr = true;
1649
1650         sent_skb = sent_skbs[txq->tail];
1651         BUG_ON(!sent_skb);
1652         sent_skbs[txq->tail] = NULL;
1653
1654         /* skip header wrb */
1655         queue_tail_inc(txq);
1656
1657         do {
1658                 cur_index = txq->tail;
1659                 wrb = queue_tail_node(txq);
1660                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1661                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1662                 unmap_skb_hdr = false;
1663
1664                 num_wrbs++;
1665                 queue_tail_inc(txq);
1666         } while (cur_index != last_index);
1667
1668         kfree_skb(sent_skb);
1669         return num_wrbs;
1670 }
1671
1672 /* Return the number of events in the event queue */
1673 static inline int events_get(struct be_eq_obj *eqo)
1674 {
1675         struct be_eq_entry *eqe;
1676         int num = 0;
1677
1678         do {
1679                 eqe = queue_tail_node(&eqo->q);
1680                 if (eqe->evt == 0)
1681                         break;
1682
1683                 rmb();
1684                 eqe->evt = 0;
1685                 num++;
1686                 queue_tail_inc(&eqo->q);
1687         } while (true);
1688
1689         return num;
1690 }
1691
1692 /* Leaves the EQ in a disarmed state */
1693 static void be_eq_clean(struct be_eq_obj *eqo)
1694 {
1695         int num = events_get(eqo);
1696
1697         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1698 }
1699
1700 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1701 {
1702         struct be_rx_page_info *page_info;
1703         struct be_queue_info *rxq = &rxo->q;
1704         struct be_queue_info *rx_cq = &rxo->cq;
1705         struct be_rx_compl_info *rxcp;
1706         struct be_adapter *adapter = rxo->adapter;
1707         int flush_wait = 0;
1708         u16 tail;
1709
1710         /* Consume pending rx completions.
1711          * Wait for the flush completion (identified by zero num_rcvd)
1712          * to arrive. Notify CQ even when there are no more CQ entries
1713          * for HW to flush partially coalesced CQ entries.
1714          * In Lancer, there is no need to wait for flush compl.
1715          */
1716         for (;;) {
1717                 rxcp = be_rx_compl_get(rxo);
1718                 if (rxcp == NULL) {
1719                         if (lancer_chip(adapter))
1720                                 break;
1721
1722                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1723                                 dev_warn(&adapter->pdev->dev,
1724                                          "did not receive flush compl\n");
1725                                 break;
1726                         }
1727                         be_cq_notify(adapter, rx_cq->id, true, 0);
1728                         mdelay(1);
1729                 } else {
1730                         be_rx_compl_discard(rxo, rxcp);
1731                         be_cq_notify(adapter, rx_cq->id, true, 1);
1732                         if (rxcp->num_rcvd == 0)
1733                                 break;
1734                 }
1735         }
1736
1737         /* After cleanup, leave the CQ in unarmed state */
1738         be_cq_notify(adapter, rx_cq->id, false, 0);
1739
1740         /* Then free posted rx buffers that were not used */
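        /* The oldest unused buffer sits 'used' slots behind the current
         * head, modulo the ring size.
         */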
1741         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1742         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1743                 page_info = get_rx_page_info(rxo, tail);
1744                 put_page(page_info->page);
1745                 memset(page_info, 0, sizeof(*page_info));
1746         }
1747         BUG_ON(atomic_read(&rxq->used));
1748         rxq->tail = rxq->head = 0;
1749 }
1750
1751 static void be_tx_compl_clean(struct be_adapter *adapter)
1752 {
1753         struct be_tx_obj *txo;
1754         struct be_queue_info *txq;
1755         struct be_eth_tx_compl *txcp;
1756         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1757         struct sk_buff *sent_skb;
1758         bool dummy_wrb;
1759         int i, pending_txqs;
1760
1761         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1762         do {
1763                 pending_txqs = adapter->num_tx_qs;
1764
1765                 for_all_tx_queues(adapter, txo, i) {
1766                         txq = &txo->q;
1767                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1768                                 end_idx =
1769                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1770                                                       wrb_index, txcp);
1771                                 num_wrbs += be_tx_compl_process(adapter, txo,
1772                                                                 end_idx);
1773                                 cmpl++;
1774                         }
1775                         if (cmpl) {
1776                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1777                                 atomic_sub(num_wrbs, &txq->used);
1778                                 cmpl = 0;
1779                                 num_wrbs = 0;
1780                         }
1781                         if (atomic_read(&txq->used) == 0)
1782                                 pending_txqs--;
1783                 }
1784
1785                 if (pending_txqs == 0 || ++timeo > 200)
1786                         break;
1787
1788                 mdelay(1);
1789         } while (true);
1790
1791         for_all_tx_queues(adapter, txo, i) {
1792                 txq = &txo->q;
1793                 if (atomic_read(&txq->used))
1794                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1795                                 atomic_read(&txq->used));
1796
1797                 /* free posted tx for which compls will never arrive */
1798                 while (atomic_read(&txq->used)) {
1799                         sent_skb = txo->sent_skb_list[txq->tail];
1800                         end_idx = txq->tail;
1801                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1802                                                    &dummy_wrb);
1803                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1804                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1805                         atomic_sub(num_wrbs, &txq->used);
1806                 }
1807         }
1808 }
1809
1810 static void be_evt_queues_destroy(struct be_adapter *adapter)
1811 {
1812         struct be_eq_obj *eqo;
1813         int i;
1814
1815         for_all_evt_queues(adapter, eqo, i) {
1816                 if (eqo->q.created) {
1817                         be_eq_clean(eqo);
1818                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1819                 }
1820                 be_queue_free(adapter, &eqo->q);
1821         }
1822 }
1823
1824 static int be_evt_queues_create(struct be_adapter *adapter)
1825 {
1826         struct be_queue_info *eq;
1827         struct be_eq_obj *eqo;
1828         int i, rc;
1829
1830         adapter->num_evt_qs = num_irqs(adapter);
1831
1832         for_all_evt_queues(adapter, eqo, i) {
1833                 eqo->adapter = adapter;
1834                 eqo->tx_budget = BE_TX_BUDGET;
1835                 eqo->idx = i;
1836                 eqo->max_eqd = BE_MAX_EQD;
1837                 eqo->enable_aic = true;
1838
1839                 eq = &eqo->q;
1840                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1841                                         sizeof(struct be_eq_entry));
1842                 if (rc)
1843                         return rc;
1844
1845                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1846                 if (rc)
1847                         return rc;
1848         }
1849         return 0;
1850 }
1851
1852 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1853 {
1854         struct be_queue_info *q;
1855
1856         q = &adapter->mcc_obj.q;
1857         if (q->created)
1858                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1859         be_queue_free(adapter, q);
1860
1861         q = &adapter->mcc_obj.cq;
1862         if (q->created)
1863                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1864         be_queue_free(adapter, q);
1865 }
1866
1867 /* Must be called only after TX qs are created as MCC shares TX EQ */
1868 static int be_mcc_queues_create(struct be_adapter *adapter)
1869 {
1870         struct be_queue_info *q, *cq;
1871
1872         cq = &adapter->mcc_obj.cq;
1873         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1874                         sizeof(struct be_mcc_compl)))
1875                 goto err;
1876
1877         /* Use the default EQ for MCC completions */
1878         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1879                 goto mcc_cq_free;
1880
1881         q = &adapter->mcc_obj.q;
1882         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1883                 goto mcc_cq_destroy;
1884
1885         if (be_cmd_mccq_create(adapter, q, cq))
1886                 goto mcc_q_free;
1887
1888         return 0;
1889
1890 mcc_q_free:
1891         be_queue_free(adapter, q);
1892 mcc_cq_destroy:
1893         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1894 mcc_cq_free:
1895         be_queue_free(adapter, cq);
1896 err:
1897         return -1;
1898 }
1899
1900 static void be_tx_queues_destroy(struct be_adapter *adapter)
1901 {
1902         struct be_queue_info *q;
1903         struct be_tx_obj *txo;
1904         u8 i;
1905
1906         for_all_tx_queues(adapter, txo, i) {
1907                 q = &txo->q;
1908                 if (q->created)
1909                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1910                 be_queue_free(adapter, q);
1911
1912                 q = &txo->cq;
1913                 if (q->created)
1914                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1915                 be_queue_free(adapter, q);
1916         }
1917 }
1918
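/* A single TXQ is used in configs that can't support multiple TX rings:
 * SR-IOV and multi-channel modes, BE2 chips, and non-Lancer VFs.
 */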
1919 static int be_num_txqs_want(struct be_adapter *adapter)
1920 {
1921         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1922             be_is_mc(adapter) ||
1923             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1924             BE2_chip(adapter))
1925                 return 1;
1926         else
1927                 return adapter->max_tx_queues;
1928 }
1929
1930 static int be_tx_cqs_create(struct be_adapter *adapter)
1931 {
1932         struct be_queue_info *cq, *eq;
1933         int status;
1934         struct be_tx_obj *txo;
1935         u8 i;
1936
1937         adapter->num_tx_qs = be_num_txqs_want(adapter);
1938         if (adapter->num_tx_qs != MAX_TX_QS) {
1939                 rtnl_lock();
1940                 netif_set_real_num_tx_queues(adapter->netdev,
1941                         adapter->num_tx_qs);
1942                 rtnl_unlock();
1943         }
1944
1945         for_all_tx_queues(adapter, txo, i) {
1946                 cq = &txo->cq;
1947                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1948                                         sizeof(struct be_eth_tx_compl));
1949                 if (status)
1950                         return status;
1951
1952                 /* If num_evt_qs is less than num_tx_qs, then more than
1953                  * one txq will share an eq
1954                  */
1955                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1956                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1957                 if (status)
1958                         return status;
1959         }
1960         return 0;
1961 }
1962
1963 static int be_tx_qs_create(struct be_adapter *adapter)
1964 {
1965         struct be_tx_obj *txo;
1966         int i, status;
1967
1968         for_all_tx_queues(adapter, txo, i) {
1969                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1970                                         sizeof(struct be_eth_wrb));
1971                 if (status)
1972                         return status;
1973
1974                 status = be_cmd_txq_create(adapter, txo);
1975                 if (status)
1976                         return status;
1977         }
1978
1979         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1980                  adapter->num_tx_qs);
1981         return 0;
1982 }
1983
1984 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1985 {
1986         struct be_queue_info *q;
1987         struct be_rx_obj *rxo;
1988         int i;
1989
1990         for_all_rx_queues(adapter, rxo, i) {
1991                 q = &rxo->cq;
1992                 if (q->created)
1993                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1994                 be_queue_free(adapter, q);
1995         }
1996 }
1997
1998 static int be_rx_cqs_create(struct be_adapter *adapter)
1999 {
2000         struct be_queue_info *eq, *cq;
2001         struct be_rx_obj *rxo;
2002         int rc, i;
2003
2004         /* We'll create as many RSS rings as there are irqs.
2005          * But when there's only one irq there's no use creating RSS rings
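          * (in the RSS case one extra ring is created to serve as the
          * default non-RSS RX queue, hence num_irqs + 1)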
2006          */
2007         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2008                                 num_irqs(adapter) + 1 : 1;
2009         if (adapter->num_rx_qs != MAX_RX_QS) {
2010                 rtnl_lock();
2011                 netif_set_real_num_rx_queues(adapter->netdev,
2012                                              adapter->num_rx_qs);
2013                 rtnl_unlock();
2014         }
2015
2016         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2017         for_all_rx_queues(adapter, rxo, i) {
2018                 rxo->adapter = adapter;
2019                 cq = &rxo->cq;
2020                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2021                                 sizeof(struct be_eth_rx_compl));
2022                 if (rc)
2023                         return rc;
2024
2025                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2026                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2027                 if (rc)
2028                         return rc;
2029         }
2030
2031         dev_info(&adapter->pdev->dev,
2032                  "created %d RSS queue(s) and 1 default RX queue\n",
2033                  adapter->num_rx_qs - 1);
2034         return 0;
2035 }
2036
2037 static irqreturn_t be_intx(int irq, void *dev)
2038 {
2039         struct be_eq_obj *eqo = dev;
2040         struct be_adapter *adapter = eqo->adapter;
2041         int num_evts = 0;
2042
2043         /* IRQ is not expected when NAPI is scheduled as the EQ
2044          * will not be armed.
2045          * But, this can happen on Lancer INTx where it takes
2046          * a while to de-assert INTx or in BE2 where occasionally
2047          * an interrupt may be raised even when EQ is unarmed.
2048          * If NAPI is already scheduled, then counting & notifying
2049          * events will orphan them.
2050          */
2051         if (napi_schedule_prep(&eqo->napi)) {
2052                 num_evts = events_get(eqo);
2053                 __napi_schedule(&eqo->napi);
2054                 if (num_evts)
2055                         eqo->spurious_intr = 0;
2056         }
2057         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2058
2059         /* Return IRQ_HANDLED only for the first spurious intr
2060          * after a valid intr to stop the kernel from branding
2061          * this irq as a bad one!
2062          */
2063         if (num_evts || eqo->spurious_intr++ == 0)
2064                 return IRQ_HANDLED;
2065         else
2066                 return IRQ_NONE;
2067 }
2068
2069 static irqreturn_t be_msix(int irq, void *dev)
2070 {
2071         struct be_eq_obj *eqo = dev;
2072
2073         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2074         napi_schedule(&eqo->napi);
2075         return IRQ_HANDLED;
2076 }
2077
2078 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2079 {
2080         return rxcp->tcpf && !rxcp->err;
2081 }
2082
2083 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2084                         int budget)
2085 {
2086         struct be_adapter *adapter = rxo->adapter;
2087         struct be_queue_info *rx_cq = &rxo->cq;
2088         struct be_rx_compl_info *rxcp;
2089         u32 work_done;
2090
2091         for (work_done = 0; work_done < budget; work_done++) {
2092                 rxcp = be_rx_compl_get(rxo);
2093                 if (!rxcp)
2094                         break;
2095
2096                 /* Is it a flush compl that has no data? */
2097                 if (unlikely(rxcp->num_rcvd == 0))
2098                         goto loop_continue;
2099
2100                 /* Discard compls with partial DMA (Lancer B0) */
2101                 if (unlikely(!rxcp->pkt_size)) {
2102                         be_rx_compl_discard(rxo, rxcp);
2103                         goto loop_continue;
2104                 }
2105
2106                 /* On BE drop pkts that arrive due to imperfect filtering in
2107                  * promiscuous mode on some SKUs
2108                  */
2109                 if (unlikely(rxcp->port != adapter->port_num &&
2110                                 !lancer_chip(adapter))) {
2111                         be_rx_compl_discard(rxo, rxcp);
2112                         goto loop_continue;
2113                 }
2114
2115                 if (do_gro(rxcp))
2116                         be_rx_compl_process_gro(rxo, napi, rxcp);
2117                 else
2118                         be_rx_compl_process(rxo, rxcp);
2119 loop_continue:
2120                 be_rx_stats_update(rxo, rxcp);
2121         }
2122
2123         if (work_done) {
2124                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2125
2126                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2127                         be_post_rx_frags(rxo, GFP_ATOMIC);
2128         }
2129
2130         return work_done;
2131 }
2132
2133 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2134                           int budget, int idx)
2135 {
2136         struct be_eth_tx_compl *txcp;
2137         int num_wrbs = 0, work_done;
2138
2139         for (work_done = 0; work_done < budget; work_done++) {
2140                 txcp = be_tx_compl_get(&txo->cq);
2141                 if (!txcp)
2142                         break;
2143                 num_wrbs += be_tx_compl_process(adapter, txo,
2144                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2145                                         wrb_index, txcp));
2146         }
2147
2148         if (work_done) {
2149                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2150                 atomic_sub(num_wrbs, &txo->q.used);
2151
2152                 /* As Tx wrbs have been freed up, wake up netdev queue
2153                  * if it was stopped due to lack of tx wrbs.  */
2154                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2155                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2156                         netif_wake_subqueue(adapter->netdev, idx);
2157                 }
2158
2159                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2160                 tx_stats(txo)->tx_compl += work_done;
2161                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2162         }
2163         return (work_done < budget); /* Done */
2164 }
2165
2166 int be_poll(struct napi_struct *napi, int budget)
2167 {
2168         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2169         struct be_adapter *adapter = eqo->adapter;
2170         int max_work = 0, work, i, num_evts;
2171         bool tx_done;
2172
2173         num_evts = events_get(eqo);
2174
2175         /* Process all TXQs serviced by this EQ */
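        /* TXQs are striped across EQs: e.g. with 2 EQs and 4 TXQs, EQ0
         * polls txq0/txq2 while EQ1 polls txq1/txq3.
         */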
2176         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2177                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2178                                         eqo->tx_budget, i);
2179                 if (!tx_done)
2180                         max_work = budget;
2181         }
2182
2183         /* This loop will iterate twice for EQ0, in which
2184          * completions of the last RXQ (the default one) are also processed.
2185          * For other EQs the loop iterates only once.
2186          */
2187         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2188                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2189                 max_work = max(work, max_work);
2190         }
2191
2192         if (is_mcc_eqo(eqo))
2193                 be_process_mcc(adapter);
2194
2195         if (max_work < budget) {
2196                 napi_complete(napi);
2197                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2198         } else {
2199                 /* As we'll continue in polling mode, count and clear events */
2200                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2201         }
2202         return max_work;
2203 }
2204
2205 void be_detect_error(struct be_adapter *adapter)
2206 {
2207         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2208         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2209         u32 i;
2210
2211         if (be_hw_error(adapter))
2212                 return;
2213
2214         if (lancer_chip(adapter)) {
2215                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2216                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2217                         sliport_err1 = ioread32(adapter->db +
2218                                         SLIPORT_ERROR1_OFFSET);
2219                         sliport_err2 = ioread32(adapter->db +
2220                                         SLIPORT_ERROR2_OFFSET);
2221                 }
2222         } else {
2223                 pci_read_config_dword(adapter->pdev,
2224                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2225                 pci_read_config_dword(adapter->pdev,
2226                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2227                 pci_read_config_dword(adapter->pdev,
2228                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2229                 pci_read_config_dword(adapter->pdev,
2230                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2231
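                /* Only unmasked UE status bits indicate real errors */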
2232                 ue_lo = (ue_lo & ~ue_lo_mask);
2233                 ue_hi = (ue_hi & ~ue_hi_mask);
2234         }
2235
2236         /* On certain platforms BE hardware can indicate spurious UEs.
2237          * In case of a real UE the h/w is allowed to stop working on its
2238          * own, so hw_error is deliberately not set for UE detection.
2239          */
2240         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2241                 adapter->hw_error = true;
2242                 dev_err(&adapter->pdev->dev,
2243                         "Error detected in the card\n");
2244                 dev_err(&adapter->pdev->dev,
2245                         "ERR: sliport status 0x%x\n", sliport_status);
2246                 dev_err(&adapter->pdev->dev,
2247                         "ERR: sliport error1 0x%x\n", sliport_err1);
2248                 dev_err(&adapter->pdev->dev,
2249                         "ERR: sliport error2 0x%x\n", sliport_err2);
2250         }
2254
2255         if (ue_lo) {
2256                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2257                         if (ue_lo & 1)
2258                                 dev_err(&adapter->pdev->dev,
2259                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2260                 }
2261         }
2262
2263         if (ue_hi) {
2264                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2265                         if (ue_hi & 1)
2266                                 dev_err(&adapter->pdev->dev,
2267                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2268                 }
2269         }
2271 }
2272
2273 static void be_msix_disable(struct be_adapter *adapter)
2274 {
2275         if (msix_enabled(adapter)) {
2276                 pci_disable_msix(adapter->pdev);
2277                 adapter->num_msix_vec = 0;
2278         }
2279 }
2280
2281 static uint be_num_rss_want(struct be_adapter *adapter)
2282 {
2283         u32 num = 0;
2284
2285         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2286             (lancer_chip(adapter) ||
2287              (!sriov_want(adapter) && be_physfn(adapter)))) {
2288                 num = adapter->max_rss_queues;
2289                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2290         }
2291         return num;
2292 }
2293
2294 static void be_msix_enable(struct be_adapter *adapter)
2295 {
2296 #define BE_MIN_MSIX_VECTORS             1
2297         int i, status, num_vec, num_roce_vec = 0;
2298         struct device *dev = &adapter->pdev->dev;
2299
2300         /* If RSS queues are not used, need a vec for default RX Q */
2301         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2302         if (be_roce_supported(adapter)) {
2303                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2304                                         (num_online_cpus() + 1));
2305                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2306                 num_vec += num_roce_vec;
2307                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2308         }
2309         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2310
2311         for (i = 0; i < num_vec; i++)
2312                 adapter->msix_entries[i].entry = i;
2313
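        /* pci_enable_msix() returns 0 on success; a positive return value
         * is the number of vectors actually available, so retry once with
         * that smaller count.
         */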
2314         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2315         if (status == 0) {
2316                 goto done;
2317         } else if (status >= BE_MIN_MSIX_VECTORS) {
2318                 num_vec = status;
2319                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2320                                 num_vec) == 0)
2321                         goto done;
2322         }
2323
2324         dev_warn(dev, "MSIx enable failed\n");
2325         return;
2326 done:
2327         if (be_roce_supported(adapter)) {
2328                 if (num_vec > num_roce_vec) {
2329                         adapter->num_msix_vec = num_vec - num_roce_vec;
2330                         adapter->num_msix_roce_vec =
2331                                 num_vec - adapter->num_msix_vec;
2332                 } else {
2333                         adapter->num_msix_vec = num_vec;
2334                         adapter->num_msix_roce_vec = 0;
2335                 }
2336         } else {
2337                 adapter->num_msix_vec = num_vec;
2338         }
2339         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2340 }
2341
2342 static inline int be_msix_vec_get(struct be_adapter *adapter,
2343                                 struct be_eq_obj *eqo)
2344 {
2345         return adapter->msix_entries[eqo->idx].vector;
2346 }
2347
2348 static int be_msix_register(struct be_adapter *adapter)
2349 {
2350         struct net_device *netdev = adapter->netdev;
2351         struct be_eq_obj *eqo;
2352         int status, i, vec;
2353
2354         for_all_evt_queues(adapter, eqo, i) {
2355                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2356                 vec = be_msix_vec_get(adapter, eqo);
2357                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2358                 if (status)
2359                         goto err_msix;
2360         }
2361
2362         return 0;
2363 err_msix:
2364         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2365                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2366         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2367                 status);
2368         be_msix_disable(adapter);
2369         return status;
2370 }
2371
2372 static int be_irq_register(struct be_adapter *adapter)
2373 {
2374         struct net_device *netdev = adapter->netdev;
2375         int status;
2376
2377         if (msix_enabled(adapter)) {
2378                 status = be_msix_register(adapter);
2379                 if (status == 0)
2380                         goto done;
2381                 /* INTx is not supported for VF */
2382                 if (!be_physfn(adapter))
2383                         return status;
2384         }
2385
2386         /* INTx: only the first EQ is used */
2387         netdev->irq = adapter->pdev->irq;
2388         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2389                              &adapter->eq_obj[0]);
2390         if (status) {
2391                 dev_err(&adapter->pdev->dev,
2392                         "INTx request IRQ failed - err %d\n", status);
2393                 return status;
2394         }
2395 done:
2396         adapter->isr_registered = true;
2397         return 0;
2398 }
2399
2400 static void be_irq_unregister(struct be_adapter *adapter)
2401 {
2402         struct net_device *netdev = adapter->netdev;
2403         struct be_eq_obj *eqo;
2404         int i;
2405
2406         if (!adapter->isr_registered)
2407                 return;
2408
2409         /* INTx */
2410         if (!msix_enabled(adapter)) {
2411                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2412                 goto done;
2413         }
2414
2415         /* MSIx */
2416         for_all_evt_queues(adapter, eqo, i)
2417                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2418
2419 done:
2420         adapter->isr_registered = false;
2421 }
2422
2423 static void be_rx_qs_destroy(struct be_adapter *adapter)
2424 {
2425         struct be_queue_info *q;
2426         struct be_rx_obj *rxo;
2427         int i;
2428
2429         for_all_rx_queues(adapter, rxo, i) {
2430                 q = &rxo->q;
2431                 if (q->created) {
2432                         be_cmd_rxq_destroy(adapter, q);
2433                         /* After the rxq is invalidated, wait for a grace time
2434                          * of 1ms for all dma to end and the flush compl to
2435                          * arrive
2436                          */
2437                         mdelay(1);
2438                         be_rx_cq_clean(rxo);
2439                 }
2440                 be_queue_free(adapter, q);
2441         }
2442 }
2443
2444 static int be_close(struct net_device *netdev)
2445 {
2446         struct be_adapter *adapter = netdev_priv(netdev);
2447         struct be_eq_obj *eqo;
2448         int i;
2449
2450         be_roce_dev_close(adapter);
2451
2452         for_all_evt_queues(adapter, eqo, i)
2453                 napi_disable(&eqo->napi);
2454
2455         be_async_mcc_disable(adapter);
2456
2457         /* Wait for all pending tx completions to arrive so that
2458          * all tx skbs are freed.
2459          */
2460         be_tx_compl_clean(adapter);
2461
2462         be_rx_qs_destroy(adapter);
2463
2464         for_all_evt_queues(adapter, eqo, i) {
2465                 if (msix_enabled(adapter))
2466                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2467                 else
2468                         synchronize_irq(netdev->irq);
2469                 be_eq_clean(eqo);
2470         }
2471
2472         be_irq_unregister(adapter);
2473
2474         return 0;
2475 }
2476
2477 static int be_rx_qs_create(struct be_adapter *adapter)
2478 {
2479         struct be_rx_obj *rxo;
2480         int rc, i, j;
2481         u8 rsstable[128];
2482
2483         for_all_rx_queues(adapter, rxo, i) {
2484                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2485                                     sizeof(struct be_eth_rx_d));
2486                 if (rc)
2487                         return rc;
2488         }
2489
2490         /* The FW would like the default RXQ to be created first */
2491         rxo = default_rxo(adapter);
2492         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2493                                adapter->if_handle, false, &rxo->rss_id);
2494         if (rc)
2495                 return rc;
2496
2497         for_all_rss_queues(adapter, rxo, i) {
2498                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2499                                        rx_frag_size, adapter->if_handle,
2500                                        true, &rxo->rss_id);
2501                 if (rc)
2502                         return rc;
2503         }
2504
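        /* Stripe the RSS ring ids round-robin across the 128-entry
         * indirection table; e.g. with 3 RSS rings the table reads
         * id0, id1, id2, id0, id1, id2, ...
         */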
2505         if (be_multi_rxq(adapter)) {
2506                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2507                         for_all_rss_queues(adapter, rxo, i) {
2508                                 if ((j + i) >= 128)
2509                                         break;
2510                                 rsstable[j + i] = rxo->rss_id;
2511                         }
2512                 }
2513                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2514                 if (rc)
2515                         return rc;
2516         }
2517
2518         /* First time posting */
2519         for_all_rx_queues(adapter, rxo, i)
2520                 be_post_rx_frags(rxo, GFP_KERNEL);
2521         return 0;
2522 }
2523
2524 static int be_open(struct net_device *netdev)
2525 {
2526         struct be_adapter *adapter = netdev_priv(netdev);
2527         struct be_eq_obj *eqo;
2528         struct be_rx_obj *rxo;
2529         struct be_tx_obj *txo;
2530         u8 link_status;
2531         int status, i;
2532
2533         status = be_rx_qs_create(adapter);
2534         if (status)
2535                 goto err;
2536
2537         be_irq_register(adapter);
2538
2539         for_all_rx_queues(adapter, rxo, i)
2540                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2541
2542         for_all_tx_queues(adapter, txo, i)
2543                 be_cq_notify(adapter, txo->cq.id, true, 0);
2544
2545         be_async_mcc_enable(adapter);
2546
2547         for_all_evt_queues(adapter, eqo, i) {
2548                 napi_enable(&eqo->napi);
2549                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2550         }
2551
2552         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2553         if (!status)
2554                 be_link_status_update(adapter, link_status);
2555
2556         be_roce_dev_open(adapter);
2557         return 0;
2558 err:
2559         be_close(adapter->netdev);
2560         return -EIO;
2561 }
2562
2563 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2564 {
2565         struct be_dma_mem cmd;
2566         int status = 0;
2567         u8 mac[ETH_ALEN];
2568
2569         memset(mac, 0, ETH_ALEN);
2570
2571         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2572         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2573                                     GFP_KERNEL | __GFP_ZERO);
2574         if (cmd.va == NULL)
2575                 return -1;
2576
2577         if (enable) {
2578                 status = pci_write_config_dword(adapter->pdev,
2579                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2580                 if (status) {
2581                         dev_err(&adapter->pdev->dev,
2582                                 "Could not enable Wake-on-lan\n");
2583                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2584                                           cmd.dma);
2585                         return status;
2586                 }
2587                 status = be_cmd_enable_magic_wol(adapter,
2588                                 adapter->netdev->dev_addr, &cmd);
2589                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2590                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2591         } else {
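                /* The disable path passes a zeroed MAC (memset above) to
                 * clear the magic-packet filter.
                 */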
2592                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2593                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2594                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2595         }
2596
2597         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2598         return status;
2599 }
2600
2601 /*
2602  * Generate a seed MAC address from the PF MAC Address using jhash.
2603  * MAC addresses for VFs are assigned incrementally starting from the seed.
2604  * These addresses are programmed in the ASIC by the PF and the VF driver
2605  * queries for the MAC address during its probe.
2606  */
2607 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2608 {
2609         u32 vf;
2610         int status = 0;
2611         u8 mac[ETH_ALEN];
2612         struct be_vf_cfg *vf_cfg;
2613
2614         be_vf_eth_addr_generate(adapter, mac);
2615
2616         for_all_vfs(adapter, vf_cfg, vf) {
2617                 if (lancer_chip(adapter)) {
2618                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2619                 } else {
2620                         status = be_cmd_pmac_add(adapter, mac,
2621                                                  vf_cfg->if_handle,
2622                                                  &vf_cfg->pmac_id, vf + 1);
2623                 }
2624
2625                 if (status)
2626                         dev_err(&adapter->pdev->dev,
2627                                 "MAC address assignment failed for VF %d\n", vf);
2628                 else
2629                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2630
2631                 mac[5] += 1;
2632         }
2633         return status;
2634 }
2635
2636 static int be_vfs_mac_query(struct be_adapter *adapter)
2637 {
2638         int status, vf;
2639         u8 mac[ETH_ALEN];
2640         struct be_vf_cfg *vf_cfg;
2641         bool active;
2642
2643         for_all_vfs(adapter, vf_cfg, vf) {
2644                 be_cmd_get_mac_from_list(adapter, mac, &active,
2645                                          &vf_cfg->pmac_id, 0);
2646
2647                 status = be_cmd_mac_addr_query(adapter, mac, false,
2648                                                vf_cfg->if_handle, 0);
2649                 if (status)
2650                         return status;
2651                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2652         }
2653         return 0;
2654 }
2655
2656 static void be_vf_clear(struct be_adapter *adapter)
2657 {
2658         struct be_vf_cfg *vf_cfg;
2659         u32 vf;
2660
2661         if (be_find_vfs(adapter, ASSIGNED)) {
2662                 dev_warn(&adapter->pdev->dev,
2663                          "VFs are assigned to VMs: not disabling VFs\n");
2664                 goto done;
2665         }
2666
2667         for_all_vfs(adapter, vf_cfg, vf) {
2668                 if (lancer_chip(adapter))
2669                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2670                 else
2671                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2672                                         vf_cfg->pmac_id, vf + 1);
2673
2674                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2675         }
2676         pci_disable_sriov(adapter->pdev);
2677 done:
2678         kfree(adapter->vf_cfg);
2679         adapter->num_vfs = 0;
2680 }
2681
2682 static int be_clear(struct be_adapter *adapter)
2683 {
2684         int i = 1;
2685
2686         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2687                 cancel_delayed_work_sync(&adapter->work);
2688                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2689         }
2690
2691         if (sriov_enabled(adapter))
2692                 be_vf_clear(adapter);
2693
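        /* Delete the additional unicast MACs; i starts at 1 as pmac_id[0]
         * holds the primary MAC.
         */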
2694         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2695                 be_cmd_pmac_del(adapter, adapter->if_handle,
2696                         adapter->pmac_id[i], 0);
2697
2698         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2699
2700         be_mcc_queues_destroy(adapter);
2701         be_rx_cqs_destroy(adapter);
2702         be_tx_queues_destroy(adapter);
2703         be_evt_queues_destroy(adapter);
2704
2705         kfree(adapter->pmac_id);
2706         adapter->pmac_id = NULL;
2707
2708         be_msix_disable(adapter);
2709         return 0;
2710 }
2711
2712 static int be_vfs_if_create(struct be_adapter *adapter)
2713 {
2714         struct be_vf_cfg *vf_cfg;
2715         u32 cap_flags, en_flags, vf;
2716         int status = 0;
2717
2718         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2719                     BE_IF_FLAGS_MULTICAST;
2720
2721         for_all_vfs(adapter, vf_cfg, vf) {
2722                 if (!BE3_chip(adapter))
2723                         be_cmd_get_profile_config(adapter, &cap_flags,
2724                                                   NULL, vf + 1);
2725
2726                 /* If a FW profile exists, then cap_flags are updated */
2727                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2728                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2729                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2730                                           &vf_cfg->if_handle, vf + 1);
2731                 if (status)
2732                         goto err;
2733         }
2734 err:
2735         return status;
2736 }
2737
2738 static int be_vf_setup_init(struct be_adapter *adapter)
2739 {
2740         struct be_vf_cfg *vf_cfg;
2741         int vf;
2742
2743         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2744                                   GFP_KERNEL);
2745         if (!adapter->vf_cfg)
2746                 return -ENOMEM;
2747
2748         for_all_vfs(adapter, vf_cfg, vf) {
2749                 vf_cfg->if_handle = -1;
2750                 vf_cfg->pmac_id = -1;
2751         }
2752         return 0;
2753 }
2754
2755 static int be_vf_setup(struct be_adapter *adapter)
2756 {
2757         struct be_vf_cfg *vf_cfg;
2758         u16 def_vlan, lnk_speed;
2759         int status, old_vfs, vf;
2760         struct device *dev = &adapter->pdev->dev;
2761
2762         old_vfs = be_find_vfs(adapter, ENABLED);
2763         if (old_vfs) {
2764                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2765                 if (old_vfs != num_vfs)
2766                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2767                 adapter->num_vfs = old_vfs;
2768         } else {
2769                 if (num_vfs > adapter->dev_num_vfs)
2770                         dev_info(dev, "Device supports %d VFs, not %d\n",
2771                                  adapter->dev_num_vfs, num_vfs);
2772                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2773
2774                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2775                 if (status) {
2776                         dev_err(dev, "SRIOV enable failed\n");
2777                         adapter->num_vfs = 0;
2778                         return 0;
2779                 }
2780         }
2781
2782         status = be_vf_setup_init(adapter);
2783         if (status)
2784                 goto err;
2785
2786         if (old_vfs) {
2787                 for_all_vfs(adapter, vf_cfg, vf) {
2788                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2789                         if (status)
2790                                 goto err;
2791                 }
2792         } else {
2793                 status = be_vfs_if_create(adapter);
2794                 if (status)
2795                         goto err;
2796         }
2797
2798         if (old_vfs) {
2799                 status = be_vfs_mac_query(adapter);
2800                 if (status)
2801                         goto err;
2802         } else {
2803                 status = be_vf_eth_addr_config(adapter);
2804                 if (status)
2805                         goto err;
2806         }
2807
2808         for_all_vfs(adapter, vf_cfg, vf) {
2809                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2810                  * Allow full available bandwidth.
2811                  */
2812                 if (BE3_chip(adapter) && !old_vfs)
2813                         be_cmd_set_qos(adapter, 1000, vf+1);
2814
2815                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2816                                                   NULL, vf + 1);
2817                 if (!status)
2818                         vf_cfg->tx_rate = lnk_speed;
2819
2820                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2821                                                vf + 1, vf_cfg->if_handle);
2822                 if (status)
2823                         goto err;
2824                 vf_cfg->def_vid = def_vlan;
2825
2826                 be_cmd_enable_vf(adapter, vf + 1);
2827         }
2828         return 0;
2829 err:
2830         dev_err(dev, "VF setup failed\n");
2831         be_vf_clear(adapter);
2832         return status;
2833 }
2834
2835 static void be_setup_init(struct be_adapter *adapter)
2836 {
2837         adapter->vlan_prio_bmap = 0xff;
2838         adapter->phy.link_speed = -1;
2839         adapter->if_handle = -1;
2840         adapter->be3_native = false;
2841         adapter->promiscuous = false;
2842         if (be_physfn(adapter))
2843                 adapter->cmd_privileges = MAX_PRIVILEGES;
2844         else
2845                 adapter->cmd_privileges = MIN_PRIVILEGES;
2846 }
2847
2848 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2849                            bool *active_mac, u32 *pmac_id)
2850 {
2851         int status = 0;
2852
2853         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2854                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2855                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2856                         *active_mac = true;
2857                 else
2858                         *active_mac = false;
2859
2860                 return status;
2861         }
2862
2863         if (lancer_chip(adapter)) {
2864                 status = be_cmd_get_mac_from_list(adapter, mac,
2865                                                   active_mac, pmac_id, 0);
2866                 if (*active_mac) {
2867                         status = be_cmd_mac_addr_query(adapter, mac, false,
2868                                                        if_handle, *pmac_id);
2869                 }
2870         } else if (be_physfn(adapter)) {
2871                 /* For BE3, for PF get permanent MAC */
2872                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2873                 *active_mac = false;
2874         } else {
2875                 /* For BE3, for VF get soft MAC assigned by PF*/
2876                 status = be_cmd_mac_addr_query(adapter, mac, false,
2877                                                if_handle, 0);
2878                 *active_mac = true;
2879         }
2880         return status;
2881 }
2882
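/*
 * Discover per-function resource limits. Non-BEx chips report them via
 * GET_FUNC_CONFIG; on a BE3 PF only the TX-queue count is queried and
 * everything else falls back to driver defaults. The results are then
 * clamped to the driver's own maxima, and the SR-IOV TotalVFs value is
 * read from PCI config space.
 */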
2883 static void be_get_resources(struct be_adapter *adapter)
2884 {
2885         u16 dev_num_vfs;
2886         int pos, status;
2887         bool profile_present = false;
2888         u16 txq_count = 0;
2889
2890         if (!BEx_chip(adapter)) {
2891                 status = be_cmd_get_func_config(adapter);
2892                 if (!status)
2893                         profile_present = true;
2894         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
2895                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
2896         }
2897
2898         if (profile_present) {
2899                 /* Sanity fixes for Lancer */
2900                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2901                                               BE_UC_PMAC_COUNT);
2902                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2903                                            BE_NUM_VLANS_SUPPORTED);
2904                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2905                                                BE_MAX_MC);
2906                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2907                                                MAX_TX_QS);
2908                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2909                                                 BE3_MAX_RSS_QS);
2910                 adapter->max_event_queues = min_t(u16,
2911                                                   adapter->max_event_queues,
2912                                                   BE3_MAX_RSS_QS);
2913
2914                 if (adapter->max_rss_queues &&
2915                     adapter->max_rss_queues == adapter->max_rx_queues)
2916                         adapter->max_rss_queues -= 1;
2917
2918                 if (adapter->max_event_queues < adapter->max_rss_queues)
2919                         adapter->max_rss_queues = adapter->max_event_queues;
2920
2921         } else {
2922                 if (be_physfn(adapter))
2923                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2924                 else
2925                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2926
2927                 if (adapter->function_mode & FLEX10_MODE)
2928                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
2929                 else
2930                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2931
2932                 adapter->max_mcast_mac = BE_MAX_MC;
2933                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
2934                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2935                                                MAX_TX_QS);
2936                 adapter->max_rss_queues = (adapter->be3_native) ?
2937                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2938                 adapter->max_event_queues = BE3_MAX_RSS_QS;
2939
2940                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2941                                         BE_IF_FLAGS_BROADCAST |
2942                                         BE_IF_FLAGS_MULTICAST |
2943                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
2944                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
2945                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
2946                                         BE_IF_FLAGS_PROMISCUOUS;
2947
2948                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2949                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2950         }
2951
2952         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2953         if (pos) {
2954                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2955                                      &dev_num_vfs);
2956                 if (BE3_chip(adapter))
2957                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2958                 adapter->dev_num_vfs = dev_num_vfs;
2959         }
2960 }
2961
2962 /* Routine to query per function resource limits */
2963 static int be_get_config(struct be_adapter *adapter)
2964 {
2965         int status;
2966
2967         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2968                                      &adapter->function_mode,
2969                                      &adapter->function_caps,
2970                                      &adapter->asic_rev);
2971         if (status)
2972                 goto err;
2973
2974         be_get_resources(adapter);
2975
2976         /* primary mac needs 1 pmac entry */
2977         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2978                                    sizeof(u32), GFP_KERNEL);
2979         if (!adapter->pmac_id) {
2980                 status = -ENOMEM;
2981                 goto err;
2982         }
2983
2984 err:
2985         return status;
2986 }
2987
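/*
 * Main bring-up path: query FW config, enable MSI-x, create event/
 * completion/MCC queues, create the interface and program its MAC,
 * create TX queues, restore VLAN, RX-mode and flow-control settings,
 * and set up SR-IOV VFs when requested before starting the worker.
 */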
2988 static int be_setup(struct be_adapter *adapter)
2989 {
2990         struct device *dev = &adapter->pdev->dev;
2991         u32 en_flags;
2992         u32 tx_fc, rx_fc;
2993         int status;
2994         u8 mac[ETH_ALEN];
2995         bool active_mac;
2996
2997         be_setup_init(adapter);
2998
2999         if (!lancer_chip(adapter))
3000                 be_cmd_req_native_mode(adapter);
3001
3002         status = be_get_config(adapter);
3003         if (status)
3004                 goto err;
3005
3006         be_msix_enable(adapter);
3007
3008         status = be_evt_queues_create(adapter);
3009         if (status)
3010                 goto err;
3011
3012         status = be_tx_cqs_create(adapter);
3013         if (status)
3014                 goto err;
3015
3016         status = be_rx_cqs_create(adapter);
3017         if (status)
3018                 goto err;
3019
3020         status = be_mcc_queues_create(adapter);
3021         if (status)
3022                 goto err;
3023
3024         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3025         /* In UMC mode FW does not return right privileges.
3026          * Override with correct privilege equivalent to PF.
3027          */
3028         if (be_is_mc(adapter))
3029                 adapter->cmd_privileges = MAX_PRIVILEGES;
3030
3031         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3032                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3033
3034         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3035                 en_flags |= BE_IF_FLAGS_RSS;
3036
3037         en_flags &= adapter->if_cap_flags;
3038
3039         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3040                                   &adapter->if_handle, 0);
3041         if (status != 0)
3042                 goto err;
3043
3044         memset(mac, 0, ETH_ALEN);
3045         active_mac = false;
3046         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3047                                  &active_mac, &adapter->pmac_id[0]);
3048         if (status != 0)
3049                 goto err;
3050
3051         if (!active_mac) {
3052                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3053                                          &adapter->pmac_id[0], 0);
3054                 if (status != 0)
3055                         goto err;
3056         }
3057
3058         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3059                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3060                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3061         }
3062
3063         status = be_tx_qs_create(adapter);
3064         if (status)
3065                 goto err;
3066
3067         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3068
3069         if (adapter->vlans_added)
3070                 be_vid_config(adapter);
3071
3072         be_set_rx_mode(adapter->netdev);
3073
3074         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3075
3076         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3077                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3078                                         adapter->rx_fc);
3079
3080         if (be_physfn(adapter) && num_vfs) {
3081                 if (adapter->dev_num_vfs)
3082                         be_vf_setup(adapter);
3083                 else
3084                         dev_warn(dev, "device doesn't support SRIOV\n");
3085         }
3086
3087         status = be_cmd_get_phy_info(adapter);
3088         if (!status && be_pause_supported(adapter))
3089                 adapter->phy.fc_autoneg = 1;
3090
3091         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3092         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3093         return 0;
3094 err:
3095         be_clear(adapter);
3096         return status;
3097 }
3098
3099 #ifdef CONFIG_NET_POLL_CONTROLLER
3100 static void be_netpoll(struct net_device *netdev)
3101 {
3102         struct be_adapter *adapter = netdev_priv(netdev);
3103         struct be_eq_obj *eqo;
3104         int i;
3105
3106         for_all_evt_queues(adapter, eqo, i) {
3107                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3108                 napi_schedule(&eqo->napi);
3109         }
3112 }
3113 #endif
3114
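/*
 * UFI (firmware image file) helpers. A UFI starts with a file header,
 * followed by per-image headers and the image payloads; the flash
 * section info describing each flashable component is found by
 * scanning for the flash_cookie signature (see get_fsec_info()).
 */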
3115 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3116 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3117
3118 static bool be_flash_redboot(struct be_adapter *adapter,
3119                         const u8 *p, u32 img_start, int image_size,
3120                         int hdr_size)
3121 {
3122         u32 crc_offset;
3123         u8 flashed_crc[4];
3124         int status;
3125
3126         crc_offset = hdr_size + img_start + image_size - 4;
3127
3128         p += crc_offset;
3129
3130         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3131                         (image_size - 4));
3132         if (status) {
3133                 dev_err(&adapter->pdev->dev,
3134                         "could not get CRC from flash; not flashing redboot\n");
3135                 return false;
3136         }
3137
3138         /* Update redboot only if the CRCs do not match */
3139         return memcmp(flashed_crc, p, 4) != 0;
3143 }
3144
3145 static bool phy_flashing_required(struct be_adapter *adapter)
3146 {
3147         return (adapter->phy.phy_type == TN_8022 &&
3148                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3149 }
3150
3151 static bool is_comp_in_ufi(struct be_adapter *adapter,
3152                            struct flash_section_info *fsec, int type)
3153 {
3154         int i = 0, img_type = 0;
3155         struct flash_section_info_g2 *fsec_g2 = NULL;
3156
3157         if (BE2_chip(adapter))
3158                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3159
3160         for (i = 0; i < MAX_FLASH_COMP; i++) {
3161                 if (fsec_g2)
3162                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3163                 else
3164                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3165
3166                 if (img_type == type)
3167                         return true;
3168         }
3169         return false;
3171 }
3172
3173 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3174                                          int header_size,
3175                                          const struct firmware *fw)
3176 {
3177         struct flash_section_info *fsec = NULL;
3178         const u8 *p = fw->data;
3179
3180         p += header_size;
3181         while (p < (fw->data + fw->size)) {
3182                 fsec = (struct flash_section_info *)p;
3183                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3184                         return fsec;
3185                 p += 32;        /* advance by sizeof(flash_cookie) */
3186         }
3187         return NULL;
3188 }
3189
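/*
 * Write one flash component in chunks of at most 32KB. Intermediate
 * chunks use the SAVE op-code and only the last chunk issues the real
 * FLASH (or the PHY-specific equivalents). An ILLEGAL_IOCTL_REQ for
 * PHY FW is silently tolerated, presumably for FW without PHY-flash
 * support.
 */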
3190 static int be_flash(struct be_adapter *adapter, const u8 *img,
3191                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3192 {
3193         u32 total_bytes = 0, flash_op, num_bytes = 0;
3194         int status = 0;
3195         struct be_cmd_write_flashrom *req = flash_cmd->va;
3196
3197         total_bytes = img_size;
3198         while (total_bytes) {
3199                 num_bytes = min_t(u32, 32 * 1024, total_bytes); /* 32KB chunks */
3200
3201                 total_bytes -= num_bytes;
3202
3203                 if (!total_bytes) {
3204                         if (optype == OPTYPE_PHY_FW)
3205                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3206                         else
3207                                 flash_op = FLASHROM_OPER_FLASH;
3208                 } else {
3209                         if (optype == OPTYPE_PHY_FW)
3210                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3211                         else
3212                                 flash_op = FLASHROM_OPER_SAVE;
3213                 }
3214
3215                 memcpy(req->data_buf, img, num_bytes);
3216                 img += num_bytes;
3217                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3218                                                 flash_op, num_bytes);
3219                 if (status) {
3220                         if (status == ILLEGAL_IOCTL_REQ &&
3221                             optype == OPTYPE_PHY_FW)
3222                                 break;
3223                         dev_err(&adapter->pdev->dev,
3224                                 "cmd to write to flash rom failed.\n");
3225                         return status;
3226                 }
3227         }
3228         return 0;
3229 }
3230
3231 /* For BE2, BE3 and BE3-R */
3232 static int be_flash_BEx(struct be_adapter *adapter,
3233                          const struct firmware *fw,
3234                          struct be_dma_mem *flash_cmd,
3235                          int num_of_images)
3237 {
3238         int status = 0, i, filehdr_size = 0;
3239         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3240         const u8 *p = fw->data;
3241         const struct flash_comp *pflashcomp;
3242         int num_comp, redboot;
3243         struct flash_section_info *fsec = NULL;
3244
3245         struct flash_comp gen3_flash_types[] = {
3246                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3247                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3248                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3249                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3250                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3251                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3252                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3253                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3254                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3255                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3256                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3257                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3258                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3259                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3260                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3261                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3262                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3263                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3264                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3265                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3266         };
3267
3268         struct flash_comp gen2_flash_types[] = {
3269                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3270                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3271                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3272                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3273                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3274                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3275                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3276                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3277                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3278                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3279                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3280                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3281                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3282                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3283                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3284                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3285         };
3286
3287         if (BE3_chip(adapter)) {
3288                 pflashcomp = gen3_flash_types;
3289                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3290                 num_comp = ARRAY_SIZE(gen3_flash_types);
3291         } else {
3292                 pflashcomp = gen2_flash_types;
3293                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3294                 num_comp = ARRAY_SIZE(gen2_flash_types);
3295         }
3296
3297         /* Get flash section info*/
3298         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3299         if (!fsec) {
3300                 dev_err(&adapter->pdev->dev,
3301                         "Invalid cookie. UFI corrupted?\n");
3302                 return -1;
3303         }
3304         for (i = 0; i < num_comp; i++) {
3305                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3306                         continue;
3307
3308                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3309                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3310                         continue;
3311
3312                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3313                     !phy_flashing_required(adapter))
3314                         continue;
3315
3316                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3317                         redboot = be_flash_redboot(adapter, fw->data,
3318                                 pflashcomp[i].offset, pflashcomp[i].size,
3319                                 filehdr_size + img_hdrs_size);
3320                         if (!redboot)
3321                                 continue;
3322                 }
3323
3324                 p = fw->data;
3325                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3326                 if (p + pflashcomp[i].size > fw->data + fw->size)
3327                         return -1;
3328
3329                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3330                                         pflashcomp[i].size);
3331                 if (status) {
3332                         dev_err(&adapter->pdev->dev,
3333                                 "Flashing section type %d failed.\n",
3334                                 pflashcomp[i].img_type);
3335                         return status;
3336                 }
3337         }
3338         return 0;
3339 }
3340
3341 static int be_flash_skyhawk(struct be_adapter *adapter,
3342                 const struct firmware *fw,
3343                 struct be_dma_mem *flash_cmd, int num_of_images)
3344 {
3345         int status = 0, i, filehdr_size = 0;
3346         int img_offset, img_size, img_optype, redboot;
3347         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3348         const u8 *p = fw->data;
3349         struct flash_section_info *fsec = NULL;
3350
3351         filehdr_size = sizeof(struct flash_file_hdr_g3);
3352         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3353         if (!fsec) {
3354                 dev_err(&adapter->pdev->dev,
3355                         "Invalid cookie. UFI corrupted?\n");
3356                 return -1;
3357         }
3358
3359         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3360                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3361                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3362
3363                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3364                 case IMAGE_FIRMWARE_iSCSI:
3365                         img_optype = OPTYPE_ISCSI_ACTIVE;
3366                         break;
3367                 case IMAGE_BOOT_CODE:
3368                         img_optype = OPTYPE_REDBOOT;
3369                         break;
3370                 case IMAGE_OPTION_ROM_ISCSI:
3371                         img_optype = OPTYPE_BIOS;
3372                         break;
3373                 case IMAGE_OPTION_ROM_PXE:
3374                         img_optype = OPTYPE_PXE_BIOS;
3375                         break;
3376                 case IMAGE_OPTION_ROM_FCoE:
3377                         img_optype = OPTYPE_FCOE_BIOS;
3378                         break;
3379                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3380                         img_optype = OPTYPE_ISCSI_BACKUP;
3381                         break;
3382                 case IMAGE_NCSI:
3383                         img_optype = OPTYPE_NCSI_FW;
3384                         break;
3385                 default:
3386                         continue;
3387                 }
3388
3389                 if (img_optype == OPTYPE_REDBOOT) {
3390                         redboot = be_flash_redboot(adapter, fw->data,
3391                                         img_offset, img_size,
3392                                         filehdr_size + img_hdrs_size);
3393                         if (!redboot)
3394                                 continue;
3395                 }
3396
3397                 p = fw->data;
3398                 p += filehdr_size + img_offset + img_hdrs_size;
3399                 if (p + img_size > fw->data + fw->size)
3400                         return -1;
3401
3402                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3403                 if (status) {
3404                         dev_err(&adapter->pdev->dev,
3405                                 "Flashing section type %d failed.\n",
3406                                 le32_to_cpu(fsec->fsec_entry[i].type));
3407                         return status;
3408                 }
3409         }
3410         return 0;
3411 }
3412
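/* Poll the physdev-control register until the in-progress (INP) bit
 * clears, waiting at most SLIPORT_IDLE_TIMEOUT seconds.
 */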
3413 static int lancer_wait_idle(struct be_adapter *adapter)
3414 {
3415 #define SLIPORT_IDLE_TIMEOUT 30
3416         u32 reg_val;
3417         int status = 0, i;
3418
3419         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3420                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3421                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3422                         break;
3423
3424                 ssleep(1);
3425         }
3426
3427         if (i == SLIPORT_IDLE_TIMEOUT)
3428                 status = -1;
3429
3430         return status;
3431 }
3432
3433 static int lancer_fw_reset(struct be_adapter *adapter)
3434 {
3435         int status = 0;
3436
3437         status = lancer_wait_idle(adapter);
3438         if (status)
3439                 return status;
3440
3441         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3442                   PHYSDEV_CONTROL_OFFSET);
3443
3444         return status;
3445 }
3446
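/*
 * Lancer flashes through the WRITE_OBJECT command: the image is
 * streamed in 32KB chunks to the "/prg" object, and a final
 * zero-length write commits it. change_status then reports whether a
 * FW reset (issued below via lancer_fw_reset()) or a full reboot is
 * needed to activate the new image.
 */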
3447 static int lancer_fw_download(struct be_adapter *adapter,
3448                                 const struct firmware *fw)
3449 {
3450 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3451 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3452         struct be_dma_mem flash_cmd;
3453         const u8 *data_ptr = NULL;
3454         u8 *dest_image_ptr = NULL;
3455         size_t image_size = 0;
3456         u32 chunk_size = 0;
3457         u32 data_written = 0;
3458         u32 offset = 0;
3459         int status = 0;
3460         u8 add_status = 0;
3461         u8 change_status;
3462
3463         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3464                 dev_err(&adapter->pdev->dev,
3465                         "FW image not properly aligned. "
3466                         "Length must be 4-byte aligned.\n");
3467                 status = -EINVAL;
3468                 goto lancer_fw_exit;
3469         }
3470
3471         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3472                                 + LANCER_FW_DOWNLOAD_CHUNK;
3473         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3474                                           &flash_cmd.dma, GFP_KERNEL);
3475         if (!flash_cmd.va) {
3476                 status = -ENOMEM;
3477                 goto lancer_fw_exit;
3478         }
3479
3480         dest_image_ptr = flash_cmd.va +
3481                                 sizeof(struct lancer_cmd_req_write_object);
3482         image_size = fw->size;
3483         data_ptr = fw->data;
3484
3485         while (image_size) {
3486                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3487
3488                 /* Copy the image chunk content. */
3489                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3490
3491                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3492                                                  chunk_size, offset,
3493                                                  LANCER_FW_DOWNLOAD_LOCATION,
3494                                                  &data_written, &change_status,
3495                                                  &add_status);
3496                 if (status)
3497                         break;
3498
3499                 offset += data_written;
3500                 data_ptr += data_written;
3501                 image_size -= data_written;
3502         }
3503
3504         if (!status) {
3505                 /* Commit the FW written */
3506                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3507                                                  0, offset,
3508                                                  LANCER_FW_DOWNLOAD_LOCATION,
3509                                                  &data_written, &change_status,
3510                                                  &add_status);
3511         }
3512
3513         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3514                                 flash_cmd.dma);
3515         if (status) {
3516                 dev_err(&adapter->pdev->dev,
3517                         "Firmware load error. "
3518                         "Status code: 0x%x Additional Status: 0x%x\n",
3519                         status, add_status);
3520                 goto lancer_fw_exit;
3521         }
3522
3523         if (change_status == LANCER_FW_RESET_NEEDED) {
3524                 status = lancer_fw_reset(adapter);
3525                 if (status) {
3526                         dev_err(&adapter->pdev->dev,
3527                                 "Adapter busy for FW reset.\n"
3528                                 "New FW will not be active.\n");
3529                         goto lancer_fw_exit;
3530                 }
3531         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3532                 dev_err(&adapter->pdev->dev,
3533                         "System reboot required for new FW to be active\n");
3535         }
3536
3537         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3538 lancer_fw_exit:
3539         return status;
3540 }
3541
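/* UFI flavors, derived from the build string and ASIC revision in the
 * file header: '2' => BE2, '3' => BE3 (asic_type_rev 0x10 => BE3-R),
 * '4' => Skyhawk.
 */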
3542 #define UFI_TYPE2               2
3543 #define UFI_TYPE3               3
3544 #define UFI_TYPE3R              10
3545 #define UFI_TYPE4               4
3546 static int be_get_ufi_type(struct be_adapter *adapter,
3547                            struct flash_file_hdr_g3 *fhdr)
3548 {
3549         if (fhdr == NULL)
3550                 goto be_get_ufi_exit;
3551
3552         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3553                 return UFI_TYPE4;
3554         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3555                 if (fhdr->asic_type_rev == 0x10)
3556                         return UFI_TYPE3R;
3557                 else
3558                         return UFI_TYPE3;
3559         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3560                 return UFI_TYPE2;
3561
3562 be_get_ufi_exit:
3563         dev_err(&adapter->pdev->dev,
3564                 "UFI and Interface are not compatible for flashing\n");
3565         return -1;
3566 }
3567
3568 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3569 {
3570         struct flash_file_hdr_g3 *fhdr3;
3571         struct image_hdr *img_hdr_ptr = NULL;
3572         struct be_dma_mem flash_cmd;
3573         const u8 *p;
3574         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3575
3576         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3577         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3578                                           &flash_cmd.dma, GFP_KERNEL);
3579         if (!flash_cmd.va) {
3580                 status = -ENOMEM;
3581                 goto be_fw_exit;
3582         }
3583
3584         p = fw->data;
3585         fhdr3 = (struct flash_file_hdr_g3 *)p;
3586
3587         ufi_type = be_get_ufi_type(adapter, fhdr3);
3588
3589         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3590         for (i = 0; i < num_imgs; i++) {
3591                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3592                                 (sizeof(struct flash_file_hdr_g3) +
3593                                  i * sizeof(struct image_hdr)));
3594                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3595                         switch (ufi_type) {
3596                         case UFI_TYPE4:
3597                                 status = be_flash_skyhawk(adapter, fw,
3598                                                         &flash_cmd, num_imgs);
3599                                 break;
3600                         case UFI_TYPE3R:
3601                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3602                                                       num_imgs);
3603                                 break;
3604                         case UFI_TYPE3:
3605                                 /* Do not flash this ufi on BE3-R cards */
3606                                 if (adapter->asic_rev < 0x10) {
3607                                         status = be_flash_BEx(adapter, fw,
3608                                                               &flash_cmd,
3609                                                               num_imgs);
3610                                 } else {
3611                                         status = -1;
3612                                         dev_err(&adapter->pdev->dev,
3613                                                 "Can't load BE3 UFI on BE3-R\n");
3614                                 }
3615                         }
3616                 }
3617         }
3618
3619         if (ufi_type == UFI_TYPE2)
3620                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3621         else if (ufi_type == -1)
3622                 status = -1;
3623
3624         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3625                           flash_cmd.dma);
3626         if (status) {
3627                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3628                 goto be_fw_exit;
3629         }
3630
3631         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3632
3633 be_fw_exit:
3634         return status;
3635 }
3636
3637 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3638 {
3639         const struct firmware *fw;
3640         int status;
3641
3642         if (!netif_running(adapter->netdev)) {
3643                 dev_err(&adapter->pdev->dev,
3644                         "Firmware load not allowed (interface is down)\n");
3645                 return -1;
3646         }
3647
3648         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3649         if (status)
3650                 goto fw_exit;
3651
3652         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3653
3654         if (lancer_chip(adapter))
3655                 status = lancer_fw_download(adapter, fw);
3656         else
3657                 status = be_fw_download(adapter, fw);
3658
3659 fw_exit:
3660         release_firmware(fw);
3661         return status;
3662 }
3663
3664 static const struct net_device_ops be_netdev_ops = {
3665         .ndo_open               = be_open,
3666         .ndo_stop               = be_close,
3667         .ndo_start_xmit         = be_xmit,
3668         .ndo_set_rx_mode        = be_set_rx_mode,
3669         .ndo_set_mac_address    = be_mac_addr_set,
3670         .ndo_change_mtu         = be_change_mtu,
3671         .ndo_get_stats64        = be_get_stats64,
3672         .ndo_validate_addr      = eth_validate_addr,
3673         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3674         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3675         .ndo_set_vf_mac         = be_set_vf_mac,
3676         .ndo_set_vf_vlan        = be_set_vf_vlan,
3677         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3678         .ndo_get_vf_config      = be_get_vf_config,
3679 #ifdef CONFIG_NET_POLL_CONTROLLER
3680         .ndo_poll_controller    = be_netpoll,
3681 #endif
3682 };
3683
3684 static void be_netdev_init(struct net_device *netdev)
3685 {
3686         struct be_adapter *adapter = netdev_priv(netdev);
3687         struct be_eq_obj *eqo;
3688         int i;
3689
3690         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3691                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3692                 NETIF_F_HW_VLAN_CTAG_TX;
3693         if (be_multi_rxq(adapter))
3694                 netdev->hw_features |= NETIF_F_RXHASH;
3695
3696         netdev->features |= netdev->hw_features |
3697                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3698
3699         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3700                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3701
3702         netdev->priv_flags |= IFF_UNICAST_FLT;
3703
3704         netdev->flags |= IFF_MULTICAST;
3705
3706         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3707
3708         netdev->netdev_ops = &be_netdev_ops;
3709
3710         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3711
3712         for_all_evt_queues(adapter, eqo, i)
3713                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3714 }
3715
3716 static void be_unmap_pci_bars(struct be_adapter *adapter)
3717 {
3718         if (adapter->csr)
3719                 pci_iounmap(adapter->pdev, adapter->csr);
3720         if (adapter->db)
3721                 pci_iounmap(adapter->pdev, adapter->db);
3722 }
3723
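/* Doorbell BAR: BAR 0 on Lancer and on VFs, BAR 4 on BEx PFs */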
3724 static int db_bar(struct be_adapter *adapter)
3725 {
3726         if (lancer_chip(adapter) || !be_physfn(adapter))
3727                 return 0;
3728         else
3729                 return 4;
3730 }
3731
3732 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3733 {
3734         if (skyhawk_chip(adapter)) {
3735                 adapter->roce_db.size = 4096;
3736                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3737                                                               db_bar(adapter));
3738                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3739                                                                db_bar(adapter));
3740         }
3741         return 0;
3742 }
3743
3744 static int be_map_pci_bars(struct be_adapter *adapter)
3745 {
3746         u8 __iomem *addr;
3747         u32 sli_intf;
3748
3749         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3750         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3751                                 SLI_INTF_IF_TYPE_SHIFT;
3752
3753         if (BEx_chip(adapter) && be_physfn(adapter)) {
3754                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3755                 if (adapter->csr == NULL)
3756                         return -ENOMEM;
3757         }
3758
3759         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3760         if (addr == NULL)
3761                 goto pci_map_err;
3762         adapter->db = addr;
3763
3764         be_roce_map_pci_bars(adapter);
3765         return 0;
3766
3767 pci_map_err:
3768         be_unmap_pci_bars(adapter);
3769         return -ENOMEM;
3770 }
3771
3772 static void be_ctrl_cleanup(struct be_adapter *adapter)
3773 {
3774         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3775
3776         be_unmap_pci_bars(adapter);
3777
3778         if (mem->va)
3779                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3780                                   mem->dma);
3781
3782         mem = &adapter->rx_filter;
3783         if (mem->va)
3784                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3785                                   mem->dma);
3786 }
3787
3788 static int be_ctrl_init(struct be_adapter *adapter)
3789 {
3790         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3791         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3792         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3793         u32 sli_intf;
3794         int status;
3795
3796         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3797         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3798                                  SLI_INTF_FAMILY_SHIFT;
3799         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3800
3801         status = be_map_pci_bars(adapter);
3802         if (status)
3803                 goto done;
3804
3805         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3806         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3807                                                 mbox_mem_alloc->size,
3808                                                 &mbox_mem_alloc->dma,
3809                                                 GFP_KERNEL);
3810         if (!mbox_mem_alloc->va) {
3811                 status = -ENOMEM;
3812                 goto unmap_pci_bars;
3813         }
3814         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3815         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3816         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3817         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3818
3819         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3820         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3821                                            &rx_filter->dma,
3822                                            GFP_KERNEL | __GFP_ZERO);
3823         if (rx_filter->va == NULL) {
3824                 status = -ENOMEM;
3825                 goto free_mbox;
3826         }
3827
3828         mutex_init(&adapter->mbox_lock);
3829         spin_lock_init(&adapter->mcc_lock);
3830         spin_lock_init(&adapter->mcc_cq_lock);
3831
3832         init_completion(&adapter->flash_compl);
3833         pci_save_state(adapter->pdev);
3834         return 0;
3835
3836 free_mbox:
3837         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3838                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3839
3840 unmap_pci_bars:
3841         be_unmap_pci_bars(adapter);
3842
3843 done:
3844         return status;
3845 }
3846
3847 static void be_stats_cleanup(struct be_adapter *adapter)
3848 {
3849         struct be_dma_mem *cmd = &adapter->stats_cmd;
3850
3851         if (cmd->va)
3852                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3853                                   cmd->va, cmd->dma);
3854 }
3855
3856 static int be_stats_init(struct be_adapter *adapter)
3857 {
3858         struct be_dma_mem *cmd = &adapter->stats_cmd;
3859
3860         if (lancer_chip(adapter))
3861                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3862         else if (BE2_chip(adapter))
3863                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3864         else
3865                 /* BE3 and Skyhawk */
3866                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3867
3868         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3869                                      GFP_KERNEL | __GFP_ZERO);
3870         if (cmd->va == NULL)
3871                 return -ENOMEM;
3872         return 0;
3873 }
3874
3875 static void be_remove(struct pci_dev *pdev)
3876 {
3877         struct be_adapter *adapter = pci_get_drvdata(pdev);
3878
3879         if (!adapter)
3880                 return;
3881
3882         be_roce_dev_remove(adapter);
3883         be_intr_set(adapter, false);
3884
3885         cancel_delayed_work_sync(&adapter->func_recovery_work);
3886
3887         unregister_netdev(adapter->netdev);
3888
3889         be_clear(adapter);
3890
3891         /* tell fw we're done with firing cmds */
3892         be_cmd_fw_clean(adapter);
3893
3894         be_stats_cleanup(adapter);
3895
3896         be_ctrl_cleanup(adapter);
3897
3898         pci_disable_pcie_error_reporting(pdev);
3899
3900         pci_set_drvdata(pdev, NULL);
3901         pci_release_regions(pdev);
3902         pci_disable_device(pdev);
3903
3904         free_netdev(adapter->netdev);
3905 }
3906
3907 bool be_is_wol_supported(struct be_adapter *adapter)
3908 {
3909         return (adapter->wol_cap & BE_WOL_CAP) &&
3910                !be_is_wol_excluded(adapter);
3911 }
3912
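/*
 * Read the FW's UART trace level from its extended FAT configuration.
 * Returns 0 on Lancer, where this query is skipped.
 */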
3913 u32 be_get_fw_log_level(struct be_adapter *adapter)
3914 {
3915         struct be_dma_mem extfat_cmd;
3916         struct be_fat_conf_params *cfgs;
3917         int status;
3918         u32 level = 0;
3919         int j;
3920
3921         if (lancer_chip(adapter))
3922                 return 0;
3923
3924         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3925         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3926         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3927                                              &extfat_cmd.dma);
3928
3929         if (!extfat_cmd.va) {
3930                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3931                         __func__);
3932                 goto err;
3933         }
3934
3935         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3936         if (!status) {
3937                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3938                                                 sizeof(struct be_cmd_resp_hdr));
3939                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3940                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3941                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3942                 }
3943         }
3944         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3945                             extfat_cmd.dma);
3946 err:
3947         return level;
3948 }
3949
3950 static int be_get_initial_config(struct be_adapter *adapter)
3951 {
3952         int status;
3953         u32 level;
3954
3955         status = be_cmd_get_cntl_attributes(adapter);
3956         if (status)
3957                 return status;
3958
3959         status = be_cmd_get_acpi_wol_cap(adapter);
3960         if (status) {
3961                 /* In case of a failure to get WOL capabilities,
3962                  * check the exclusion list to determine WOL capability */
3963                 if (!be_is_wol_excluded(adapter))
3964                         adapter->wol_cap |= BE_WOL_CAP;
3965         }
3966
3967         if (be_is_wol_supported(adapter))
3968                 adapter->wol = true;
3969
3970         /* Must be a power of 2 or else MODULO will BUG_ON */
3971         adapter->be_get_temp_freq = 64;
3972
3973         level = be_get_fw_log_level(adapter);
3974         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3975
3976         return 0;
3977 }
3978
3979 static int lancer_recover_func(struct be_adapter *adapter)
3980 {
3981         int status;
3982
3983         status = lancer_test_and_set_rdy_state(adapter);
3984         if (status)
3985                 goto err;
3986
3987         if (netif_running(adapter->netdev))
3988                 be_close(adapter->netdev);
3989
3990         be_clear(adapter);
3991
3992         adapter->hw_error = false;
3993         adapter->fw_timeout = false;
3994
3995         status = be_setup(adapter);
3996         if (status)
3997                 goto err;
3998
3999         if (netif_running(adapter->netdev)) {
4000                 status = be_open(adapter->netdev);
4001                 if (status)
4002                         goto err;
4003         }
4004
4005         dev_info(&adapter->pdev->dev,
4006                 "Adapter SLIPORT recovery succeeded\n");
4007         return 0;
4008 err:
4009         if (adapter->eeh_error)
4010                 dev_err(&adapter->pdev->dev,
4011                         "Adapter SLIPORT recovery failed\n");
4012
4013         return status;
4014 }
4015
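/*
 * Runs every second: checks for adapter errors and, on a Lancer HW
 * error (unless an EEH error is already being handled), detaches the
 * netdev and attempts a SLIPORT recovery, re-attaching on success.
 */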
4016 static void be_func_recovery_task(struct work_struct *work)
4017 {
4018         struct be_adapter *adapter =
4019                 container_of(work, struct be_adapter, func_recovery_work.work);
4020         int status;
4021
4022         be_detect_error(adapter);
4023
4024         if (adapter->hw_error && lancer_chip(adapter)) {
4026                 if (adapter->eeh_error)
4027                         goto out;
4028
4029                 rtnl_lock();
4030                 netif_device_detach(adapter->netdev);
4031                 rtnl_unlock();
4032
4033                 status = lancer_recover_func(adapter);
4034
4035                 if (!status)
4036                         netif_device_attach(adapter->netdev);
4037         }
4038
4039 out:
4040         schedule_delayed_work(&adapter->func_recovery_work,
4041                               msecs_to_jiffies(1000));
4042 }
4043
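/*
 * Per-second housekeeping: with the interface down only pending MCC
 * completions are reaped; otherwise the periodic stats command is
 * fired, the die temperature is sampled every be_get_temp_freq
 * iterations, starved RX rings are replenished and EQ delays adapted.
 */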
4044 static void be_worker(struct work_struct *work)
4045 {
4046         struct be_adapter *adapter =
4047                 container_of(work, struct be_adapter, work.work);
4048         struct be_rx_obj *rxo;
4049         struct be_eq_obj *eqo;
4050         int i;
4051
4052         /* When interrupts are not yet enabled, just reap any pending
4053          * MCC completions */
4054         if (!netif_running(adapter->netdev)) {
4055                 local_bh_disable();
4056                 be_process_mcc(adapter);
4057                 local_bh_enable();
4058                 goto reschedule;
4059         }
4060
4061         if (!adapter->stats_cmd_sent) {
4062                 if (lancer_chip(adapter))
4063                         lancer_cmd_get_pport_stats(adapter,
4064                                                 &adapter->stats_cmd);
4065                 else
4066                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4067         }
4068
4069         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4070                 be_cmd_get_die_temperature(adapter);
4071
4072         for_all_rx_queues(adapter, rxo, i) {
4073                 if (rxo->rx_post_starved) {
4074                         rxo->rx_post_starved = false;
4075                         be_post_rx_frags(rxo, GFP_KERNEL);
4076                 }
4077         }
4078
4079         for_all_evt_queues(adapter, eqo, i)
4080                 be_eqd_update(adapter, eqo);
4081
4082 reschedule:
4083         adapter->work_counter++;
4084         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4085 }
4086
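/* Skip the function reset when VFs are already enabled, presumably so
 * that the reset does not destroy VFs that may be in use.
 */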
4087 static bool be_reset_required(struct be_adapter *adapter)
4088 {
4089         return be_find_vfs(adapter, ENABLED) <= 0;
4090 }
4091
4092 static char *mc_name(struct be_adapter *adapter)
4093 {
4094         if (adapter->function_mode & FLEX10_MODE)
4095                 return "FLEX10";
4096         else if (adapter->function_mode & VNIC_MODE)
4097                 return "vNIC";
4098         else if (adapter->function_mode & UMC_ENABLED)
4099                 return "UMC";
4100         else
4101                 return "";
4102 }
4103
4104 static inline char *func_name(struct be_adapter *adapter)
4105 {
4106         return be_physfn(adapter) ? "PF" : "VF";
4107 }
4108
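/*
 * PCI probe: enable the device, map BARs, set the DMA mask, sync with
 * FW readiness, reset the function if required, then run be_setup()
 * and register the netdev. The recovery worker is scheduled last.
 */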
4109 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4110 {
4111         int status = 0;
4112         struct be_adapter *adapter;
4113         struct net_device *netdev;
4114         char port_name;
4115
4116         status = pci_enable_device(pdev);
4117         if (status)
4118                 goto do_none;
4119
4120         status = pci_request_regions(pdev, DRV_NAME);
4121         if (status)
4122                 goto disable_dev;
4123         pci_set_master(pdev);
4124
4125         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4126         if (netdev == NULL) {
4127                 status = -ENOMEM;
4128                 goto rel_reg;
4129         }
4130         adapter = netdev_priv(netdev);
4131         adapter->pdev = pdev;
4132         pci_set_drvdata(pdev, adapter);
4133         adapter->netdev = netdev;
4134         SET_NETDEV_DEV(netdev, &pdev->dev);
4135
4136         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4137         if (!status) {
4138                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4139                 if (status < 0) {
4140                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4141                         goto free_netdev;
4142                 }
4143                 netdev->features |= NETIF_F_HIGHDMA;
4144         } else {
4145                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4146                 if (status) {
4147                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4148                         goto free_netdev;
4149                 }
4150         }
4151
4152         status = pci_enable_pcie_error_reporting(pdev);
4153         if (status)
4154                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4155
4156         status = be_ctrl_init(adapter);
4157         if (status)
4158                 goto free_netdev;
4159
4160         /* sync up with fw's ready state */
4161         if (be_physfn(adapter)) {
4162                 status = be_fw_wait_ready(adapter);
4163                 if (status)
4164                         goto ctrl_clean;
4165         }
4166
4167         /* tell fw we're ready to fire cmds */
4168         status = be_cmd_fw_init(adapter);
4169         if (status)
4170                 goto ctrl_clean;
4171
4172         if (be_reset_required(adapter)) {
4173                 status = be_cmd_reset_function(adapter);
4174                 if (status)
4175                         goto ctrl_clean;
4176         }
4177
4178         /* Wait for interrupts to quiesce after an FLR */
4179         msleep(100);
4180
4181         /* Allow interrupts for other ULPs running on NIC function */
4182         be_intr_set(adapter, true);
4183
4184         status = be_stats_init(adapter);
4185         if (status)
4186                 goto ctrl_clean;
4187
4188         status = be_get_initial_config(adapter);
4189         if (status)
4190                 goto stats_clean;
4191
4192         INIT_DELAYED_WORK(&adapter->work, be_worker);
4193         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4194         adapter->rx_fc = adapter->tx_fc = true;
4195
4196         status = be_setup(adapter);
4197         if (status)
4198                 goto stats_clean;
4199
4200         be_netdev_init(netdev);
4201         status = register_netdev(netdev);
4202         if (status != 0)
4203                 goto unsetup;
4204
4205         be_roce_dev_add(adapter);
4206
4207         schedule_delayed_work(&adapter->func_recovery_work,
4208                               msecs_to_jiffies(1000));
4209
4210         be_cmd_query_port_name(adapter, &port_name);
4211
4212         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4213                  func_name(adapter), mc_name(adapter), port_name);
4214
4215         return 0;
4216
4217 unsetup:
4218         be_clear(adapter);
4219 stats_clean:
4220         be_stats_cleanup(adapter);
4221 ctrl_clean:
4222         be_ctrl_cleanup(adapter);
4223 free_netdev:
4224         free_netdev(netdev);
4225         pci_set_drvdata(pdev, NULL);
4226 rel_reg:
4227         pci_release_regions(pdev);
4228 disable_dev:
4229         pci_disable_device(pdev);
4230 do_none:
4231         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4232         return status;
4233 }
4234
4235 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4236 {
4237         struct be_adapter *adapter = pci_get_drvdata(pdev);
4238         struct net_device *netdev = adapter->netdev;
4239
4240         if (adapter->wol)
4241                 be_setup_wol(adapter, true);
4242
4243         cancel_delayed_work_sync(&adapter->func_recovery_work);
4244
4245         netif_device_detach(netdev);
4246         if (netif_running(netdev)) {
4247                 rtnl_lock();
4248                 be_close(netdev);
4249                 rtnl_unlock();
4250         }
4251         be_clear(adapter);
4252
4253         pci_save_state(pdev);
4254         pci_disable_device(pdev);
4255         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4256         return 0;
4257 }
4258
4259 static int be_resume(struct pci_dev *pdev)
4260 {
4261         int status = 0;
4262         struct be_adapter *adapter = pci_get_drvdata(pdev);
4263         struct net_device *netdev = adapter->netdev;
4264
4265         netif_device_detach(netdev);
4266
4267         status = pci_enable_device(pdev);
4268         if (status)
4269                 return status;
4270
4271         pci_set_power_state(pdev, PCI_D0);
4272         pci_restore_state(pdev);
4273
4274         /* tell fw we're ready to fire cmds */
4275         status = be_cmd_fw_init(adapter);
4276         if (status)
4277                 return status;
4278
4279         be_setup(adapter);
4280         if (netif_running(netdev)) {
4281                 rtnl_lock();
4282                 be_open(netdev);
4283                 rtnl_unlock();
4284         }
4285
4286         schedule_delayed_work(&adapter->func_recovery_work,
4287                               msecs_to_jiffies(1000));
4288         netif_device_attach(netdev);
4289
4290         if (adapter->wol)
4291                 be_setup_wol(adapter, false);
4292
4293         return 0;
4294 }
4295
4296 /*
4297  * An FLR will stop BE from DMAing any data.
4298  */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

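/* EEH/AER recovery, stage 1: the PCI core has detected a channel error.
 * Quiesce the function and tear down its resources, then request a slot
 * reset unless the failure is permanent.
 */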
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_error = true;

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        rtnl_lock();
        netif_device_detach(netdev);
        rtnl_unlock();

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while a flash dump is in progress can leave
         * it unrecoverable, so wait for the dump to finish.  Waiting on
         * the first function is enough; the dump happens once per adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

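/* EEH/AER recovery, stage 2: the slot has been reset.  Re-enable the
 * device, restore config space and wait for the firmware to become
 * ready before reporting that recovery may proceed.
 */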
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        be_clear_all_error(adapter);

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

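/* EEH/AER recovery, stage 3: traffic may flow again.  Rebuild the state
 * torn down in stage 1 and re-attach the netdev.
 */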
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

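/* Wires the three recovery stages above into the PCI error-recovery
 * (EEH/AER) core.
 */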
static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

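/* Top-level PCI driver: probe/remove, legacy power management, shutdown
 * and the error handlers registered above.
 */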
static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers,
};

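/* Module entry point.  rx_frag_size is validated here: the driver only
 * supports receive fragment buffers of 2048, 4096 or 8192 bytes, and
 * falls back to the 2048-byte default (with a warning) otherwise.
 */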
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                       " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);