drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

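/* Enable or disable host interrupts by toggling the HOSTINTR bit in the
 * MEMBAR interrupt-control register, accessed through PCI config space.
 * No-op if the bit is already in the requested state or after an EEH error.
 */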
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_error)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

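/* Doorbell helpers: each notify writes the ring id and the number of newly
 * posted entries to the adapter's doorbell BAR. The wmb() ensures the
 * descriptor writes are visible in memory before HW is told about them.
 */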
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

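/* The EQ/CQ doorbells additionally carry "arm" (re-enable the interrupt)
 * and the count of entries the driver has consumed, so HW can reclaim
 * those ring slots.
 */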
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For a BE VF, the MAC address is already activated by the PF.
         * Hence the only operation left is updating netdev->dev_addr.
         * Update it only if the user is passing the same MAC which was used
         * while configuring the VF MAC from the PF (hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

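/* Accumulate a free-running 16-bit HW counter into a 32-bit SW counter:
 * if the new 16-bit sample is smaller than the last one, the HW counter
 * wrapped, so add 65536 to compensate.
 */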
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   ((x) & 0xFFFF)
#define hi(x)                   ((x) & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* below erx HW counter can actually wrap around after
                         * 65535. Driver accumulates a 32-bit value
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                             (u16)erx->rx_drops_no_fragments[rxo->q.id]);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

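        /* Snapshot each queue's 64-bit counters under the u64_stats
         * seqcount: the fetch_begin/fetch_retry pair rereads the values
         * if a writer updated them mid-read (matters on 32-bit hosts).
         */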
        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

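/* Build the WRB chain for an skb: the header WRB slot is reserved first,
 * followed by one fragment WRB for the linear part (if any), one per page
 * fragment, and an optional dummy WRB to keep the count even on BEx chips.
 * On a DMA mapping failure the already-mapped fragments are unwound.
 */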
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                __vlan_put_tag(skb, vlan_tag);
                skb->vlan_tci = 0;
        }

        return skb;
}

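/* Main transmit entry point. Besides posting WRBs, this works around two
 * HW issues (padded runt IPv4 frames and VLAN checksumming) and stops the
 * subqueue before ringing the doorbell when the next skb may not fit.
 */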
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* HW has a bug which considers padding bytes as legal
         * and modifies the IPv4 hdr's 'tot_len' field
         */
        if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
                        is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        be_vlan_tag_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

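/* Program the RX filters in decreasing order of precedence: interface
 * promiscuous, multicast-promiscuous (ALLMULTI or too many mcast addrs),
 * then the unicast MAC list and finally the multicast list.
 */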
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

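/* Count this PF's VFs by walking all PCI devices with our vendor id and
 * matching their physfn; depending on vf_state, return either the total
 * or only those flagged as assigned to a guest.
 */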
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

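/* Adaptive interrupt coalescing: once a second, derive an EQ delay from
 * the observed RX packet rate and clamp it to the [min_eqd, max_eqd]
 * window before handing it to FW.
 */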
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

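/* Look up the page backing an RX fragment. A page can back multiple
 * frags; its DMA mapping is torn down only when the last user of the
 * page is consumed.
 */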
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}
1378
1379 /* Process the RX completion indicated by rxcp when GRO is enabled */
1380 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1381                              struct be_rx_compl_info *rxcp)
1382 {
1383         struct be_adapter *adapter = rxo->adapter;
1384         struct be_rx_page_info *page_info;
1385         struct sk_buff *skb = NULL;
1386         struct be_queue_info *rxq = &rxo->q;
1387         u16 remaining, curr_frag_len;
1388         u16 i, j;
1389
1390         skb = napi_get_frags(napi);
1391         if (!skb) {
1392                 be_rx_compl_discard(rxo, rxcp);
1393                 return;
1394         }
1395
1396         remaining = rxcp->pkt_size;
1397         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1398                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1399
1400                 curr_frag_len = min(remaining, rx_frag_size);
1401
1402                 /* Coalesce all frags from the same physical page in one slot */
1403                 if (i == 0 || page_info->page_offset == 0) {
1404                         /* First frag or Fresh page */
1405                         j++;
1406                         skb_frag_set_page(skb, j, page_info->page);
1407                         skb_shinfo(skb)->frags[j].page_offset =
1408                                                         page_info->page_offset;
1409                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1410                 } else {
1411                         put_page(page_info->page);
1412                 }
1413                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1414                 skb->truesize += rx_frag_size;
1415                 remaining -= curr_frag_len;
1416                 index_inc(&rxcp->rxq_idx, rxq->len);
1417                 memset(page_info, 0, sizeof(*page_info));
1418         }
1419         BUG_ON(j > MAX_SKB_FRAGS);
1420
1421         skb_shinfo(skb)->nr_frags = j + 1;
1422         skb->len = rxcp->pkt_size;
1423         skb->data_len = rxcp->pkt_size;
1424         skb->ip_summed = CHECKSUM_UNNECESSARY;
1425         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1426         if (adapter->netdev->features & NETIF_F_RXHASH)
1427                 skb->rxhash = rxcp->rss_hash;
1428
1429         if (rxcp->vlanf)
1430                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1431
1432         napi_gro_frags(napi);
1433 }
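
/* A worked example of the frag-coalescing loop above, assuming the
 * default rx_frag_size of 2048 and 4K pages (i.e. two frags per page):
 * a 5000-byte packet arriving in three frags is placed as
 *
 *      frag 0: page A, offset 0    -> opens slot j = 0, size 2048
 *      frag 1: page A, offset 2048 -> extends slot 0 to 4096 and drops
 *                                     the extra page reference
 *      frag 2: page B, offset 0    -> opens slot j = 1, size 904
 *
 * so nr_frags ends up as 2 rather than 3.  Note that j starts at
 * (u16)-1 and wraps to 0 on its first increment.
 */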
1434
1435 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1436                                  struct be_rx_compl_info *rxcp)
1437 {
1438         rxcp->pkt_size =
1439                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1440         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1441         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1442         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1443         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1444         rxcp->ip_csum =
1445                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1446         rxcp->l4_csum =
1447                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1448         rxcp->ipv6 =
1449                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1450         rxcp->rxq_idx =
1451                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1452         rxcp->num_rcvd =
1453                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1454         rxcp->pkt_type =
1455                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1456         rxcp->rss_hash =
1457                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1458         if (rxcp->vlanf) {
1459                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1460                                           compl);
1461                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1462                                                compl);
1463         }
1464         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1465 }
1466
1467 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1468                                  struct be_rx_compl_info *rxcp)
1469 {
1470         rxcp->pkt_size =
1471                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1472         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1473         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1474         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1475         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1476         rxcp->ip_csum =
1477                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1478         rxcp->l4_csum =
1479                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1480         rxcp->ipv6 =
1481                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1482         rxcp->rxq_idx =
1483                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1484         rxcp->num_rcvd =
1485                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1486         rxcp->pkt_type =
1487                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1488         rxcp->rss_hash =
1489                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1490         if (rxcp->vlanf) {
1491                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1492                                           compl);
1493                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1494                                                compl);
1495         }
1496         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1497 }
1498
1499 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1500 {
1501         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1502         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1503         struct be_adapter *adapter = rxo->adapter;
1504
1505         /* For checking the valid bit it is OK to use either definition, as the
1506          * valid bit is at the same position in both v0 and v1 Rx compl */
1507         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1508                 return NULL;
1509
1510         rmb();
1511         be_dws_le_to_cpu(compl, sizeof(*compl));
1512
1513         if (adapter->be3_native)
1514                 be_parse_rx_compl_v1(compl, rxcp);
1515         else
1516                 be_parse_rx_compl_v0(compl, rxcp);
1517
1518         if (rxcp->vlanf) {
1519                 /* vlanf could be wrongly set in some cards.
1520                  * ignore if vtm is not set */
1521                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1522                         rxcp->vlanf = 0;
1523
1524                 if (!lancer_chip(adapter))
1525                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1526
1527                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1528                     !adapter->vlan_tag[rxcp->vlan_tag])
1529                         rxcp->vlanf = 0;
1530         }
1531
1532         /* As the compl has been parsed, reset it; we won't touch it again */
1533         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1534
1535         queue_tail_inc(&rxo->cq);
1536         return rxcp;
1537 }
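
/* The dw[offsetof(...) / 32] idiom above works because the amap_*
 * structs describe the hardware layout one byte per bit: offsetof()
 * therefore yields a bit position, and dividing by 32 selects the
 * 32-bit word holding the "valid" bit.  For example, were the valid
 * bit at bit position 95, dw[95 / 32] == dw[2] would be tested.
 */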
1538
1539 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1540 {
1541         u32 order = get_order(size);
1542
1543         if (order > 0)
1544                 gfp |= __GFP_COMP;
1545         return  alloc_pages(gfp, order);
1546 }
1547
1548 /*
1549  * Allocate a page, split it to fragments of size rx_frag_size and post as
1550  * receive buffers to BE
1551  */
1552 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1553 {
1554         struct be_adapter *adapter = rxo->adapter;
1555         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1556         struct be_queue_info *rxq = &rxo->q;
1557         struct page *pagep = NULL;
1558         struct be_eth_rx_d *rxd;
1559         u64 page_dmaaddr = 0, frag_dmaaddr;
1560         u32 posted, page_offset = 0;
1561
1562         page_info = &rxo->page_info_tbl[rxq->head];
1563         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1564                 if (!pagep) {
1565                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1566                         if (unlikely(!pagep)) {
1567                                 rx_stats(rxo)->rx_post_fail++;
1568                                 break;
1569                         }
1570                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1571                                                     0, adapter->big_page_size,
1572                                                     DMA_FROM_DEVICE);
1573                         page_info->page_offset = 0;
1574                 } else {
1575                         get_page(pagep);
1576                         page_info->page_offset = page_offset + rx_frag_size;
1577                 }
1578                 page_offset = page_info->page_offset;
1579                 page_info->page = pagep;
1580                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1581                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1582
1583                 rxd = queue_head_node(rxq);
1584                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1585                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1586
1587                 /* Any space left in the current big page for another frag? */
1588                 if ((page_offset + rx_frag_size + rx_frag_size) >
1589                                         adapter->big_page_size) {
1590                         pagep = NULL;
1591                         page_info->last_page_user = true;
1592                 }
1593
1594                 prev_page_info = page_info;
1595                 queue_head_inc(rxq);
1596                 page_info = &rxo->page_info_tbl[rxq->head];
1597         }
1598         if (pagep)
1599                 prev_page_info->last_page_user = true;
1600
1601         if (posted) {
1602                 atomic_add(posted, &rxq->used);
1603                 be_rxq_notify(adapter, rxq->id, posted);
1604         } else if (atomic_read(&rxq->used) == 0) {
1605                 /* Let be_worker replenish when memory is available */
1606                 rxo->rx_post_starved = true;
1607         }
1608 }
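
/* For example, with the default rx_frag_size of 2048 on a 4K-page
 * system, get_order(2048) == 0 and so big_page_size == 4096: every
 * page is carved into two frags.  The first frag dma-maps the whole
 * page at offset 0; the second takes an extra reference via
 * get_page() and reuses the mapping at offset 2048, and is marked
 * last_page_user so that its consumption drops the final reference.
 */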
1609
1610 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1611 {
1612         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1613
1614         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1615                 return NULL;
1616
1617         rmb();
1618         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1619
1620         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1621
1622         queue_tail_inc(tx_cq);
1623         return txcp;
1624 }
1625
1626 static u16 be_tx_compl_process(struct be_adapter *adapter,
1627                 struct be_tx_obj *txo, u16 last_index)
1628 {
1629         struct be_queue_info *txq = &txo->q;
1630         struct be_eth_wrb *wrb;
1631         struct sk_buff **sent_skbs = txo->sent_skb_list;
1632         struct sk_buff *sent_skb;
1633         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1634         bool unmap_skb_hdr = true;
1635
1636         sent_skb = sent_skbs[txq->tail];
1637         BUG_ON(!sent_skb);
1638         sent_skbs[txq->tail] = NULL;
1639
1640         /* skip header wrb */
1641         queue_tail_inc(txq);
1642
1643         do {
1644                 cur_index = txq->tail;
1645                 wrb = queue_tail_node(txq);
1646                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1647                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1648                 unmap_skb_hdr = false;
1649
1650                 num_wrbs++;
1651                 queue_tail_inc(txq);
1652         } while (cur_index != last_index);
1653
1654         kfree_skb(sent_skb);
1655         return num_wrbs;
1656 }
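
/* For example, an skb posted as a header wrb, a wrb for the linear
 * part and two frag wrbs occupies four queue slots.  When its
 * completion reports the index of the last frag wrb, the loop above
 * unmaps the three data wrbs and returns num_wrbs == 4 (the header
 * wrb is covered by the initial value of 1).
 */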
1657
1658 /* Return the number of events in the event queue */
1659 static inline int events_get(struct be_eq_obj *eqo)
1660 {
1661         struct be_eq_entry *eqe;
1662         int num = 0;
1663
1664         do {
1665                 eqe = queue_tail_node(&eqo->q);
1666                 if (eqe->evt == 0)
1667                         break;
1668
1669                 rmb();
1670                 eqe->evt = 0;
1671                 num++;
1672                 queue_tail_inc(&eqo->q);
1673         } while (true);
1674
1675         return num;
1676 }
1677
1678 /* Leaves the EQ in a disarmed state */
1679 static void be_eq_clean(struct be_eq_obj *eqo)
1680 {
1681         int num = events_get(eqo);
1682
1683         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1684 }
1685
1686 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1687 {
1688         struct be_rx_page_info *page_info;
1689         struct be_queue_info *rxq = &rxo->q;
1690         struct be_queue_info *rx_cq = &rxo->cq;
1691         struct be_rx_compl_info *rxcp;
1692         struct be_adapter *adapter = rxo->adapter;
1693         int flush_wait = 0;
1694         u16 tail;
1695
1696         /* Consume pending rx completions.
1697          * Wait for the flush completion (identified by zero num_rcvd)
1698          * to arrive. Notify CQ even when there are no more CQ entries
1699          * for HW to flush partially coalesced CQ entries.
1700          * In Lancer, there is no need to wait for flush compl.
1701          */
1702         for (;;) {
1703                 rxcp = be_rx_compl_get(rxo);
1704                 if (rxcp == NULL) {
1705                         if (lancer_chip(adapter))
1706                                 break;
1707
1708                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1709                                 dev_warn(&adapter->pdev->dev,
1710                                          "did not receive flush compl\n");
1711                                 break;
1712                         }
1713                         be_cq_notify(adapter, rx_cq->id, true, 0);
1714                         mdelay(1);
1715                 } else {
1716                         be_rx_compl_discard(rxo, rxcp);
1717                         be_cq_notify(adapter, rx_cq->id, true, 1);
1718                         if (rxcp->num_rcvd == 0)
1719                                 break;
1720                 }
1721         }
1722
1723         /* After cleanup, leave the CQ in unarmed state */
1724         be_cq_notify(adapter, rx_cq->id, false, 0);
1725
1726         /* Then free posted rx buffers that were not used */
1727         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1728         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1729                 page_info = get_rx_page_info(rxo, tail);
1730                 put_page(page_info->page);
1731                 memset(page_info, 0, sizeof(*page_info));
1732         }
1733         BUG_ON(atomic_read(&rxq->used));
1734         rxq->tail = rxq->head = 0;
1735 }
1736
1737 static void be_tx_compl_clean(struct be_adapter *adapter)
1738 {
1739         struct be_tx_obj *txo;
1740         struct be_queue_info *txq;
1741         struct be_eth_tx_compl *txcp;
1742         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1743         struct sk_buff *sent_skb;
1744         bool dummy_wrb;
1745         int i, pending_txqs;
1746
1747         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1748         do {
1749                 pending_txqs = adapter->num_tx_qs;
1750
1751                 for_all_tx_queues(adapter, txo, i) {
1752                         txq = &txo->q;
1753                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1754                                 end_idx =
1755                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1756                                                       wrb_index, txcp);
1757                                 num_wrbs += be_tx_compl_process(adapter, txo,
1758                                                                 end_idx);
1759                                 cmpl++;
1760                         }
1761                         if (cmpl) {
1762                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1763                                 atomic_sub(num_wrbs, &txq->used);
1764                                 cmpl = 0;
1765                                 num_wrbs = 0;
1766                         }
1767                         if (atomic_read(&txq->used) == 0)
1768                                 pending_txqs--;
1769                 }
1770
1771                 if (pending_txqs == 0 || ++timeo > 200)
1772                         break;
1773
1774                 mdelay(1);
1775         } while (true);
1776
1777         for_all_tx_queues(adapter, txo, i) {
1778                 txq = &txo->q;
1779                 if (atomic_read(&txq->used))
1780                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1781                                 atomic_read(&txq->used));
1782
1783                 /* free posted tx for which compls will never arrive */
1784                 while (atomic_read(&txq->used)) {
1785                         sent_skb = txo->sent_skb_list[txq->tail];
1786                         end_idx = txq->tail;
1787                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1788                                                    &dummy_wrb);
1789                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1790                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1791                         atomic_sub(num_wrbs, &txq->used);
1792                 }
1793         }
1794 }
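
/* The 200ms bound above comes from at most 200 iterations of
 * mdelay(1).  Any wrbs still outstanding after that are reclaimed by
 * walking sent_skb_list directly, using wrb_cnt_for_skb() to
 * recompute how many slots each skb consumed.
 */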
1795
1796 static void be_evt_queues_destroy(struct be_adapter *adapter)
1797 {
1798         struct be_eq_obj *eqo;
1799         int i;
1800
1801         for_all_evt_queues(adapter, eqo, i) {
1802                 if (eqo->q.created) {
1803                         be_eq_clean(eqo);
1804                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1805                 }
1806                 be_queue_free(adapter, &eqo->q);
1807         }
1808 }
1809
1810 static int be_evt_queues_create(struct be_adapter *adapter)
1811 {
1812         struct be_queue_info *eq;
1813         struct be_eq_obj *eqo;
1814         int i, rc;
1815
1816         adapter->num_evt_qs = num_irqs(adapter);
1817
1818         for_all_evt_queues(adapter, eqo, i) {
1819                 eqo->adapter = adapter;
1820                 eqo->tx_budget = BE_TX_BUDGET;
1821                 eqo->idx = i;
1822                 eqo->max_eqd = BE_MAX_EQD;
1823                 eqo->enable_aic = true;
1824
1825                 eq = &eqo->q;
1826                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1827                                         sizeof(struct be_eq_entry));
1828                 if (rc)
1829                         return rc;
1830
1831                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1832                 if (rc)
1833                         return rc;
1834         }
1835         return 0;
1836 }
1837
1838 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1839 {
1840         struct be_queue_info *q;
1841
1842         q = &adapter->mcc_obj.q;
1843         if (q->created)
1844                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1845         be_queue_free(adapter, q);
1846
1847         q = &adapter->mcc_obj.cq;
1848         if (q->created)
1849                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1850         be_queue_free(adapter, q);
1851 }
1852
1853 /* Must be called only after TX qs are created as MCC shares TX EQ */
1854 static int be_mcc_queues_create(struct be_adapter *adapter)
1855 {
1856         struct be_queue_info *q, *cq;
1857
1858         cq = &adapter->mcc_obj.cq;
1859         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1860                         sizeof(struct be_mcc_compl)))
1861                 goto err;
1862
1863         /* Use the default EQ for MCC completions */
1864         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1865                 goto mcc_cq_free;
1866
1867         q = &adapter->mcc_obj.q;
1868         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1869                 goto mcc_cq_destroy;
1870
1871         if (be_cmd_mccq_create(adapter, q, cq))
1872                 goto mcc_q_free;
1873
1874         return 0;
1875
1876 mcc_q_free:
1877         be_queue_free(adapter, q);
1878 mcc_cq_destroy:
1879         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1880 mcc_cq_free:
1881         be_queue_free(adapter, cq);
1882 err:
1883         return -1;
1884 }
1885
1886 static void be_tx_queues_destroy(struct be_adapter *adapter)
1887 {
1888         struct be_queue_info *q;
1889         struct be_tx_obj *txo;
1890         u8 i;
1891
1892         for_all_tx_queues(adapter, txo, i) {
1893                 q = &txo->q;
1894                 if (q->created)
1895                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1896                 be_queue_free(adapter, q);
1897
1898                 q = &txo->cq;
1899                 if (q->created)
1900                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1901                 be_queue_free(adapter, q);
1902         }
1903 }
1904
1905 static int be_num_txqs_want(struct be_adapter *adapter)
1906 {
1907         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1908             be_is_mc(adapter) ||
1909             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1910             BE2_chip(adapter))
1911                 return 1;
1912         else
1913                 return adapter->max_tx_queues;
1914 }
1915
1916 static int be_tx_cqs_create(struct be_adapter *adapter)
1917 {
1918         struct be_queue_info *cq, *eq;
1919         int status;
1920         struct be_tx_obj *txo;
1921         u8 i;
1922
1923         adapter->num_tx_qs = be_num_txqs_want(adapter);
1924         if (adapter->num_tx_qs != MAX_TX_QS) {
1925                 rtnl_lock();
1926                 netif_set_real_num_tx_queues(adapter->netdev,
1927                         adapter->num_tx_qs);
1928                 rtnl_unlock();
1929         }
1930
1931         for_all_tx_queues(adapter, txo, i) {
1932                 cq = &txo->cq;
1933                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1934                                         sizeof(struct be_eth_tx_compl));
1935                 if (status)
1936                         return status;
1937
1938                 /* If num_evt_qs is less than num_tx_qs, then more than
1939                  * one txq shares an eq
1940                  */
1941                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1942                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1943                 if (status)
1944                         return status;
1945         }
1946         return 0;
1947 }
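
/* For example, with num_tx_qs == 8 and num_evt_qs == 4 the
 * i % num_evt_qs mapping above pairs the queues as
 *
 *      txq 0, 4 -> eq 0        txq 1, 5 -> eq 1
 *      txq 2, 6 -> eq 2        txq 3, 7 -> eq 3
 *
 * i.e. every EQ services the completions of two TX queues.
 */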
1948
1949 static int be_tx_qs_create(struct be_adapter *adapter)
1950 {
1951         struct be_tx_obj *txo;
1952         int i, status;
1953
1954         for_all_tx_queues(adapter, txo, i) {
1955                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1956                                         sizeof(struct be_eth_wrb));
1957                 if (status)
1958                         return status;
1959
1960                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1961                 if (status)
1962                         return status;
1963         }
1964
1965         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1966                  adapter->num_tx_qs);
1967         return 0;
1968 }
1969
1970 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1971 {
1972         struct be_queue_info *q;
1973         struct be_rx_obj *rxo;
1974         int i;
1975
1976         for_all_rx_queues(adapter, rxo, i) {
1977                 q = &rxo->cq;
1978                 if (q->created)
1979                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1980                 be_queue_free(adapter, q);
1981         }
1982 }
1983
1984 static int be_rx_cqs_create(struct be_adapter *adapter)
1985 {
1986         struct be_queue_info *eq, *cq;
1987         struct be_rx_obj *rxo;
1988         int rc, i;
1989
1990         /* We'll create as many RSS rings as there are irqs.
1991          * But when there's only one irq there's no use creating RSS rings
1992          */
1993         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1994                                 num_irqs(adapter) + 1 : 1;
1995         if (adapter->num_rx_qs != MAX_RX_QS) {
1996                 rtnl_lock();
1997                 netif_set_real_num_rx_queues(adapter->netdev,
1998                                              adapter->num_rx_qs);
1999                 rtnl_unlock();
2000         }
2001
2002         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2003         for_all_rx_queues(adapter, rxo, i) {
2004                 rxo->adapter = adapter;
2005                 cq = &rxo->cq;
2006                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2007                                 sizeof(struct be_eth_rx_compl));
2008                 if (rc)
2009                         return rc;
2010
2011                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2012                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2013                 if (rc)
2014                         return rc;
2015         }
2016
2017         dev_info(&adapter->pdev->dev,
2018                  "created %d RSS queue(s) and 1 default RX queue\n",
2019                  adapter->num_rx_qs - 1);
2020         return 0;
2021 }
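
/* For example, four MSI-x vectors give num_rx_qs == 5: four RSS
 * rings plus the default (non-RSS) RX queue.  With a single vector,
 * no RSS rings are created and only the default queue exists.
 */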
2022
2023 static irqreturn_t be_intx(int irq, void *dev)
2024 {
2025         struct be_eq_obj *eqo = dev;
2026         struct be_adapter *adapter = eqo->adapter;
2027         int num_evts = 0;
2028
2029         /* IRQ is not expected when NAPI is scheduled as the EQ
2030          * will not be armed.
2031          * But, this can happen on Lancer INTx where it takes
2032          * a while to de-assert INTx or in BE2 where occasionaly
2033          * an interrupt may be raised even when EQ is unarmed.
2034          * If NAPI is already scheduled, then counting & notifying
2035          * events will orphan them.
2036          */
2037         if (napi_schedule_prep(&eqo->napi)) {
2038                 num_evts = events_get(eqo);
2039                 __napi_schedule(&eqo->napi);
2040                 if (num_evts)
2041                         eqo->spurious_intr = 0;
2042         }
2043         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2044
2045         /* Return IRQ_HANDLED only for the first spurious intr
2046          * after a valid intr to stop the kernel from branding
2047          * this irq as a bad one!
2048          */
2049         if (num_evts || eqo->spurious_intr++ == 0)
2050                 return IRQ_HANDLED;
2051         else
2052                 return IRQ_NONE;
2053 }
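
/* For example, a valid interrupt (num_evts > 0) followed by two
 * spurious ones returns IRQ_HANDLED, IRQ_HANDLED, IRQ_NONE: the first
 * spurious intr after a valid one is still claimed so that the kernel
 * does not prematurely disable the line.
 */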
2054
2055 static irqreturn_t be_msix(int irq, void *dev)
2056 {
2057         struct be_eq_obj *eqo = dev;
2058
2059         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2060         napi_schedule(&eqo->napi);
2061         return IRQ_HANDLED;
2062 }
2063
2064 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2065 {
2066         return rxcp->tcpf && !rxcp->err;
2067 }
2068
2069 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2070                         int budget)
2071 {
2072         struct be_adapter *adapter = rxo->adapter;
2073         struct be_queue_info *rx_cq = &rxo->cq;
2074         struct be_rx_compl_info *rxcp;
2075         u32 work_done;
2076
2077         for (work_done = 0; work_done < budget; work_done++) {
2078                 rxcp = be_rx_compl_get(rxo);
2079                 if (!rxcp)
2080                         break;
2081
2082                 /* Is it a flush compl that has no data? */
2083                 if (unlikely(rxcp->num_rcvd == 0))
2084                         goto loop_continue;
2085
2086                 /* Discard compl with partial DMA (Lancer B0) */
2087                 if (unlikely(!rxcp->pkt_size)) {
2088                         be_rx_compl_discard(rxo, rxcp);
2089                         goto loop_continue;
2090                 }
2091
2092                 /* On BE drop pkts that arrive due to imperfect filtering in
2093                  * promiscuous mode on some SKUs
2094                  */
2095                 if (unlikely(rxcp->port != adapter->port_num &&
2096                                 !lancer_chip(adapter))) {
2097                         be_rx_compl_discard(rxo, rxcp);
2098                         goto loop_continue;
2099                 }
2100
2101                 if (do_gro(rxcp))
2102                         be_rx_compl_process_gro(rxo, napi, rxcp);
2103                 else
2104                         be_rx_compl_process(rxo, rxcp);
2105 loop_continue:
2106                 be_rx_stats_update(rxo, rxcp);
2107         }
2108
2109         if (work_done) {
2110                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2111
2112                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2113                         be_post_rx_frags(rxo, GFP_ATOMIC);
2114         }
2115
2116         return work_done;
2117 }
2118
2119 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2120                           int budget, int idx)
2121 {
2122         struct be_eth_tx_compl *txcp;
2123         int num_wrbs = 0, work_done;
2124
2125         for (work_done = 0; work_done < budget; work_done++) {
2126                 txcp = be_tx_compl_get(&txo->cq);
2127                 if (!txcp)
2128                         break;
2129                 num_wrbs += be_tx_compl_process(adapter, txo,
2130                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2131                                         wrb_index, txcp));
2132         }
2133
2134         if (work_done) {
2135                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2136                 atomic_sub(num_wrbs, &txo->q.used);
2137
2138                 /* As Tx wrbs have been freed up, wake up netdev queue
2139                  * if it was stopped due to lack of tx wrbs.  */
2140                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2141                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2142                         netif_wake_subqueue(adapter->netdev, idx);
2143                 }
2144
2145                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2146                 tx_stats(txo)->tx_compl += work_done;
2147                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2148         }
2149         return (work_done < budget); /* Done */
2150 }
2151
2152 int be_poll(struct napi_struct *napi, int budget)
2153 {
2154         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2155         struct be_adapter *adapter = eqo->adapter;
2156         int max_work = 0, work, i, num_evts;
2157         bool tx_done;
2158
2159         num_evts = events_get(eqo);
2160
2161         /* Process all TXQs serviced by this EQ */
2162         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2163                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2164                                         eqo->tx_budget, i);
2165                 if (!tx_done)
2166                         max_work = budget;
2167         }
2168
2169         /* This loop will iterate twice for EQ0 in which
2170          * completions of the last RXQ (default one) are also processed.
2171          * For other EQs the loop iterates only once.
2172          */
2173         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2174                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2175                 max_work = max(work, max_work);
2176         }
2177
2178         if (is_mcc_eqo(eqo))
2179                 be_process_mcc(adapter);
2180
2181         if (max_work < budget) {
2182                 napi_complete(napi);
2183                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2184         } else {
2185                 /* As we'll continue in polling mode, count and clear events */
2186                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2187         }
2188         return max_work;
2189 }
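
/* For example, with num_evt_qs == 4 and num_rx_qs == 5 the RX loop
 * above gives eq0 the indices 0 and 4 (its own RSS ring plus the
 * default RXQ), while eq1..eq3 each poll a single RSS ring.
 */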
2190
2191 void be_detect_error(struct be_adapter *adapter)
2192 {
2193         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2194         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2195         u32 i;
2196
2197         if (be_hw_error(adapter))
2198                 return;
2199
2200         if (lancer_chip(adapter)) {
2201                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2202                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2203                         sliport_err1 = ioread32(adapter->db +
2204                                         SLIPORT_ERROR1_OFFSET);
2205                         sliport_err2 = ioread32(adapter->db +
2206                                         SLIPORT_ERROR2_OFFSET);
2207                 }
2208         } else {
2209                 pci_read_config_dword(adapter->pdev,
2210                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2211                 pci_read_config_dword(adapter->pdev,
2212                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2213                 pci_read_config_dword(adapter->pdev,
2214                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2215                 pci_read_config_dword(adapter->pdev,
2216                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2217
2218                 ue_lo = (ue_lo & ~ue_lo_mask);
2219                 ue_hi = (ue_hi & ~ue_hi_mask);
2220         }
2221
2222         /* On certain platforms BE hardware can indicate spurious UEs.
2223          * In case of a real UE the h/w stops working on its own anyway,
2224          * so hw_error is not set here on UE detection.
2225          */
2226         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2227                 adapter->hw_error = true;
2228                 dev_err(&adapter->pdev->dev,
2229                         "Error detected in the card\n");
2230         }
2231
2232         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2233                 dev_err(&adapter->pdev->dev,
2234                         "ERR: sliport status 0x%x\n", sliport_status);
2235                 dev_err(&adapter->pdev->dev,
2236                         "ERR: sliport error1 0x%x\n", sliport_err1);
2237                 dev_err(&adapter->pdev->dev,
2238                         "ERR: sliport error2 0x%x\n", sliport_err2);
2239         }
2240
2241         if (ue_lo) {
2242                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2243                         if (ue_lo & 1)
2244                                 dev_err(&adapter->pdev->dev,
2245                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2246                 }
2247         }
2248
2249         if (ue_hi) {
2250                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2251                         if (ue_hi & 1)
2252                                 dev_err(&adapter->pdev->dev,
2253                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2254                 }
2255         }
2257 }
2258
2259 static void be_msix_disable(struct be_adapter *adapter)
2260 {
2261         if (msix_enabled(adapter)) {
2262                 pci_disable_msix(adapter->pdev);
2263                 adapter->num_msix_vec = 0;
2264         }
2265 }
2266
2267 static uint be_num_rss_want(struct be_adapter *adapter)
2268 {
2269         u32 num = 0;
2270
2271         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2272             (lancer_chip(adapter) ||
2273              (!sriov_want(adapter) && be_physfn(adapter)))) {
2274                 num = adapter->max_rss_queues;
2275                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2276         }
2277         return num;
2278 }
2279
2280 static void be_msix_enable(struct be_adapter *adapter)
2281 {
2282 #define BE_MIN_MSIX_VECTORS             1
2283         int i, status, num_vec, num_roce_vec = 0;
2284         struct device *dev = &adapter->pdev->dev;
2285
2286         /* If RSS queues are not used, need a vec for default RX Q */
2287         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2288         if (be_roce_supported(adapter)) {
2289                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2290                                         (num_online_cpus() + 1));
2291                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2292                 num_vec += num_roce_vec;
2293                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2294         }
2295         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2296
2297         for (i = 0; i < num_vec; i++)
2298                 adapter->msix_entries[i].entry = i;
2299
2300         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2301         if (status == 0) {
2302                 goto done;
2303         } else if (status >= BE_MIN_MSIX_VECTORS) {
2304                 num_vec = status;
2305                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2306                                 num_vec) == 0)
2307                         goto done;
2308         }
2309
2310         dev_warn(dev, "MSIx enable failed\n");
2311         return;
2312 done:
2313         if (be_roce_supported(adapter)) {
2314                 if (num_vec > num_roce_vec) {
2315                         adapter->num_msix_vec = num_vec - num_roce_vec;
2316                         adapter->num_msix_roce_vec =
2317                                 num_vec - adapter->num_msix_vec;
2318                 } else {
2319                         adapter->num_msix_vec = num_vec;
2320                         adapter->num_msix_roce_vec = 0;
2321                 }
2322         } else
2323                 adapter->num_msix_vec = num_vec;
2324         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2325         return;
2326 }
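
/* For example, if 8 vectors are granted and RoCE asks for 3, the
 * split at the done label leaves num_msix_vec == 5 for the NIC
 * queues and num_msix_roce_vec == 3 for RoCE; without RoCE support
 * all granted vectors go to the NIC.
 */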
2327
2328 static inline int be_msix_vec_get(struct be_adapter *adapter,
2329                                 struct be_eq_obj *eqo)
2330 {
2331         return adapter->msix_entries[eqo->idx].vector;
2332 }
2333
2334 static int be_msix_register(struct be_adapter *adapter)
2335 {
2336         struct net_device *netdev = adapter->netdev;
2337         struct be_eq_obj *eqo;
2338         int status, i, vec;
2339
2340         for_all_evt_queues(adapter, eqo, i) {
2341                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2342                 vec = be_msix_vec_get(adapter, eqo);
2343                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2344                 if (status)
2345                         goto err_msix;
2346         }
2347
2348         return 0;
2349 err_msix:
2350         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2351                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2352         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2353                 status);
2354         be_msix_disable(adapter);
2355         return status;
2356 }
2357
2358 static int be_irq_register(struct be_adapter *adapter)
2359 {
2360         struct net_device *netdev = adapter->netdev;
2361         int status;
2362
2363         if (msix_enabled(adapter)) {
2364                 status = be_msix_register(adapter);
2365                 if (status == 0)
2366                         goto done;
2367                 /* INTx is not supported for VF */
2368                 if (!be_physfn(adapter))
2369                         return status;
2370         }
2371
2372         /* INTx: only the first EQ is used */
2373         netdev->irq = adapter->pdev->irq;
2374         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2375                              &adapter->eq_obj[0]);
2376         if (status) {
2377                 dev_err(&adapter->pdev->dev,
2378                         "INTx request IRQ failed - err %d\n", status);
2379                 return status;
2380         }
2381 done:
2382         adapter->isr_registered = true;
2383         return 0;
2384 }
2385
2386 static void be_irq_unregister(struct be_adapter *adapter)
2387 {
2388         struct net_device *netdev = adapter->netdev;
2389         struct be_eq_obj *eqo;
2390         int i;
2391
2392         if (!adapter->isr_registered)
2393                 return;
2394
2395         /* INTx */
2396         if (!msix_enabled(adapter)) {
2397                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2398                 goto done;
2399         }
2400
2401         /* MSIx */
2402         for_all_evt_queues(adapter, eqo, i)
2403                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2404
2405 done:
2406         adapter->isr_registered = false;
2407 }
2408
2409 static void be_rx_qs_destroy(struct be_adapter *adapter)
2410 {
2411         struct be_queue_info *q;
2412         struct be_rx_obj *rxo;
2413         int i;
2414
2415         for_all_rx_queues(adapter, rxo, i) {
2416                 q = &rxo->q;
2417                 if (q->created) {
2418                         be_cmd_rxq_destroy(adapter, q);
2419                         /* After the rxq is invalidated, wait for a grace time
2420                          * of 1ms for all dma to end and the flush compl to
2421                          * arrive
2422                          */
2423                         mdelay(1);
2424                         be_rx_cq_clean(rxo);
2425                 }
2426                 be_queue_free(adapter, q);
2427         }
2428 }
2429
2430 static int be_close(struct net_device *netdev)
2431 {
2432         struct be_adapter *adapter = netdev_priv(netdev);
2433         struct be_eq_obj *eqo;
2434         int i;
2435
2436         be_roce_dev_close(adapter);
2437
2438         if (!lancer_chip(adapter))
2439                 be_intr_set(adapter, false);
2440
2441         for_all_evt_queues(adapter, eqo, i)
2442                 napi_disable(&eqo->napi);
2443
2444         be_async_mcc_disable(adapter);
2445
2446         /* Wait for all pending tx completions to arrive so that
2447          * all tx skbs are freed.
2448          */
2449         be_tx_compl_clean(adapter);
2450
2451         be_rx_qs_destroy(adapter);
2452
2453         for_all_evt_queues(adapter, eqo, i) {
2454                 if (msix_enabled(adapter))
2455                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2456                 else
2457                         synchronize_irq(netdev->irq);
2458                 be_eq_clean(eqo);
2459         }
2460
2461         be_irq_unregister(adapter);
2462
2463         return 0;
2464 }
2465
2466 static int be_rx_qs_create(struct be_adapter *adapter)
2467 {
2468         struct be_rx_obj *rxo;
2469         int rc, i, j;
2470         u8 rsstable[128];
2471
2472         for_all_rx_queues(adapter, rxo, i) {
2473                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2474                                     sizeof(struct be_eth_rx_d));
2475                 if (rc)
2476                         return rc;
2477         }
2478
2479         /* The FW would like the default RXQ to be created first */
2480         rxo = default_rxo(adapter);
2481         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2482                                adapter->if_handle, false, &rxo->rss_id);
2483         if (rc)
2484                 return rc;
2485
2486         for_all_rss_queues(adapter, rxo, i) {
2487                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2488                                        rx_frag_size, adapter->if_handle,
2489                                        true, &rxo->rss_id);
2490                 if (rc)
2491                         return rc;
2492         }
2493
2494         if (be_multi_rxq(adapter)) {
2495                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2496                         for_all_rss_queues(adapter, rxo, i) {
2497                                 if ((j + i) >= 128)
2498                                         break;
2499                                 rsstable[j + i] = rxo->rss_id;
2500                         }
2501                 }
2502                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2503                 if (rc)
2504                         return rc;
2505         }
2506
2507         /* First time posting */
2508         for_all_rx_queues(adapter, rxo, i)
2509                 be_post_rx_frags(rxo, GFP_KERNEL);
2510         return 0;
2511 }
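
/* For example, with num_rx_qs == 5 (four RSS rings) the loop above
 * fills the 128-entry indirection table round-robin, striding by
 * num_rx_qs - 1 == 4:
 *
 *      rsstable[] = { id0, id1, id2, id3, id0, id1, ... }
 *
 * where id0..id3 are the rss_ids of the four rings, so the RSS hash
 * spreads flows evenly; the default RXQ is not part of the table.
 */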
2512
2513 static int be_open(struct net_device *netdev)
2514 {
2515         struct be_adapter *adapter = netdev_priv(netdev);
2516         struct be_eq_obj *eqo;
2517         struct be_rx_obj *rxo;
2518         struct be_tx_obj *txo;
2519         u8 link_status;
2520         int status, i;
2521
2522         status = be_rx_qs_create(adapter);
2523         if (status)
2524                 goto err;
2525
2526         be_irq_register(adapter);
2527
2528         if (!lancer_chip(adapter))
2529                 be_intr_set(adapter, true);
2530
2531         for_all_rx_queues(adapter, rxo, i)
2532                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2533
2534         for_all_tx_queues(adapter, txo, i)
2535                 be_cq_notify(adapter, txo->cq.id, true, 0);
2536
2537         be_async_mcc_enable(adapter);
2538
2539         for_all_evt_queues(adapter, eqo, i) {
2540                 napi_enable(&eqo->napi);
2541                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2542         }
2543
2544         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2545         if (!status)
2546                 be_link_status_update(adapter, link_status);
2547
2548         be_roce_dev_open(adapter);
2549         return 0;
2550 err:
2551         be_close(adapter->netdev);
2552         return -EIO;
2553 }
2554
2555 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2556 {
2557         struct be_dma_mem cmd;
2558         int status = 0;
2559         u8 mac[ETH_ALEN];
2560
2561         memset(mac, 0, ETH_ALEN);
2562
2563         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2564         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2565                                     GFP_KERNEL);
2566         if (cmd.va == NULL)
2567                 return -ENOMEM;
2568         memset(cmd.va, 0, cmd.size);
2569
2570         if (enable) {
2571                 status = pci_write_config_dword(adapter->pdev,
2572                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2573                 if (status) {
2574                         dev_err(&adapter->pdev->dev,
2575                                 "Could not enable Wake-on-lan\n");
2576                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2577                                           cmd.dma);
2578                         return status;
2579                 }
2580                 status = be_cmd_enable_magic_wol(adapter,
2581                                 adapter->netdev->dev_addr, &cmd);
2582                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2583                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2584         } else {
2585                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2586                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2587                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2588         }
2589
2590         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2591         return status;
2592 }
2593
2594 /*
2595  * Generate a seed MAC address from the PF MAC address using jhash.
2596  * MAC addresses for VFs are assigned incrementally starting from the seed.
2597  * These addresses are programmed in the ASIC by the PF and the VF driver
2598  * queries for the MAC address during its probe.
2599  */
2600 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2601 {
2602         u32 vf;
2603         int status = 0;
2604         u8 mac[ETH_ALEN];
2605         struct be_vf_cfg *vf_cfg;
2606
2607         be_vf_eth_addr_generate(adapter, mac);
2608
2609         for_all_vfs(adapter, vf_cfg, vf) {
2610                 if (lancer_chip(adapter)) {
2611                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2612                 } else {
2613                         status = be_cmd_pmac_add(adapter, mac,
2614                                                  vf_cfg->if_handle,
2615                                                  &vf_cfg->pmac_id, vf + 1);
2616                 }
2617
2618                 if (status)
2619                         dev_err(&adapter->pdev->dev,
2620                         "Mac address assignment failed for VF %d\n", vf);
2621                 else
2622                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2623
2624                 mac[5] += 1;
2625         }
2626         return status;
2627 }
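
/* For example, if the generated seed is 02:00:00:11:22:30, VF 0 is
 * assigned ...:30, VF 1 is assigned ...:31, and so on.  Only the last
 * octet is incremented, which suffices for the modest num_vfs counts
 * the device supports.
 */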
2628
2629 static void be_vf_clear(struct be_adapter *adapter)
2630 {
2631         struct be_vf_cfg *vf_cfg;
2632         u32 vf;
2633
2634         if (be_find_vfs(adapter, ASSIGNED)) {
2635                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2636                 goto done;
2637         }
2638
2639         for_all_vfs(adapter, vf_cfg, vf) {
2640                 if (lancer_chip(adapter))
2641                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2642                 else
2643                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2644                                         vf_cfg->pmac_id, vf + 1);
2645
2646                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2647         }
2648         pci_disable_sriov(adapter->pdev);
2649 done:
2650         kfree(adapter->vf_cfg);
2651         adapter->num_vfs = 0;
2652 }
2653
2654 static int be_clear(struct be_adapter *adapter)
2655 {
2656         int i = 1;
2657
2658         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2659                 cancel_delayed_work_sync(&adapter->work);
2660                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2661         }
2662
2663         if (sriov_enabled(adapter))
2664                 be_vf_clear(adapter);
2665
2666         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2667                 be_cmd_pmac_del(adapter, adapter->if_handle,
2668                         adapter->pmac_id[i], 0);
2669
2670         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2671
2672         be_mcc_queues_destroy(adapter);
2673         be_rx_cqs_destroy(adapter);
2674         be_tx_queues_destroy(adapter);
2675         be_evt_queues_destroy(adapter);
2676
2677         kfree(adapter->pmac_id);
2678         adapter->pmac_id = NULL;
2679
2680         be_msix_disable(adapter);
2681         return 0;
2682 }
2683
2684 static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2685                                    u32 *cap_flags, u8 domain)
2686 {
2687         bool profile_present = false;
2688         int status;
2689
2690         if (lancer_chip(adapter)) {
2691                 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2692                 if (!status)
2693                         profile_present = true;
2694         }
2695
2696         if (!profile_present)
2697                 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2698                              BE_IF_FLAGS_MULTICAST;
2699 }
2700
2701 static int be_vf_setup_init(struct be_adapter *adapter)
2702 {
2703         struct be_vf_cfg *vf_cfg;
2704         int vf;
2705
2706         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2707                                   GFP_KERNEL);
2708         if (!adapter->vf_cfg)
2709                 return -ENOMEM;
2710
2711         for_all_vfs(adapter, vf_cfg, vf) {
2712                 vf_cfg->if_handle = -1;
2713                 vf_cfg->pmac_id = -1;
2714         }
2715         return 0;
2716 }
2717
2718 static int be_vf_setup(struct be_adapter *adapter)
2719 {
2720         struct be_vf_cfg *vf_cfg;
2721         struct device *dev = &adapter->pdev->dev;
2722         u32 cap_flags, en_flags, vf;
2723         u16 def_vlan, lnk_speed;
2724         int status, enabled_vfs;
2725
2726         enabled_vfs = be_find_vfs(adapter, ENABLED);
2727         if (enabled_vfs) {
2728                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2729                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2730                 return 0;
2731         }
2732
2733         if (num_vfs > adapter->dev_num_vfs) {
2734                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2735                          adapter->dev_num_vfs, num_vfs);
2736                 num_vfs = adapter->dev_num_vfs;
2737         }
2738
2739         status = pci_enable_sriov(adapter->pdev, num_vfs);
2740         if (!status) {
2741                 adapter->num_vfs = num_vfs;
2742         } else {
2743                 /* Platform doesn't support SRIOV though device supports it */
2744                 dev_warn(dev, "SRIOV enable failed\n");
2745                 return 0;
2746         }
2747
2748         status = be_vf_setup_init(adapter);
2749         if (status)
2750                 goto err;
2751
2752         for_all_vfs(adapter, vf_cfg, vf) {
2753                 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2754
2755                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2756                                         BE_IF_FLAGS_BROADCAST |
2757                                         BE_IF_FLAGS_MULTICAST);
2758
2759                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2760                                           &vf_cfg->if_handle, vf + 1);
2761                 if (status)
2762                         goto err;
2763         }
2764
2765         if (!enabled_vfs) {
2766                 status = be_vf_eth_addr_config(adapter);
2767                 if (status)
2768                         goto err;
2769         }
2770
2771         for_all_vfs(adapter, vf_cfg, vf) {
2772                 lnk_speed = 1000;
2773                 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
2774                 if (status)
2775                         goto err;
2776                 vf_cfg->tx_rate = lnk_speed * 10;
2777
2778                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2779                                 vf + 1, vf_cfg->if_handle);
2780                 if (status)
2781                         goto err;
2782                 vf_cfg->def_vid = def_vlan;
2783
2784                 be_cmd_enable_vf(adapter, vf + 1);
2785         }
2786         return 0;
2787 err:
2788         return status;
2789 }
2790
2791 static void be_setup_init(struct be_adapter *adapter)
2792 {
2793         adapter->vlan_prio_bmap = 0xff;
2794         adapter->phy.link_speed = -1;
2795         adapter->if_handle = -1;
2796         adapter->be3_native = false;
2797         adapter->promiscuous = false;
2798         if (be_physfn(adapter))
2799                 adapter->cmd_privileges = MAX_PRIVILEGES;
2800         else
2801                 adapter->cmd_privileges = MIN_PRIVILEGES;
2802 }
2803
2804 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2805                            bool *active_mac, u32 *pmac_id)
2806 {
2807         int status = 0;
2808
2809         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2810                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2811                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2812                         *active_mac = true;
2813                 else
2814                         *active_mac = false;
2815
2816                 return status;
2817         }
2818
2819         if (lancer_chip(adapter)) {
2820                 status = be_cmd_get_mac_from_list(adapter, mac,
2821                                                   active_mac, pmac_id, 0);
2822                 if (*active_mac) {
2823                         status = be_cmd_mac_addr_query(adapter, mac, false,
2824                                                        if_handle, *pmac_id);
2825                 }
2826         } else if (be_physfn(adapter)) {
2827                 /* For BE3, for PF get permanent MAC */
2828                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2829                 *active_mac = false;
2830         } else {
2831                 /* For BE3, for VF get soft MAC assigned by PF */
2832                 status = be_cmd_mac_addr_query(adapter, mac, false,
2833                                                if_handle, 0);
2834                 *active_mac = true;
2835         }
2836         return status;
2837 }
2838
2839 static void be_get_resources(struct be_adapter *adapter)
2840 {
2841         int status;
2842         bool profile_present = false;
2843
2844         if (lancer_chip(adapter)) {
2845                 status = be_cmd_get_func_config(adapter);
2846
2847                 if (!status)
2848                         profile_present = true;
2849         }
2850
2851         if (profile_present) {
2852                 /* Sanity fixes for Lancer */
2853                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2854                                               BE_UC_PMAC_COUNT);
2855                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2856                                            BE_NUM_VLANS_SUPPORTED);
2857                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2858                                                BE_MAX_MC);
2859                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2860                                                MAX_TX_QS);
2861                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2862                                                 BE3_MAX_RSS_QS);
2863                 adapter->max_event_queues = min_t(u16,
2864                                                   adapter->max_event_queues,
2865                                                   BE3_MAX_RSS_QS);
2866
2867                 if (adapter->max_rss_queues &&
2868                     adapter->max_rss_queues == adapter->max_rx_queues)
2869                         adapter->max_rss_queues -= 1;
2870
2871                 if (adapter->max_event_queues < adapter->max_rss_queues)
2872                         adapter->max_rss_queues = adapter->max_event_queues;
2873
2874         } else {
2875                 if (be_physfn(adapter))
2876                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2877                 else
2878                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2879
2880                 if (adapter->function_mode & FLEX10_MODE)
2881                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2882                 else
2883                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2884
2885                 adapter->max_mcast_mac = BE_MAX_MC;
2886                 adapter->max_tx_queues = MAX_TX_QS;
2887                 adapter->max_rss_queues = (adapter->be3_native) ?
2888                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2889                 adapter->max_event_queues = BE3_MAX_RSS_QS;
2890
2891                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2892                                         BE_IF_FLAGS_BROADCAST |
2893                                         BE_IF_FLAGS_MULTICAST |
2894                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
2895                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
2896                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
2897                                         BE_IF_FLAGS_PROMISCUOUS;
2898
2899                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2900                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2901         }
2902 }
2903
2904 /* Routine to query per function resource limits */
2905 static int be_get_config(struct be_adapter *adapter)
2906 {
2907         int pos, status;
2908         u16 dev_num_vfs;
2909
2910         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2911                                      &adapter->function_mode,
2912                                      &adapter->function_caps);
2913         if (status)
2914                 goto err;
2915
2916         be_get_resources(adapter);
2917
2918         /* primary mac needs 1 pmac entry */
2919         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2920                                    sizeof(u32), GFP_KERNEL);
2921         if (!adapter->pmac_id) {
2922                 status = -ENOMEM;
2923                 goto err;
2924         }
2925
2926         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2927         if (pos) {
2928                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2929                                      &dev_num_vfs);
2930                 if (!lancer_chip(adapter))
2931                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2932                 adapter->dev_num_vfs = dev_num_vfs;
2933         }
2934 err:
2935         return status;
2936 }
2937
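/* Bring the function up in dependency order: query FW config and
 * resource limits, enable MSI-x, create event queues, TX/RX completion
 * queues and the MCC queues, create the interface and program its MAC,
 * then create TX queues and restore VLAN/RX-mode/flow-control state
 * before optionally enabling SR-IOV VFs.
 */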
2938 static int be_setup(struct be_adapter *adapter)
2939 {
2940         struct device *dev = &adapter->pdev->dev;
2941         u32 en_flags;
2942         u32 tx_fc, rx_fc;
2943         int status;
2944         u8 mac[ETH_ALEN];
2945         bool active_mac;
2946
2947         be_setup_init(adapter);
2948
2949         if (!lancer_chip(adapter))
2950                 be_cmd_req_native_mode(adapter);
2951
2952         status = be_get_config(adapter);
2953         if (status)
2954                 goto err;
2955
2956         be_msix_enable(adapter);
2957
2958         status = be_evt_queues_create(adapter);
2959         if (status)
2960                 goto err;
2961
2962         status = be_tx_cqs_create(adapter);
2963         if (status)
2964                 goto err;
2965
2966         status = be_rx_cqs_create(adapter);
2967         if (status)
2968                 goto err;
2969
2970         status = be_mcc_queues_create(adapter);
2971         if (status)
2972                 goto err;
2973
2974         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
2975         /* In UMC mode FW does not return right privileges.
2976          * Override with correct privilege equivalent to PF.
2977          */
2978         if (be_is_mc(adapter))
2979                 adapter->cmd_privileges = MAX_PRIVILEGES;
2980
2981         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2982                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2983
2984         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2985                 en_flags |= BE_IF_FLAGS_RSS;
2986
2987         en_flags = en_flags & adapter->if_cap_flags;
2988
2989         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
2990                                   &adapter->if_handle, 0);
2991         if (status != 0)
2992                 goto err;
2993
2994         memset(mac, 0, ETH_ALEN);
2995         active_mac = false;
2996         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2997                                  &active_mac, &adapter->pmac_id[0]);
2998         if (status != 0)
2999                 goto err;
3000
3001         if (!active_mac) {
3002                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3003                                          &adapter->pmac_id[0], 0);
3004                 if (status != 0)
3005                         goto err;
3006         }
3007
3008         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3009                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3010                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3011         }
3012
3013         status = be_tx_qs_create(adapter);
3014         if (status)
3015                 goto err;
3016
3017         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3018
3019         if (adapter->vlans_added)
3020                 be_vid_config(adapter);
3021
3022         be_set_rx_mode(adapter->netdev);
3023
3024         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3025
3026         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3027                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3028                                         adapter->rx_fc);
3029
3030         if (be_physfn(adapter) && num_vfs) {
3031                 if (adapter->dev_num_vfs)
3032                         be_vf_setup(adapter);
3033                 else
3034                         dev_warn(dev, "device doesn't support SRIOV\n");
3035         }
3036
3037         status = be_cmd_get_phy_info(adapter);
3038         if (!status && be_pause_supported(adapter))
3039                 adapter->phy.fc_autoneg = 1;
3040
3041         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3042         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3043         return 0;
3044 err:
3045         be_clear(adapter);
3046         return status;
3047 }
3048
3049 #ifdef CONFIG_NET_POLL_CONTROLLER
3050 static void be_netpoll(struct net_device *netdev)
3051 {
3052         struct be_adapter *adapter = netdev_priv(netdev);
3053         struct be_eq_obj *eqo;
3054         int i;
3055
3056         for_all_evt_queues(adapter, eqo, i) {
3057                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3058                 napi_schedule(&eqo->napi);
3059         }
3062 }
3063 #endif
3064
3065 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3066 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3067
3068 static bool be_flash_redboot(struct be_adapter *adapter,
3069                         const u8 *p, u32 img_start, int image_size,
3070                         int hdr_size)
3071 {
3072         u32 crc_offset;
3073         u8 flashed_crc[4];
3074         int status;
3075
3076         crc_offset = hdr_size + img_start + image_size - 4;
3077
3078         p += crc_offset;
3079
3080         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3081                         (image_size - 4));
3082         if (status) {
3083                 dev_err(&adapter->pdev->dev,
3084                 "could not get crc from flash, not flashing redboot\n");
3085                 return false;
3086         }
3087
3088         /* update redboot only if the CRC does not match */
3089         return memcmp(flashed_crc, p, 4) != 0;
3093 }
3094
3095 static bool phy_flashing_required(struct be_adapter *adapter)
3096 {
3097         return (adapter->phy.phy_type == TN_8022 &&
3098                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3099 }
3100
3101 static bool is_comp_in_ufi(struct be_adapter *adapter,
3102                            struct flash_section_info *fsec, int type)
3103 {
3104         int i = 0, img_type = 0;
3105         struct flash_section_info_g2 *fsec_g2 = NULL;
3106
3107         if (BE2_chip(adapter))
3108                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3109
3110         for (i = 0; i < MAX_FLASH_COMP; i++) {
3111                 if (fsec_g2)
3112                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3113                 else
3114                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3115
3116                 if (img_type == type)
3117                         return true;
3118         }
3119         return false;
3121 }
3122
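/* Locate the flash section directory inside a UFI image: scan past the
 * file and image headers in 32-byte strides until the flash cookie
 * ("*** SE FLASH DIRECTORY *** ") is found.
 */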
3123 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3124                                                 int header_size,
3125                                                 const struct firmware *fw)
3126 {
3127         struct flash_section_info *fsec = NULL;
3128         const u8 *p = fw->data;
3129
3130         p += header_size;
3131         while (p < (fw->data + fw->size)) {
3132                 fsec = (struct flash_section_info *)p;
3133                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3134                         return fsec;
3135                 p += 32;
3136         }
3137         return NULL;
3138 }
3139
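/* Write one flash component in 32KB chunks. Intermediate chunks are
 * sent with a SAVE opcode; the final chunk uses the FLASH opcode (or
 * the PHY variants), on which FW commits the image to flash.
 */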
3140 static int be_flash(struct be_adapter *adapter, const u8 *img,
3141                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3142 {
3143         u32 total_bytes = 0, flash_op, num_bytes = 0;
3144         int status = 0;
3145         struct be_cmd_write_flashrom *req = flash_cmd->va;
3146
3147         total_bytes = img_size;
3148         while (total_bytes) {
3149                 num_bytes = min_t(u32, 32*1024, total_bytes);
3150
3151                 total_bytes -= num_bytes;
3152
3153                 if (!total_bytes) {
3154                         if (optype == OPTYPE_PHY_FW)
3155                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3156                         else
3157                                 flash_op = FLASHROM_OPER_FLASH;
3158                 } else {
3159                         if (optype == OPTYPE_PHY_FW)
3160                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3161                         else
3162                                 flash_op = FLASHROM_OPER_SAVE;
3163                 }
3164
3165                 memcpy(req->data_buf, img, num_bytes);
3166                 img += num_bytes;
3167                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3168                                                 flash_op, num_bytes);
3169                 if (status) {
3170                         if (status == ILLEGAL_IOCTL_REQ &&
3171                             optype == OPTYPE_PHY_FW)
3172                                 break;
3173                         dev_err(&adapter->pdev->dev,
3174                                 "cmd to write to flash rom failed.\n");
3175                         return status;
3176                 }
3177         }
3178         return 0;
3179 }
3180
3181 /* For BE2 and BE3 */
3182 static int be_flash_BEx(struct be_adapter *adapter,
3183                          const struct firmware *fw,
3184                          struct be_dma_mem *flash_cmd,
3185                          int num_of_images)
3187 {
3188         int status = 0, i, filehdr_size = 0;
3189         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3190         const u8 *p = fw->data;
3191         const struct flash_comp *pflashcomp;
3192         int num_comp, redboot;
3193         struct flash_section_info *fsec = NULL;
3194
3195         struct flash_comp gen3_flash_types[] = {
3196                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3197                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3198                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3199                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3200                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3201                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3202                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3203                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3204                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3205                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3206                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3207                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3208                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3209                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3210                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3211                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3212                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3213                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3214                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3215                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3216         };
3217
3218         struct flash_comp gen2_flash_types[] = {
3219                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3220                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3221                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3222                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3223                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3224                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3225                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3226                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3227                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3228                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3229                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3230                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3231                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3232                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3233                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3234                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3235         };
3236
3237         if (BE3_chip(adapter)) {
3238                 pflashcomp = gen3_flash_types;
3239                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3240                 num_comp = ARRAY_SIZE(gen3_flash_types);
3241         } else {
3242                 pflashcomp = gen2_flash_types;
3243                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3244                 num_comp = ARRAY_SIZE(gen2_flash_types);
3245         }
3246
3247         /* Get flash section info */
3248         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3249         if (!fsec) {
3250                 dev_err(&adapter->pdev->dev,
3251                         "Invalid Cookie. UFI corrupted?\n");
3252                 return -1;
3253         }
3254         for (i = 0; i < num_comp; i++) {
3255                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3256                         continue;
3257
3258                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3259                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3260                         continue;
3261
3262                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3263                     !phy_flashing_required(adapter))
3264                         continue;
3265
3266                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3267                         redboot = be_flash_redboot(adapter, fw->data,
3268                                 pflashcomp[i].offset, pflashcomp[i].size,
3269                                 filehdr_size + img_hdrs_size);
3270                         if (!redboot)
3271                                 continue;
3272                 }
3273
3274                 p = fw->data;
3275                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3276                 if (p + pflashcomp[i].size > fw->data + fw->size)
3277                         return -1;
3278
3279                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3280                                         pflashcomp[i].size);
3281                 if (status) {
3282                         dev_err(&adapter->pdev->dev,
3283                                 "Flashing section type %d failed.\n",
3284                                 pflashcomp[i].img_type);
3285                         return status;
3286                 }
3287         }
3288         return 0;
3289 }
3290
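/* Unlike BE2/BE3, Skyhawk flashing walks the flash section directory
 * found in the UFI itself and maps each section type to an optype,
 * instead of using a fixed component table.
 */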
3291 static int be_flash_skyhawk(struct be_adapter *adapter,
3292                 const struct firmware *fw,
3293                 struct be_dma_mem *flash_cmd, int num_of_images)
3294 {
3295         int status = 0, i, filehdr_size = 0;
3296         int img_offset, img_size, img_optype, redboot;
3297         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3298         const u8 *p = fw->data;
3299         struct flash_section_info *fsec = NULL;
3300
3301         filehdr_size = sizeof(struct flash_file_hdr_g3);
3302         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3303         if (!fsec) {
3304                 dev_err(&adapter->pdev->dev,
3305                         "Invalid Cookie. UFI corrupted?\n");
3306                 return -1;
3307         }
3308
3309         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3310                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3311                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3312
3313                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3314                 case IMAGE_FIRMWARE_iSCSI:
3315                         img_optype = OPTYPE_ISCSI_ACTIVE;
3316                         break;
3317                 case IMAGE_BOOT_CODE:
3318                         img_optype = OPTYPE_REDBOOT;
3319                         break;
3320                 case IMAGE_OPTION_ROM_ISCSI:
3321                         img_optype = OPTYPE_BIOS;
3322                         break;
3323                 case IMAGE_OPTION_ROM_PXE:
3324                         img_optype = OPTYPE_PXE_BIOS;
3325                         break;
3326                 case IMAGE_OPTION_ROM_FCoE:
3327                         img_optype = OPTYPE_FCOE_BIOS;
3328                         break;
3329                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3330                         img_optype = OPTYPE_ISCSI_BACKUP;
3331                         break;
3332                 case IMAGE_NCSI:
3333                         img_optype = OPTYPE_NCSI_FW;
3334                         break;
3335                 default:
3336                         continue;
3337                 }
3338
3339                 if (img_optype == OPTYPE_REDBOOT) {
3340                         redboot = be_flash_redboot(adapter, fw->data,
3341                                         img_offset, img_size,
3342                                         filehdr_size + img_hdrs_size);
3343                         if (!redboot)
3344                                 continue;
3345                 }
3346
3347                 p = fw->data;
3348                 p += filehdr_size + img_offset + img_hdrs_size;
3349                 if (p + img_size > fw->data + fw->size)
3350                         return -1;
3351
3352                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3353                 if (status) {
3354                         dev_err(&adapter->pdev->dev,
3355                                 "Flashing section type %d failed.\n",
3356                                 le32_to_cpu(fsec->fsec_entry[i].type));
3357                         return status;
3358                 }
3359         }
3360         return 0;
3361 }
3362
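/* Poll the SLIPORT PHYSDEV control register until its in-progress bit
 * clears (up to 30s) so that a FW reset is not issued mid-operation.
 */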
3363 static int lancer_wait_idle(struct be_adapter *adapter)
3364 {
3365 #define SLIPORT_IDLE_TIMEOUT 30
3366         u32 reg_val;
3367         int status = 0, i;
3368
3369         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3370                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3371                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3372                         break;
3373
3374                 ssleep(1);
3375         }
3376
3377         if (i == SLIPORT_IDLE_TIMEOUT)
3378                 status = -1;
3379
3380         return status;
3381 }
3382
3383 static int lancer_fw_reset(struct be_adapter *adapter)
3384 {
3385         int status = 0;
3386
3387         status = lancer_wait_idle(adapter);
3388         if (status)
3389                 return status;
3390
3391         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3392                   PHYSDEV_CONTROL_OFFSET);
3393
3394         return status;
3395 }
3396
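/* Stream a Lancer FW image to the "/prg" object in 32KB WRITE_OBJECT
 * chunks, then issue a zero-length write at the final offset to commit
 * it. change_status reports whether the new image becomes active after
 * a FW reset or only after a system reboot.
 */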
3397 static int lancer_fw_download(struct be_adapter *adapter,
3398                                 const struct firmware *fw)
3399 {
3400 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3401 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3402         struct be_dma_mem flash_cmd;
3403         const u8 *data_ptr = NULL;
3404         u8 *dest_image_ptr = NULL;
3405         size_t image_size = 0;
3406         u32 chunk_size = 0;
3407         u32 data_written = 0;
3408         u32 offset = 0;
3409         int status = 0;
3410         u8 add_status = 0;
3411         u8 change_status;
3412
3413         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3414                 dev_err(&adapter->pdev->dev,
3415                         "FW Image not properly aligned. "
3416                         "Length must be 4-byte aligned.\n");
3417                 status = -EINVAL;
3418                 goto lancer_fw_exit;
3419         }
3420
3421         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3422                                 + LANCER_FW_DOWNLOAD_CHUNK;
3423         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3424                                                 &flash_cmd.dma, GFP_KERNEL);
3425         if (!flash_cmd.va) {
3426                 status = -ENOMEM;
3427                 dev_err(&adapter->pdev->dev,
3428                         "Memory allocation failure while flashing\n");
3429                 goto lancer_fw_exit;
3430         }
3431
3432         dest_image_ptr = flash_cmd.va +
3433                                 sizeof(struct lancer_cmd_req_write_object);
3434         image_size = fw->size;
3435         data_ptr = fw->data;
3436
3437         while (image_size) {
3438                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3439
3440                 /* Copy the image chunk content. */
3441                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3442
3443                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3444                                                  chunk_size, offset,
3445                                                  LANCER_FW_DOWNLOAD_LOCATION,
3446                                                  &data_written, &change_status,
3447                                                  &add_status);
3448                 if (status)
3449                         break;
3450
3451                 offset += data_written;
3452                 data_ptr += data_written;
3453                 image_size -= data_written;
3454         }
3455
3456         if (!status) {
3457                 /* Commit the FW written */
3458                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3459                                                  0, offset,
3460                                                  LANCER_FW_DOWNLOAD_LOCATION,
3461                                                  &data_written, &change_status,
3462                                                  &add_status);
3463         }
3464
3465         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3466                                 flash_cmd.dma);
3467         if (status) {
3468                 dev_err(&adapter->pdev->dev,
3469                         "Firmware load error. "
3470                         "Status code: 0x%x Additional Status: 0x%x\n",
3471                         status, add_status);
3472                 goto lancer_fw_exit;
3473         }
3474
3475         if (change_status == LANCER_FW_RESET_NEEDED) {
3476                 status = lancer_fw_reset(adapter);
3477                 if (status) {
3478                         dev_err(&adapter->pdev->dev,
3479                                 "Adapter busy for FW reset.\n"
3480                                 "New FW will not be active.\n");
3481                         goto lancer_fw_exit;
3482                 }
3483         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3484                 dev_err(&adapter->pdev->dev,
3485                         "System reboot required for new FW to be active\n");
3487         }
3488
3489         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3490 lancer_fw_exit:
3491         return status;
3492 }
3493
3494 #define UFI_TYPE2               2
3495 #define UFI_TYPE3               3
3496 #define UFI_TYPE4               4
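/* Match the UFI image generation (first character of the build string)
 * against the ASIC generation; a mismatched UFI is refused.
 */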
3497 static int be_get_ufi_type(struct be_adapter *adapter,
3498                            struct flash_file_hdr_g2 *fhdr)
3499 {
3500         if (fhdr == NULL)
3501                 goto be_get_ufi_exit;
3502
3503         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3504                 return UFI_TYPE4;
3505         else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3506                 return UFI_TYPE3;
3507         else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3508                 return UFI_TYPE2;
3509
3510 be_get_ufi_exit:
3511         dev_err(&adapter->pdev->dev,
3512                 "UFI and Interface are not compatible for flashing\n");
3513         return -1;
3514 }
3515
3516 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3517 {
3518         struct flash_file_hdr_g2 *fhdr;
3519         struct flash_file_hdr_g3 *fhdr3;
3520         struct image_hdr *img_hdr_ptr = NULL;
3521         struct be_dma_mem flash_cmd;
3522         const u8 *p;
3523         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3524
3525         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3526         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3527                                           &flash_cmd.dma, GFP_KERNEL);
3528         if (!flash_cmd.va) {
3529                 status = -ENOMEM;
3530                 dev_err(&adapter->pdev->dev,
3531                         "Memory allocation failure while flashing\n");
3532                 goto be_fw_exit;
3533         }
3534
3535         p = fw->data;
3536         fhdr = (struct flash_file_hdr_g2 *)p;
3537
3538         ufi_type = be_get_ufi_type(adapter, fhdr);
3539
3540         fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3541         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3542         for (i = 0; i < num_imgs; i++) {
3543                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3544                                 (sizeof(struct flash_file_hdr_g3) +
3545                                  i * sizeof(struct image_hdr)));
3546                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3547                         if (ufi_type == UFI_TYPE4)
3548                                 status = be_flash_skyhawk(adapter, fw,
3549                                                         &flash_cmd, num_imgs);
3550                         else if (ufi_type == UFI_TYPE3)
3551                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3552                                                       num_imgs);
3553                 }
3554         }
3555
3556         if (ufi_type == UFI_TYPE2)
3557                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3558         else if (ufi_type == -1)
3559                 status = -1;
3560
3561         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3562                           flash_cmd.dma);
3563         if (status) {
3564                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3565                 goto be_fw_exit;
3566         }
3567
3568         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3569
3570 be_fw_exit:
3571         return status;
3572 }
3573
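/* User-initiated firmware flashing, typically reached via ethtool's
 * flash-device operation; it is refused while the interface is down.
 */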
3574 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3575 {
3576         const struct firmware *fw;
3577         int status;
3578
3579         if (!netif_running(adapter->netdev)) {
3580                 dev_err(&adapter->pdev->dev,
3581                         "Firmware load not allowed (interface is down)\n");
3582                 return -1;
3583         }
3584
3585         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3586         if (status)
3587                 goto fw_exit;
3588
3589         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3590
3591         if (lancer_chip(adapter))
3592                 status = lancer_fw_download(adapter, fw);
3593         else
3594                 status = be_fw_download(adapter, fw);
3595
3596 fw_exit:
3597         release_firmware(fw);
3598         return status;
3599 }
3600
3601 static const struct net_device_ops be_netdev_ops = {
3602         .ndo_open               = be_open,
3603         .ndo_stop               = be_close,
3604         .ndo_start_xmit         = be_xmit,
3605         .ndo_set_rx_mode        = be_set_rx_mode,
3606         .ndo_set_mac_address    = be_mac_addr_set,
3607         .ndo_change_mtu         = be_change_mtu,
3608         .ndo_get_stats64        = be_get_stats64,
3609         .ndo_validate_addr      = eth_validate_addr,
3610         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3611         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3612         .ndo_set_vf_mac         = be_set_vf_mac,
3613         .ndo_set_vf_vlan        = be_set_vf_vlan,
3614         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3615         .ndo_get_vf_config      = be_get_vf_config,
3616 #ifdef CONFIG_NET_POLL_CONTROLLER
3617         .ndo_poll_controller    = be_netpoll,
3618 #endif
3619 };
3620
3621 static void be_netdev_init(struct net_device *netdev)
3622 {
3623         struct be_adapter *adapter = netdev_priv(netdev);
3624         struct be_eq_obj *eqo;
3625         int i;
3626
3627         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3628                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3629                 NETIF_F_HW_VLAN_TX;
3630         if (be_multi_rxq(adapter))
3631                 netdev->hw_features |= NETIF_F_RXHASH;
3632
3633         netdev->features |= netdev->hw_features |
3634                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3635
3636         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3637                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3638
3639         netdev->priv_flags |= IFF_UNICAST_FLT;
3640
3641         netdev->flags |= IFF_MULTICAST;
3642
3643         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3644
3645         netdev->netdev_ops = &be_netdev_ops;
3646
3647         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3648
3649         for_all_evt_queues(adapter, eqo, i)
3650                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3651 }
3652
3653 static void be_unmap_pci_bars(struct be_adapter *adapter)
3654 {
3655         if (adapter->db)
3656                 pci_iounmap(adapter->pdev, adapter->db);
3657 }
3658
3659 static int db_bar(struct be_adapter *adapter)
3660 {
3661         if (lancer_chip(adapter) || !be_physfn(adapter))
3662                 return 0;
3663         else
3664                 return 4;
3665 }
3666
3667 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3668 {
3669         if (skyhawk_chip(adapter)) {
3670                 adapter->roce_db.size = 4096;
3671                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3672                                                               db_bar(adapter));
3673                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3674                                                                db_bar(adapter));
3675         }
3676         return 0;
3677 }
3678
3679 static int be_map_pci_bars(struct be_adapter *adapter)
3680 {
3681         u8 __iomem *addr;
3682         u32 sli_intf;
3683
3684         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3685         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3686                                 SLI_INTF_IF_TYPE_SHIFT;
3687
3688         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3689         if (addr == NULL)
3690                 goto pci_map_err;
3691         adapter->db = addr;
3692
3693         be_roce_map_pci_bars(adapter);
3694         return 0;
3695
3696 pci_map_err:
3697         be_unmap_pci_bars(adapter);
3698         return -ENOMEM;
3699 }
3700
3701 static void be_ctrl_cleanup(struct be_adapter *adapter)
3702 {
3703         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3704
3705         be_unmap_pci_bars(adapter);
3706
3707         if (mem->va)
3708                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3709                                   mem->dma);
3710
3711         mem = &adapter->rx_filter;
3712         if (mem->va)
3713                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3714                                   mem->dma);
3715 }
3716
3717 static int be_ctrl_init(struct be_adapter *adapter)
3718 {
3719         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3720         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3721         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3722         u32 sli_intf;
3723         int status;
3724
3725         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3726         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3727                                  SLI_INTF_FAMILY_SHIFT;
3728         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3729
3730         status = be_map_pci_bars(adapter);
3731         if (status)
3732                 goto done;
3733
3734         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3735         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3736                                                 mbox_mem_alloc->size,
3737                                                 &mbox_mem_alloc->dma,
3738                                                 GFP_KERNEL);
3739         if (!mbox_mem_alloc->va) {
3740                 status = -ENOMEM;
3741                 goto unmap_pci_bars;
3742         }
3743         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3744         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3745         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3746         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3747
3748         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3749         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3750                                         &rx_filter->dma, GFP_KERNEL);
3751         if (rx_filter->va == NULL) {
3752                 status = -ENOMEM;
3753                 goto free_mbox;
3754         }
3755         memset(rx_filter->va, 0, rx_filter->size);
3756         mutex_init(&adapter->mbox_lock);
3757         spin_lock_init(&adapter->mcc_lock);
3758         spin_lock_init(&adapter->mcc_cq_lock);
3759
3760         init_completion(&adapter->flash_compl);
3761         pci_save_state(adapter->pdev);
3762         return 0;
3763
3764 free_mbox:
3765         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3766                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3767
3768 unmap_pci_bars:
3769         be_unmap_pci_bars(adapter);
3770
3771 done:
3772         return status;
3773 }
3774
3775 static void be_stats_cleanup(struct be_adapter *adapter)
3776 {
3777         struct be_dma_mem *cmd = &adapter->stats_cmd;
3778
3779         if (cmd->va)
3780                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3781                                   cmd->va, cmd->dma);
3782 }
3783
3784 static int be_stats_init(struct be_adapter *adapter)
3785 {
3786         struct be_dma_mem *cmd = &adapter->stats_cmd;
3787
3788         if (lancer_chip(adapter))
3789                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3790         else if (BE2_chip(adapter))
3791                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3792         else
3793                 /* BE3 and Skyhawk */
3794                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3795
3796         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3797                                      GFP_KERNEL);
3798         if (cmd->va == NULL)
3799                 return -1;
3800         memset(cmd->va, 0, cmd->size);
3801         return 0;
3802 }
3803
3804 static void be_remove(struct pci_dev *pdev)
3805 {
3806         struct be_adapter *adapter = pci_get_drvdata(pdev);
3807
3808         if (!adapter)
3809                 return;
3810
3811         be_roce_dev_remove(adapter);
3812
3813         cancel_delayed_work_sync(&adapter->func_recovery_work);
3814
3815         unregister_netdev(adapter->netdev);
3816
3817         be_clear(adapter);
3818
3819         /* tell fw we're done with firing cmds */
3820         be_cmd_fw_clean(adapter);
3821
3822         be_stats_cleanup(adapter);
3823
3824         be_ctrl_cleanup(adapter);
3825
3826         pci_disable_pcie_error_reporting(pdev);
3827
3828         pci_set_drvdata(pdev, NULL);
3829         pci_release_regions(pdev);
3830         pci_disable_device(pdev);
3831
3832         free_netdev(adapter->netdev);
3833 }
3834
3835 bool be_is_wol_supported(struct be_adapter *adapter)
3836 {
3837         return (adapter->wol_cap & BE_WOL_CAP) &&
3838                 !be_is_wol_excluded(adapter);
3839 }
3840
3841 u32 be_get_fw_log_level(struct be_adapter *adapter)
3842 {
3843         struct be_dma_mem extfat_cmd;
3844         struct be_fat_conf_params *cfgs;
3845         int status;
3846         u32 level = 0;
3847         int j;
3848
3849         if (lancer_chip(adapter))
3850                 return 0;
3851
3852         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3853         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3854         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3855                                              &extfat_cmd.dma);
3856
3857         if (!extfat_cmd.va) {
3858                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3859                         __func__);
3860                 goto err;
3861         }
3862
3863         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3864         if (!status) {
3865                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3866                                                 sizeof(struct be_cmd_resp_hdr));
3867                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3868                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3869                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3870                 }
3871         }
3872         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3873                             extfat_cmd.dma);
3874 err:
3875         return level;
3876 }
3877
3878 static int be_get_initial_config(struct be_adapter *adapter)
3879 {
3880         int status;
3881         u32 level;
3882
3883         status = be_cmd_get_cntl_attributes(adapter);
3884         if (status)
3885                 return status;
3886
3887         status = be_cmd_get_acpi_wol_cap(adapter);
3888         if (status) {
3889                 /* in case of a failure to get wol capabilities
3890                  * check the exclusion list to determine WOL capability */
3891                 if (!be_is_wol_excluded(adapter))
3892                         adapter->wol_cap |= BE_WOL_CAP;
3893         }
3894
3895         if (be_is_wol_supported(adapter))
3896                 adapter->wol = true;
3897
3898         /* Must be a power of 2 or else MODULO will BUG_ON */
3899         adapter->be_get_temp_freq = 64;
3900
3901         level = be_get_fw_log_level(adapter);
3902         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3903
3904         return 0;
3905 }
3906
3907 static int lancer_recover_func(struct be_adapter *adapter)
3908 {
3909         int status;
3910
3911         status = lancer_test_and_set_rdy_state(adapter);
3912         if (status)
3913                 goto err;
3914
3915         if (netif_running(adapter->netdev))
3916                 be_close(adapter->netdev);
3917
3918         be_clear(adapter);
3919
3920         adapter->hw_error = false;
3921         adapter->fw_timeout = false;
3922
3923         status = be_setup(adapter);
3924         if (status)
3925                 goto err;
3926
3927         if (netif_running(adapter->netdev)) {
3928                 status = be_open(adapter->netdev);
3929                 if (status)
3930                         goto err;
3931         }
3932
3933         dev_info(&adapter->pdev->dev,
3934                  "Adapter SLIPORT recovery succeeded\n");
3935         return 0;
3936 err:
3937         if (adapter->eeh_error)
3938                 dev_err(&adapter->pdev->dev,
3939                         "Adapter SLIPORT recovery failed\n");
3940
3941         return status;
3942 }
3943
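/* Polled every second: latch HW/FW errors; on Lancer additionally
 * detach the netdev and attempt a SLIPORT re-initialization once FW
 * reports ready again.
 */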
3944 static void be_func_recovery_task(struct work_struct *work)
3945 {
3946         struct be_adapter *adapter =
3947                 container_of(work, struct be_adapter, func_recovery_work.work);
3948         int status;
3949
3950         be_detect_error(adapter);
3951
3952         if (adapter->hw_error && lancer_chip(adapter)) {
3953
3954                 if (adapter->eeh_error)
3955                         goto out;
3956
3957                 rtnl_lock();
3958                 netif_device_detach(adapter->netdev);
3959                 rtnl_unlock();
3960
3961                 status = lancer_recover_func(adapter);
3962
3963                 if (!status)
3964                         netif_device_attach(adapter->netdev);
3965         }
3966
3967 out:
3968         schedule_delayed_work(&adapter->func_recovery_work,
3969                               msecs_to_jiffies(1000));
3970 }
3971
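/* Periodic (1s) housekeeping: kick off the asynchronous stats command,
 * read the die temperature every be_get_temp_freq ticks, replenish RX
 * queues that ran dry and adapt each EQ's interrupt delay.
 */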
3972 static void be_worker(struct work_struct *work)
3973 {
3974         struct be_adapter *adapter =
3975                 container_of(work, struct be_adapter, work.work);
3976         struct be_rx_obj *rxo;
3977         struct be_eq_obj *eqo;
3978         int i;
3979
3980         /* when interrupts are not yet enabled, just reap any pending
3981          * mcc completions */
3982         if (!netif_running(adapter->netdev)) {
3983                 local_bh_disable();
3984                 be_process_mcc(adapter);
3985                 local_bh_enable();
3986                 goto reschedule;
3987         }
3988
3989         if (!adapter->stats_cmd_sent) {
3990                 if (lancer_chip(adapter))
3991                         lancer_cmd_get_pport_stats(adapter,
3992                                                 &adapter->stats_cmd);
3993                 else
3994                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3995         }
3996
3997         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3998                 be_cmd_get_die_temperature(adapter);
3999
4000         for_all_rx_queues(adapter, rxo, i) {
4001                 if (rxo->rx_post_starved) {
4002                         rxo->rx_post_starved = false;
4003                         be_post_rx_frags(rxo, GFP_KERNEL);
4004                 }
4005         }
4006
4007         for_all_evt_queues(adapter, eqo, i)
4008                 be_eqd_update(adapter, eqo);
4009
4010 reschedule:
4011         adapter->work_counter++;
4012         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4013 }
4014
4015 static bool be_reset_required(struct be_adapter *adapter)
4016 {
4017         return be_find_vfs(adapter, ENABLED) <= 0;
4018 }
4019
4020 static char *mc_name(struct be_adapter *adapter)
4021 {
4022         if (adapter->function_mode & FLEX10_MODE)
4023                 return "FLEX10";
4024         else if (adapter->function_mode & VNIC_MODE)
4025                 return "vNIC";
4026         else if (adapter->function_mode & UMC_ENABLED)
4027                 return "UMC";
4028         else
4029                 return "";
4030 }
4031
4032 static inline char *func_name(struct be_adapter *adapter)
4033 {
4034         return be_physfn(adapter) ? "PF" : "VF";
4035 }
4036
4037 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4038 {
4039         int status = 0;
4040         struct be_adapter *adapter;
4041         struct net_device *netdev;
4042         char port_name;
4043
4044         status = pci_enable_device(pdev);
4045         if (status)
4046                 goto do_none;
4047
4048         status = pci_request_regions(pdev, DRV_NAME);
4049         if (status)
4050                 goto disable_dev;
4051         pci_set_master(pdev);
4052
4053         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4054         if (netdev == NULL) {
4055                 status = -ENOMEM;
4056                 goto rel_reg;
4057         }
4058         adapter = netdev_priv(netdev);
4059         adapter->pdev = pdev;
4060         pci_set_drvdata(pdev, adapter);
4061         adapter->netdev = netdev;
4062         SET_NETDEV_DEV(netdev, &pdev->dev);
4063
4064         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4065         if (!status) {
4066                 netdev->features |= NETIF_F_HIGHDMA;
4067         } else {
4068                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4069                 if (status) {
4070                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4071                         goto free_netdev;
4072                 }
4073         }
4074
4075         status = pci_enable_pcie_error_reporting(pdev);
4076         if (status)
4077                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4078
4079         status = be_ctrl_init(adapter);
4080         if (status)
4081                 goto free_netdev;
4082
4083         /* sync up with fw's ready state */
4084         if (be_physfn(adapter)) {
4085                 status = be_fw_wait_ready(adapter);
4086                 if (status)
4087                         goto ctrl_clean;
4088         }
4089
4090         /* tell fw we're ready to fire cmds */
4091         status = be_cmd_fw_init(adapter);
4092         if (status)
4093                 goto ctrl_clean;
4094
4095         if (be_reset_required(adapter)) {
4096                 status = be_cmd_reset_function(adapter);
4097                 if (status)
4098                         goto ctrl_clean;
4099         }
4100
4101         /* The INTR bit may be set in the card when probed by a kdump kernel
4102          * after a crash.
4103          */
4104         if (!lancer_chip(adapter))
4105                 be_intr_set(adapter, false);
4106
4107         status = be_stats_init(adapter);
4108         if (status)
4109                 goto ctrl_clean;
4110
4111         status = be_get_initial_config(adapter);
4112         if (status)
4113                 goto stats_clean;
4114
4115         INIT_DELAYED_WORK(&adapter->work, be_worker);
4116         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4117         adapter->rx_fc = adapter->tx_fc = true;
4118
4119         status = be_setup(adapter);
4120         if (status)
4121                 goto stats_clean;
4122
4123         be_netdev_init(netdev);
4124         status = register_netdev(netdev);
4125         if (status != 0)
4126                 goto unsetup;
4127
4128         be_roce_dev_add(adapter);
4129
4130         schedule_delayed_work(&adapter->func_recovery_work,
4131                               msecs_to_jiffies(1000));
4132
4133         be_cmd_query_port_name(adapter, &port_name);
4134
4135         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4136                  func_name(adapter), mc_name(adapter), port_name);
4137
4138         return 0;
4139
4140 unsetup:
4141         be_clear(adapter);
4142 stats_clean:
4143         be_stats_cleanup(adapter);
4144 ctrl_clean:
4145         be_ctrl_cleanup(adapter);
4146 free_netdev:
4147         free_netdev(netdev);
4148         pci_set_drvdata(pdev, NULL);
4149 rel_reg:
4150         pci_release_regions(pdev);
4151 disable_dev:
4152         pci_disable_device(pdev);
4153 do_none:
4154         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4155         return status;
4156 }
4157
4158 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4159 {
4160         struct be_adapter *adapter = pci_get_drvdata(pdev);
4161         struct net_device *netdev = adapter->netdev;
4162
4163         if (adapter->wol)
4164                 be_setup_wol(adapter, true);
4165
4166         cancel_delayed_work_sync(&adapter->func_recovery_work);
4167
4168         netif_device_detach(netdev);
4169         if (netif_running(netdev)) {
4170                 rtnl_lock();
4171                 be_close(netdev);
4172                 rtnl_unlock();
4173         }
4174         be_clear(adapter);
4175
4176         pci_save_state(pdev);
4177         pci_disable_device(pdev);
4178         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4179         return 0;
4180 }
4181
4182 static int be_resume(struct pci_dev *pdev)
4183 {
4184         int status = 0;
4185         struct be_adapter *adapter = pci_get_drvdata(pdev);
4186         struct net_device *netdev = adapter->netdev;
4187
4188         netif_device_detach(netdev);
4189
4190         status = pci_enable_device(pdev);
4191         if (status)
4192                 return status;
4193
4194         pci_set_power_state(pdev, 0);
4195         pci_restore_state(pdev);
4196
4197         /* tell fw we're ready to fire cmds */
4198         status = be_cmd_fw_init(adapter);
4199         if (status)
4200                 return status;
4201
4202         be_setup(adapter);
4203         if (netif_running(netdev)) {
4204                 rtnl_lock();
4205                 be_open(netdev);
4206                 rtnl_unlock();
4207         }
4208
4209         schedule_delayed_work(&adapter->func_recovery_work,
4210                               msecs_to_jiffies(1000));
4211         netif_device_attach(netdev);
4212
4213         if (adapter->wol)
4214                 be_setup_wol(adapter, false);
4215
4216         return 0;
4217 }
4218
4219 /*
4220  * An FLR will stop BE from DMAing any data.
4221  */
4222 static void be_shutdown(struct pci_dev *pdev)
4223 {
4224         struct be_adapter *adapter = pci_get_drvdata(pdev);
4225
4226         if (!adapter)
4227                 return;
4228
4229         cancel_delayed_work_sync(&adapter->work);
4230         cancel_delayed_work_sync(&adapter->func_recovery_work);
4231
4232         netif_device_detach(adapter->netdev);
4233
4234         be_cmd_reset_function(adapter);
4235
4236         pci_disable_device(pdev);
4237 }
4238
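/* EEH/AER recovery flow: error_detected() detaches the netdev and
 * tears the function down (waiting out a possible FW flash dump on
 * function 0), slot_reset() re-enables the device and waits for FW to
 * become ready, and resume() re-runs be_setup() and reattaches.
 */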
4239 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4240                                 pci_channel_state_t state)
4241 {
4242         struct be_adapter *adapter = pci_get_drvdata(pdev);
4243         struct net_device *netdev = adapter->netdev;
4244
4245         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4246
4247         adapter->eeh_error = true;
4248
4249         cancel_delayed_work_sync(&adapter->func_recovery_work);
4250
4251         rtnl_lock();
4252         netif_device_detach(netdev);
4253         rtnl_unlock();
4254
4255         if (netif_running(netdev)) {
4256                 rtnl_lock();
4257                 be_close(netdev);
4258                 rtnl_unlock();
4259         }
4260         be_clear(adapter);
4261
4262         if (state == pci_channel_io_perm_failure)
4263                 return PCI_ERS_RESULT_DISCONNECT;
4264
4265         pci_disable_device(pdev);
4266
4267         /* The error could cause the FW to trigger a flash debug dump.
4268          * Resetting the card while flash dump is in progress
4269          * can cause it not to recover; wait for it to finish.
4270          * Wait only for first function as it is needed only once per
4271          * adapter.
4272          */
4273         if (pdev->devfn == 0)
4274                 ssleep(30);
4275
4276         return PCI_ERS_RESULT_NEED_RESET;
4277 }
4278
4279 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4280 {
4281         struct be_adapter *adapter = pci_get_drvdata(pdev);
4282         int status;
4283
4284         dev_info(&adapter->pdev->dev, "EEH reset\n");
4285         be_clear_all_error(adapter);
4286
4287         status = pci_enable_device(pdev);
4288         if (status)
4289                 return PCI_ERS_RESULT_DISCONNECT;
4290
4291         pci_set_master(pdev);
4292         pci_set_power_state(pdev, 0);
4293         pci_restore_state(pdev);
4294
4295         /* Check if card is ok and fw is ready */
4296         status = be_fw_wait_ready(adapter);
4297         if (status)
4298                 return PCI_ERS_RESULT_DISCONNECT;
4299
4300         pci_cleanup_aer_uncorrect_error_status(pdev);
4301         return PCI_ERS_RESULT_RECOVERED;
4302 }
4303
4304 static void be_eeh_resume(struct pci_dev *pdev)
4305 {
4306         int status = 0;
4307         struct be_adapter *adapter = pci_get_drvdata(pdev);
4308         struct net_device *netdev = adapter->netdev;
4309
4310         dev_info(&adapter->pdev->dev, "EEH resume\n");
4311
4312         pci_save_state(pdev);
4313
4314         /* tell fw we're ready to fire cmds */
4315         status = be_cmd_fw_init(adapter);
4316         if (status)
4317                 goto err;
4318
4319         status = be_cmd_reset_function(adapter);
4320         if (status)
4321                 goto err;
4322
4323         status = be_setup(adapter);
4324         if (status)
4325                 goto err;
4326
4327         if (netif_running(netdev)) {
4328                 status = be_open(netdev);
4329                 if (status)
4330                         goto err;
4331         }
4332
4333         schedule_delayed_work(&adapter->func_recovery_work,
4334                               msecs_to_jiffies(1000));
4335         netif_device_attach(netdev);
4336         return;
4337 err:
4338         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4339 }
4340
4341 static const struct pci_error_handlers be_eeh_handlers = {
4342         .error_detected = be_eeh_err_detected,
4343         .slot_reset = be_eeh_reset,
4344         .resume = be_eeh_resume,
4345 };
4346
4347 static struct pci_driver be_driver = {
4348         .name = DRV_NAME,
4349         .id_table = be_dev_ids,
4350         .probe = be_probe,
4351         .remove = be_remove,
4352         .suspend = be_suspend,
4353         .resume = be_resume,
4354         .shutdown = be_shutdown,
4355         .err_handler = &be_eeh_handlers
4356 };
4357
4358 static int __init be_init_module(void)
4359 {
4360         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4361             rx_frag_size != 2048) {
4362                 printk(KERN_WARNING DRV_NAME
4363                         " : Module param rx_frag_size must be 2048/4096/8192."
4364                         " Using 2048\n");
4365                 rx_frag_size = 2048;
4366         }
4367
4368         return pci_register_driver(&be_driver);
4369 }
4370 module_init(be_init_module);
4371
4372 static void __exit be_exit_module(void)
4373 {
4374         pci_unregister_driver(&be_driver);
4375 }
4376 module_exit(be_exit_module);