/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_error)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
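
/* Illustrative example (editor's note, not in the original source): for
 * qid == 5, arm == true and num_popped == 3, be_cq_notify() composes the
 * doorbell word roughly as
 *      val = (5 & DB_CQ_RING_ID_MASK)
 *            | (1 << DB_CQ_REARM_SHIFT)
 *            | (3 << DB_CQ_NUM_POPPED_SHIFT);
 * i.e. ring id, re-arm flag and the count of consumed completions are
 * packed into a single 32-bit write to the CQ doorbell register.
 */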

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_mac_addr_query(adapter, current_mac, false,
                                       adapter->if_handle, 0);
        if (status)
                goto err;

        if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
                status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id[0], 0);
                if (status)
                        goto err;

                be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
        }
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}
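
/* Worked example (editor's note, not in the original source): if
 * *acc == 0x0001FFF0 and the 16-bit HW counter has wrapped around to
 * val == 0x0005, then val < lo(*acc), so
 *      newacc = hi(*acc) + val + 65536
 *             = 0x00010000 + 0x0005 + 0x10000 = 0x00020005;
 * the high half counts wrap-arounds while the low half mirrors the
 * current HW value.
 */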

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        if (lancer_chip(adapter))
                goto done;

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
done:
        return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
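
/* Worked example (editor's note): an skb with linear data and two page
 * frags needs 1 + 2 data WRBs plus 1 header WRB = 4 in total; 4 is
 * even, so no dummy WRB is added. With a single page frag instead, the
 * count would be 3 (odd), and on BE2/BE3 a dummy WRB is appended to
 * keep the count even; Lancer has no such restriction.
 */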

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}
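
/* Illustrative note (editor's addition): in the 16-bit VLAN TCI the
 * priority (PCP) field occupies bits 15:13, which VLAN_PRIO_MASK and
 * VLAN_PRIO_SHIFT extract. E.g. a tag of 0x6064 carries priority 3;
 * if bit 3 is clear in adapter->vlan_prio_bmap, the priority bits are
 * overwritten with adapter->recommended_prio (assumed to be stored
 * already positioned in bits 15:13) while the remaining CFI/VID bits
 * are preserved.
 */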

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
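
/* Note on the dma_err unwind above (editor's descriptive comment):
 * txq->head is rewound to map_head and every WRB mapped so far is
 * unmapped in order. Only the first WRB can be a single mapping (the
 * skb linear data), which is why map_single is cleared after the
 * first iteration; all later WRBs are page mappings.
 */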

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                __vlan_put_tag(skb, vlan_tag);
                skb->vlan_tci = 0;
        }

        return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* HW has a bug which considers padding bytes as legal
         * and modifies the IPv4 hdr's 'tot_len' field
         */
        if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
                        is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        be_vlan_tag_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit, which will wake up the
                 * queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}
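
/* Worked example (editor's note): at rx_pps == 440000 the computed
 * delay is eqd = (440000 / 110000) << 3 = 32, then clamped to the
 * EQ's [min_eqd, max_eqd] range; rates below 220000 pps produce
 * eqd < 10 and are rounded down to 0 (no interrupt delay).
 */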

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}
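
/* Illustrative example (editor's note): a 5000-byte frame with
 * rx_frag_size == 2048 arrives in 3 RX frags (2048 + 2048 + 904).
 * The first frag supplies the ETH_HLEN header copied into the skb
 * head; its remainder becomes skb frag 0, and the two following
 * frags are attached after it, with frags sharing a physical page
 * coalesced into a single frag slot.
 */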

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
                             struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(napi);
        if (!skb) {
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1439
1440 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1441 {
1442         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1443         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1444         struct be_adapter *adapter = rxo->adapter;
1445
1446         /* For checking the valid bit it is Ok to use either definition as the
1447          * valid bit is at the same position in both v0 and v1 Rx compl */
1448         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1449                 return NULL;
1450
1451         rmb();
1452         be_dws_le_to_cpu(compl, sizeof(*compl));
1453
1454         if (adapter->be3_native)
1455                 be_parse_rx_compl_v1(compl, rxcp);
1456         else
1457                 be_parse_rx_compl_v0(compl, rxcp);
1458
1459         if (rxcp->vlanf) {
1460                 /* vlanf could be wrongly set on some cards;
1461                  * ignore it if vtm is not set */
1462                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1463                         rxcp->vlanf = 0;
1464
1465                 if (!lancer_chip(adapter))
1466                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1467
1468                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1469                     !adapter->vlan_tag[rxcp->vlan_tag])
1470                         rxcp->vlanf = 0;
1471         }
1472
1473         /* As the compl has been parsed, reset it; we won't touch it again */
1474         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1475
1476         queue_tail_inc(&rxo->cq);
1477         return rxcp;
1478 }
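
/* Illustrative sketch, not part of the driver build: be_rx_compl_get() above
 * uses the usual DMA-ring consumer protocol -- the device writes the valid
 * bit last, the driver tests it first, issues rmb() so no payload field is
 * read before the valid bit was seen, and clears the bit so the slot is not
 * mistaken for a new completion after a wrap.  A generic user-space model
 * with a hypothetical entry layout:
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct fake_compl {
	uint32_t data;
	uint32_t valid;			/* written last by the producer */
};

/* stand-in for the kernel's rmb(); a compiler barrier suffices for this
 * single-threaded sketch */
#define fake_rmb()	__asm__ __volatile__("" ::: "memory")

static struct fake_compl *ring_consume(struct fake_compl *ring,
				       uint32_t *tail, uint32_t len)
{
	struct fake_compl *c = &ring[*tail];

	if (!c->valid)
		return NULL;		/* nothing new at the tail */
	fake_rmb();			/* no payload reads before 'valid' */
	c->valid = 0;			/* don't re-process after a wrap */
	*tail = (*tail + 1) % len;
	return c;
}

int main(void)
{
	struct fake_compl ring[4] = { { 0xabcd, 1 } };
	uint32_t tail = 0;

	return ring_consume(ring, &tail, 4) ? 0 : 1;
}
#endif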
1479
1480 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1481 {
1482         u32 order = get_order(size);
1483
1484         if (order > 0)
1485                 gfp |= __GFP_COMP;
1486         return  alloc_pages(gfp, order);
1487 }
1488
1489 /*
1490  * Allocate a page, split it into fragments of size rx_frag_size and post them as
1491  * receive buffers to BE
1492  */
1493 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1494 {
1495         struct be_adapter *adapter = rxo->adapter;
1496         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1497         struct be_queue_info *rxq = &rxo->q;
1498         struct page *pagep = NULL;
1499         struct be_eth_rx_d *rxd;
1500         u64 page_dmaaddr = 0, frag_dmaaddr;
1501         u32 posted, page_offset = 0;
1502
1503         page_info = &rxo->page_info_tbl[rxq->head];
1504         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1505                 if (!pagep) {
1506                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1507                         if (unlikely(!pagep)) {
1508                                 rx_stats(rxo)->rx_post_fail++;
1509                                 break;
1510                         }
1511                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1512                                                     0, adapter->big_page_size,
1513                                                     DMA_FROM_DEVICE);
1514                         page_info->page_offset = 0;
1515                 } else {
1516                         get_page(pagep);
1517                         page_info->page_offset = page_offset + rx_frag_size;
1518                 }
1519                 page_offset = page_info->page_offset;
1520                 page_info->page = pagep;
1521                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1522                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1523
1524                 rxd = queue_head_node(rxq);
1525                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1526                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1527
1528                 /* Any space left in the current big page for another frag? */
1529                 if ((page_offset + rx_frag_size + rx_frag_size) >
1530                                         adapter->big_page_size) {
1531                         pagep = NULL;
1532                         page_info->last_page_user = true;
1533                 }
1534
1535                 prev_page_info = page_info;
1536                 queue_head_inc(rxq);
1537                 page_info = &rxo->page_info_tbl[rxq->head];
1538         }
1539         if (pagep)
1540                 prev_page_info->last_page_user = true;
1541
1542         if (posted) {
1543                 atomic_add(posted, &rxq->used);
1544                 be_rxq_notify(adapter, rxq->id, posted);
1545         } else if (atomic_read(&rxq->used) == 0) {
1546                 /* Let be_worker replenish when memory is available */
1547                 rxo->rx_post_starved = true;
1548         }
1549 }
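
/* Illustrative sketch, not part of the driver build: be_post_rx_frags()
 * carves each compound "big page" into rx_frag_size slices -- the first slice
 * keeps the alloc_pages() reference, each later slice takes a get_page(),
 * and last_page_user marks the slice after which no further references are
 * handed out.  The offset/last-user bookkeeping alone, with hypothetical
 * sizes:
 */
#if 0
#include <stdio.h>

#define FRAG_SIZE     2048
#define BIG_PAGE_SIZE 8192	/* hypothetical order-1 compound page */

int main(void)
{
	unsigned int offset = 0;
	int frag, last_user;

	for (frag = 0; frag < 6; frag++) {
		/* same test as the driver: room for one more frag after this? */
		last_user = (offset + 2 * FRAG_SIZE) > BIG_PAGE_SIZE;
		printf("frag %d: offset %u%s\n", frag, offset,
		       last_user ? " (last user of this page)" : "");
		offset = last_user ? 0 : offset + FRAG_SIZE;
	}
	return 0;
}
#endif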
1550
1551 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1552 {
1553         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1554
1555         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1556                 return NULL;
1557
1558         rmb();
1559         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1560
1561         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1562
1563         queue_tail_inc(tx_cq);
1564         return txcp;
1565 }
1566
1567 static u16 be_tx_compl_process(struct be_adapter *adapter,
1568                 struct be_tx_obj *txo, u16 last_index)
1569 {
1570         struct be_queue_info *txq = &txo->q;
1571         struct be_eth_wrb *wrb;
1572         struct sk_buff **sent_skbs = txo->sent_skb_list;
1573         struct sk_buff *sent_skb;
1574         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1575         bool unmap_skb_hdr = true;
1576
1577         sent_skb = sent_skbs[txq->tail];
1578         BUG_ON(!sent_skb);
1579         sent_skbs[txq->tail] = NULL;
1580
1581         /* skip header wrb */
1582         queue_tail_inc(txq);
1583
1584         do {
1585                 cur_index = txq->tail;
1586                 wrb = queue_tail_node(txq);
1587                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1588                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1589                 unmap_skb_hdr = false;
1590
1591                 num_wrbs++;
1592                 queue_tail_inc(txq);
1593         } while (cur_index != last_index);
1594
1595         kfree_skb(sent_skb);
1596         return num_wrbs;
1597 }
1598
1599 /* Return the number of events in the event queue */
1600 static inline int events_get(struct be_eq_obj *eqo)
1601 {
1602         struct be_eq_entry *eqe;
1603         int num = 0;
1604
1605         do {
1606                 eqe = queue_tail_node(&eqo->q);
1607                 if (eqe->evt == 0)
1608                         break;
1609
1610                 rmb();
1611                 eqe->evt = 0;
1612                 num++;
1613                 queue_tail_inc(&eqo->q);
1614         } while (true);
1615
1616         return num;
1617 }
1618
1619 static int event_handle(struct be_eq_obj *eqo)
1620 {
1621         bool rearm = false;
1622         int num = events_get(eqo);
1623
1624         /* Deal with any spurious interrupts that come without events */
1625         if (!num)
1626                 rearm = true;
1627
1628         if (num || msix_enabled(eqo->adapter))
1629                 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1630
1631         if (num)
1632                 napi_schedule(&eqo->napi);
1633
1634         return num;
1635 }
1636
1637 /* Leaves the EQ in a disarmed state */
1638 static void be_eq_clean(struct be_eq_obj *eqo)
1639 {
1640         int num = events_get(eqo);
1641
1642         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1643 }
1644
1645 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1646 {
1647         struct be_rx_page_info *page_info;
1648         struct be_queue_info *rxq = &rxo->q;
1649         struct be_queue_info *rx_cq = &rxo->cq;
1650         struct be_rx_compl_info *rxcp;
1651         u16 tail;
1652
1653         /* First cleanup pending rx completions */
1654         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1655                 be_rx_compl_discard(rxo, rxcp);
1656                 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1657         }
1658
1659         /* Then free posted rx buffers that were not used */
1660         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1661         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1662                 page_info = get_rx_page_info(rxo, tail);
1663                 put_page(page_info->page);
1664                 memset(page_info, 0, sizeof(*page_info));
1665         }
1666         BUG_ON(atomic_read(&rxq->used));
1667         rxq->tail = rxq->head = 0;
1668 }
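
/* Illustrative sketch, not part of the driver build: the tail recomputation
 * in be_rx_cq_clean() steps back 'used' entries from head, and the
 * '+ rxq->len' keeps the intermediate value non-negative before the modulo.
 * A quick check with hypothetical numbers:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int len = 1024, head = 3, used = 10;
	/* same expression as in be_rx_cq_clean() */
	unsigned int tail = (head + len - used) % len;

	printf("tail = %u\n", tail);	/* 1017: wrapped back past 0 */
	return 0;
}
#endif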
1669
1670 static void be_tx_compl_clean(struct be_adapter *adapter)
1671 {
1672         struct be_tx_obj *txo;
1673         struct be_queue_info *txq;
1674         struct be_eth_tx_compl *txcp;
1675         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1676         struct sk_buff *sent_skb;
1677         bool dummy_wrb;
1678         int i, pending_txqs;
1679
1680         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1681         do {
1682                 pending_txqs = adapter->num_tx_qs;
1683
1684                 for_all_tx_queues(adapter, txo, i) {
1685                         txq = &txo->q;
1686                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1687                                 end_idx =
1688                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1689                                                       wrb_index, txcp);
1690                                 num_wrbs += be_tx_compl_process(adapter, txo,
1691                                                                 end_idx);
1692                                 cmpl++;
1693                         }
1694                         if (cmpl) {
1695                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1696                                 atomic_sub(num_wrbs, &txq->used);
1697                                 cmpl = 0;
1698                                 num_wrbs = 0;
1699                         }
1700                         if (atomic_read(&txq->used) == 0)
1701                                 pending_txqs--;
1702                 }
1703
1704                 if (pending_txqs == 0 || ++timeo > 200)
1705                         break;
1706
1707                 mdelay(1);
1708         } while (true);
1709
1710         for_all_tx_queues(adapter, txo, i) {
1711                 txq = &txo->q;
1712                 if (atomic_read(&txq->used))
1713                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1714                                 atomic_read(&txq->used));
1715
1716                 /* free posted tx for which compls will never arrive */
1717                 while (atomic_read(&txq->used)) {
1718                         sent_skb = txo->sent_skb_list[txq->tail];
1719                         end_idx = txq->tail;
1720                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1721                                                    &dummy_wrb);
1722                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1723                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1724                         atomic_sub(num_wrbs, &txq->used);
1725                 }
1726         }
1727 }
1728
1729 static void be_evt_queues_destroy(struct be_adapter *adapter)
1730 {
1731         struct be_eq_obj *eqo;
1732         int i;
1733
1734         for_all_evt_queues(adapter, eqo, i) {
1735                 if (eqo->q.created) {
1736                         be_eq_clean(eqo);
1737                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1738                 }
1739                 be_queue_free(adapter, &eqo->q);
1740         }
1741 }
1742
1743 static int be_evt_queues_create(struct be_adapter *adapter)
1744 {
1745         struct be_queue_info *eq;
1746         struct be_eq_obj *eqo;
1747         int i, rc;
1748
1749         adapter->num_evt_qs = num_irqs(adapter);
1750
1751         for_all_evt_queues(adapter, eqo, i) {
1752                 eqo->adapter = adapter;
1753                 eqo->tx_budget = BE_TX_BUDGET;
1754                 eqo->idx = i;
1755                 eqo->max_eqd = BE_MAX_EQD;
1756                 eqo->enable_aic = true;
1757
1758                 eq = &eqo->q;
1759                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1760                                         sizeof(struct be_eq_entry));
1761                 if (rc)
1762                         return rc;
1763
1764                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1765                 if (rc)
1766                         return rc;
1767         }
1768         return 0;
1769 }
1770
1771 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1772 {
1773         struct be_queue_info *q;
1774
1775         q = &adapter->mcc_obj.q;
1776         if (q->created)
1777                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1778         be_queue_free(adapter, q);
1779
1780         q = &adapter->mcc_obj.cq;
1781         if (q->created)
1782                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1783         be_queue_free(adapter, q);
1784 }
1785
1786 /* Must be called only after TX qs are created as MCC shares TX EQ */
1787 static int be_mcc_queues_create(struct be_adapter *adapter)
1788 {
1789         struct be_queue_info *q, *cq;
1790
1791         cq = &adapter->mcc_obj.cq;
1792         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1793                         sizeof(struct be_mcc_compl)))
1794                 goto err;
1795
1796         /* Use the default EQ for MCC completions */
1797         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1798                 goto mcc_cq_free;
1799
1800         q = &adapter->mcc_obj.q;
1801         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1802                 goto mcc_cq_destroy;
1803
1804         if (be_cmd_mccq_create(adapter, q, cq))
1805                 goto mcc_q_free;
1806
1807         return 0;
1808
1809 mcc_q_free:
1810         be_queue_free(adapter, q);
1811 mcc_cq_destroy:
1812         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1813 mcc_cq_free:
1814         be_queue_free(adapter, cq);
1815 err:
1816         return -1;
1817 }
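
/* Illustrative sketch, not part of the driver build: be_mcc_queues_create()
 * above uses the kernel's standard goto-unwind ladder -- each failure jumps
 * to a label that frees exactly what the earlier steps set up, in reverse
 * order.  The bare shape, with hypothetical setup/teardown steps:
 */
#if 0
#include <stdio.h>

static int setup_a(void)     { puts("setup A"); return 0; }
static int setup_b(void)     { puts("setup B"); return -1; /* fails */ }
static void teardown_a(void) { puts("undo A"); }

static int create_two_things(void)
{
	if (setup_a())
		goto err;	/* nothing to undo yet */
	if (setup_b())
		goto undo_a;	/* only A exists, so undo only A */
	return 0;

undo_a:
	teardown_a();
err:
	return -1;
}

int main(void)
{
	return create_two_things() ? 1 : 0;
}
#endif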
1818
1819 static void be_tx_queues_destroy(struct be_adapter *adapter)
1820 {
1821         struct be_queue_info *q;
1822         struct be_tx_obj *txo;
1823         u8 i;
1824
1825         for_all_tx_queues(adapter, txo, i) {
1826                 q = &txo->q;
1827                 if (q->created)
1828                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1829                 be_queue_free(adapter, q);
1830
1831                 q = &txo->cq;
1832                 if (q->created)
1833                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1834                 be_queue_free(adapter, q);
1835         }
1836 }
1837
1838 static int be_num_txqs_want(struct be_adapter *adapter)
1839 {
1840         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1841             be_is_mc(adapter) ||
1842             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1843             adapter->generation == BE_GEN2)
1844                 return 1;
1845         else
1846                 return adapter->max_tx_queues;
1847 }
1848
1849 static int be_tx_cqs_create(struct be_adapter *adapter)
1850 {
1851         struct be_queue_info *cq, *eq;
1852         int status;
1853         struct be_tx_obj *txo;
1854         u8 i;
1855
1856         adapter->num_tx_qs = be_num_txqs_want(adapter);
1857         if (adapter->num_tx_qs != MAX_TX_QS) {
1858                 rtnl_lock();
1859                 netif_set_real_num_tx_queues(adapter->netdev,
1860                         adapter->num_tx_qs);
1861                 rtnl_unlock();
1862         }
1863
1864         for_all_tx_queues(adapter, txo, i) {
1865                 cq = &txo->cq;
1866                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1867                                         sizeof(struct be_eth_tx_compl));
1868                 if (status)
1869                         return status;
1870
1871                 /* If num_evt_qs is less than num_tx_qs, then more than
1872          * one txq shares an eq
1873                  */
1874                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1875                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1876                 if (status)
1877                         return status;
1878         }
1879         return 0;
1880 }
1881
1882 static int be_tx_qs_create(struct be_adapter *adapter)
1883 {
1884         struct be_tx_obj *txo;
1885         int i, status;
1886
1887         for_all_tx_queues(adapter, txo, i) {
1888                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1889                                         sizeof(struct be_eth_wrb));
1890                 if (status)
1891                         return status;
1892
1893                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1894                 if (status)
1895                         return status;
1896         }
1897
1898         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1899                  adapter->num_tx_qs);
1900         return 0;
1901 }
1902
1903 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1904 {
1905         struct be_queue_info *q;
1906         struct be_rx_obj *rxo;
1907         int i;
1908
1909         for_all_rx_queues(adapter, rxo, i) {
1910                 q = &rxo->cq;
1911                 if (q->created)
1912                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1913                 be_queue_free(adapter, q);
1914         }
1915 }
1916
1917 static int be_rx_cqs_create(struct be_adapter *adapter)
1918 {
1919         struct be_queue_info *eq, *cq;
1920         struct be_rx_obj *rxo;
1921         int rc, i;
1922
1923         /* We'll create as many RSS rings as there are irqs.
1924          * But when there's only one irq there's no use creating RSS rings
1925          */
1926         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1927                                 num_irqs(adapter) + 1 : 1;
1928         if (adapter->num_rx_qs != MAX_RX_QS) {
1929                 rtnl_lock();
1930                 netif_set_real_num_rx_queues(adapter->netdev,
1931                                              adapter->num_rx_qs);
1932                 rtnl_unlock();
1933         }
1934
1935         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1936         for_all_rx_queues(adapter, rxo, i) {
1937                 rxo->adapter = adapter;
1938                 cq = &rxo->cq;
1939                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1940                                 sizeof(struct be_eth_rx_compl));
1941                 if (rc)
1942                         return rc;
1943
1944                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1945                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1946                 if (rc)
1947                         return rc;
1948         }
1949
1950         dev_info(&adapter->pdev->dev,
1951                  "created %d RSS queue(s) and 1 default RX queue\n",
1952                  adapter->num_rx_qs - 1);
1953         return 0;
1954 }
1955
1956 static irqreturn_t be_intx(int irq, void *dev)
1957 {
1958         struct be_adapter *adapter = dev;
1959         int num_evts;
1960
1961         /* With INTx only one EQ is used */
1962         num_evts = event_handle(&adapter->eq_obj[0]);
1963         if (num_evts)
1964                 return IRQ_HANDLED;
1965         else
1966                 return IRQ_NONE;
1967 }
1968
1969 static irqreturn_t be_msix(int irq, void *dev)
1970 {
1971         struct be_eq_obj *eqo = dev;
1972
1973         event_handle(eqo);
1974         return IRQ_HANDLED;
1975 }
1976
1977 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1978 {
1979         return rxcp->tcpf && !rxcp->err;
1980 }
1981
1982 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1983                         int budget)
1984 {
1985         struct be_adapter *adapter = rxo->adapter;
1986         struct be_queue_info *rx_cq = &rxo->cq;
1987         struct be_rx_compl_info *rxcp;
1988         u32 work_done;
1989
1990         for (work_done = 0; work_done < budget; work_done++) {
1991                 rxcp = be_rx_compl_get(rxo);
1992                 if (!rxcp)
1993                         break;
1994
1995                 /* Is it a flush compl that has no data */
1996                 if (unlikely(rxcp->num_rcvd == 0))
1997                         goto loop_continue;
1998
1999                 /* Discard compl with partial DMA Lancer B0 */
2000                 if (unlikely(!rxcp->pkt_size)) {
2001                         be_rx_compl_discard(rxo, rxcp);
2002                         goto loop_continue;
2003                 }
2004
2005                 /* On BE drop pkts that arrive due to imperfect filtering in
2006                  * promiscuous mode on some SKUs
2007                  */
2008                 if (unlikely(rxcp->port != adapter->port_num &&
2009                                 !lancer_chip(adapter))) {
2010                         be_rx_compl_discard(rxo, rxcp);
2011                         goto loop_continue;
2012                 }
2013
2014                 if (do_gro(rxcp))
2015                         be_rx_compl_process_gro(rxo, napi, rxcp);
2016                 else
2017                         be_rx_compl_process(rxo, rxcp);
2018 loop_continue:
2019                 be_rx_stats_update(rxo, rxcp);
2020         }
2021
2022         if (work_done) {
2023                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2024
2025                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2026                         be_post_rx_frags(rxo, GFP_ATOMIC);
2027         }
2028
2029         return work_done;
2030 }
2031
2032 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2033                           int budget, int idx)
2034 {
2035         struct be_eth_tx_compl *txcp;
2036         int num_wrbs = 0, work_done;
2037
2038         for (work_done = 0; work_done < budget; work_done++) {
2039                 txcp = be_tx_compl_get(&txo->cq);
2040                 if (!txcp)
2041                         break;
2042                 num_wrbs += be_tx_compl_process(adapter, txo,
2043                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2044                                         wrb_index, txcp));
2045         }
2046
2047         if (work_done) {
2048                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2049                 atomic_sub(num_wrbs, &txo->q.used);
2050
2051                 /* As Tx wrbs have been freed up, wake up netdev queue
2052                  * if it was stopped due to lack of tx wrbs.  */
2053                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2054                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2055                         netif_wake_subqueue(adapter->netdev, idx);
2056                 }
2057
2058                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2059                 tx_stats(txo)->tx_compl += work_done;
2060                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2061         }
2062         return (work_done < budget); /* Done */
2063 }
2064
2065 int be_poll(struct napi_struct *napi, int budget)
2066 {
2067         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2068         struct be_adapter *adapter = eqo->adapter;
2069         int max_work = 0, work, i;
2070         bool tx_done;
2071
2072         /* Process all TXQs serviced by this EQ */
2073         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2074                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2075                                         eqo->tx_budget, i);
2076                 if (!tx_done)
2077                         max_work = budget;
2078         }
2079
2080         /* This loop will iterate twice for EQ0 in which
2081          * completions of the last RXQ (default one) are also processed.
2082          * For other EQs the loop iterates only once.
2083          */
2084         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2085                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2086                 max_work = max(work, max_work);
2087         }
2088
2089         if (is_mcc_eqo(eqo))
2090                 be_process_mcc(adapter);
2091
2092         if (max_work < budget) {
2093                 napi_complete(napi);
2094                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2095         } else {
2096                 /* As we'll continue in polling mode, count and clear events */
2097                 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2098         }
2099         return max_work;
2100 }
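
/* Illustrative sketch, not part of the driver build: be_poll() follows the
 * NAPI contract -- when everything drained within 'budget' it calls
 * napi_complete() and re-arms the EQ, otherwise it stays in polling mode and
 * is called again.  A user-space model of that decision, with hypothetical
 * drain/complete/rearm stand-ins:
 */
#if 0
#include <stdio.h>

static int pending = 10;	/* pretend work items */

static int drain_queues(int budget)
{
	int done = pending < budget ? pending : budget;

	pending -= done;
	return done;
}

static void fake_napi_complete(void) { puts("leave polling mode"); }
static void fake_rearm_irq(void)     { puts("re-arm interrupt"); }

static int fake_poll(int budget)
{
	int done = drain_queues(budget);

	if (done < budget) {	/* fully drained: back to interrupts */
		fake_napi_complete();
		fake_rearm_irq();
	}			/* else: the core keeps polling us */
	return done;
}

int main(void)
{
	while (fake_poll(4) == 4)	/* 10 items, budget 4: 3 rounds */
		;
	return 0;
}
#endif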
2101
2102 void be_detect_error(struct be_adapter *adapter)
2103 {
2104         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2105         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2106         u32 i;
2107
2108         if (be_crit_error(adapter))
2109                 return;
2110
2111         if (lancer_chip(adapter)) {
2112                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2113                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2114                         sliport_err1 = ioread32(adapter->db +
2115                                         SLIPORT_ERROR1_OFFSET);
2116                         sliport_err2 = ioread32(adapter->db +
2117                                         SLIPORT_ERROR2_OFFSET);
2118                 }
2119         } else {
2120                 pci_read_config_dword(adapter->pdev,
2121                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2122                 pci_read_config_dword(adapter->pdev,
2123                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2124                 pci_read_config_dword(adapter->pdev,
2125                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2126                 pci_read_config_dword(adapter->pdev,
2127                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2128
2129                 ue_lo = (ue_lo & ~ue_lo_mask);
2130                 ue_hi = (ue_hi & ~ue_hi_mask);
2131         }
2132
2133         /* On certain platforms BE hardware can indicate spurious UEs.
2134          * In case of a real UE the h/w will stop working on its own,
2135          * so hw_error is deliberately not set on UE detection.
2136          */
2137         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2138                 adapter->hw_error = true;
2139                 dev_err(&adapter->pdev->dev,
2140                         "Error detected in the card\n");
2141                 dev_err(&adapter->pdev->dev,
2142                         "ERR: sliport status 0x%x\n", sliport_status);
2143                 dev_err(&adapter->pdev->dev,
2144                         "ERR: sliport error1 0x%x\n", sliport_err1);
2145                 dev_err(&adapter->pdev->dev,
2146                         "ERR: sliport error2 0x%x\n", sliport_err2);
2147         }
2151
2152         if (ue_lo) {
2153                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2154                         if (ue_lo & 1)
2155                                 dev_err(&adapter->pdev->dev,
2156                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2157                 }
2158         }
2159
2160         if (ue_hi) {
2161                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2162                         if (ue_hi & 1)
2163                                 dev_err(&adapter->pdev->dev,
2164                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2165                 }
2166         }
2168 }
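
/* Illustrative sketch, not part of the driver build: the loops above shift
 * the unmasked UE word right one bit at a time and print the block name
 * (from the descriptor tables at the top of this file) for every set bit.
 * The same decode standalone, with a shortened hypothetical name table:
 */
#if 0
#include <stdio.h>

static const char * const bit_desc[] = { "CEV", "CTX", "DBUF", "ERX" };

int main(void)
{
	unsigned int status = 0x5;	/* bits 0 and 2 set */
	unsigned int i;

	for (i = 0; status && i < 4; status >>= 1, i++)
		if (status & 1)
			printf("UE: %s bit set\n", bit_desc[i]);
	return 0;
}
#endif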
2169
2170 static void be_msix_disable(struct be_adapter *adapter)
2171 {
2172         if (msix_enabled(adapter)) {
2173                 pci_disable_msix(adapter->pdev);
2174                 adapter->num_msix_vec = 0;
2175         }
2176 }
2177
2178 static uint be_num_rss_want(struct be_adapter *adapter)
2179 {
2180         u32 num = 0;
2181
2182         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2183             (lancer_chip(adapter) ||
2184              (!sriov_want(adapter) && be_physfn(adapter)))) {
2185                 num = adapter->max_rss_queues;
2186                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2187         }
2188         return num;
2189 }
2190
2191 static void be_msix_enable(struct be_adapter *adapter)
2192 {
2193 #define BE_MIN_MSIX_VECTORS             1
2194         int i, status, num_vec, num_roce_vec = 0;
2195         struct device *dev = &adapter->pdev->dev;
2196
2197         /* If RSS queues are not used, need a vec for default RX Q */
2198         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2199         if (be_roce_supported(adapter)) {
2200                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2201                                         (num_online_cpus() + 1));
2202                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2203                 num_vec += num_roce_vec;
2204                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2205         }
2206         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2207
2208         for (i = 0; i < num_vec; i++)
2209                 adapter->msix_entries[i].entry = i;
2210
2211         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2212         if (status == 0) {
2213                 goto done;
2214         } else if (status >= BE_MIN_MSIX_VECTORS) {
2215                 num_vec = status;
2216                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2217                                 num_vec) == 0)
2218                         goto done;
2219         }
2220
2221         dev_warn(dev, "MSIx enable failed\n");
2222         return;
2223 done:
2224         if (be_roce_supported(adapter)) {
2225                 if (num_vec > num_roce_vec) {
2226                         adapter->num_msix_vec = num_vec - num_roce_vec;
2227                         adapter->num_msix_roce_vec =
2228                                 num_vec - adapter->num_msix_vec;
2229                 } else {
2230                         adapter->num_msix_vec = num_vec;
2231                         adapter->num_msix_roce_vec = 0;
2232                 }
2233         } else
2234                 adapter->num_msix_vec = num_vec;
2235         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2237 }
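
/* Illustrative sketch, not part of the driver build: the old
 * pci_enable_msix() used above returns 0 on success, a negative errno on
 * hard failure, and a positive count when only that many vectors are free,
 * in which case the driver retries with the smaller count.  A user-space
 * model of that retry:
 */
#if 0
#include <stdio.h>

static int avail = 5;	/* pretend the platform has 5 free vectors */

/* mimics old pci_enable_msix(): 0 = ok, >0 = only this many free */
static int fake_enable_msix(int want)
{
	return want <= avail ? 0 : avail;
}

int main(void)
{
	int min = 1, num_vec = 8, rc;

	rc = fake_enable_msix(num_vec);
	if (rc > 0 && rc >= min) {	/* fewer vectors available */
		num_vec = rc;
		rc = fake_enable_msix(num_vec);	/* retry with that many */
	}
	if (rc == 0)
		printf("enabled %d vectors\n", num_vec);
	else
		printf("MSI-X enable failed\n");
	return rc;
}
#endif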
2238
2239 static inline int be_msix_vec_get(struct be_adapter *adapter,
2240                                 struct be_eq_obj *eqo)
2241 {
2242         return adapter->msix_entries[eqo->idx].vector;
2243 }
2244
2245 static int be_msix_register(struct be_adapter *adapter)
2246 {
2247         struct net_device *netdev = adapter->netdev;
2248         struct be_eq_obj *eqo;
2249         int status, i, vec;
2250
2251         for_all_evt_queues(adapter, eqo, i) {
2252                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2253                 vec = be_msix_vec_get(adapter, eqo);
2254                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2255                 if (status)
2256                         goto err_msix;
2257         }
2258
2259         return 0;
2260 err_msix:
2261         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2262                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2263         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2264                 status);
2265         be_msix_disable(adapter);
2266         return status;
2267 }
2268
2269 static int be_irq_register(struct be_adapter *adapter)
2270 {
2271         struct net_device *netdev = adapter->netdev;
2272         int status;
2273
2274         if (msix_enabled(adapter)) {
2275                 status = be_msix_register(adapter);
2276                 if (status == 0)
2277                         goto done;
2278                 /* INTx is not supported for VF */
2279                 if (!be_physfn(adapter))
2280                         return status;
2281         }
2282
2283         /* INTx */
2284         netdev->irq = adapter->pdev->irq;
2285         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2286                         adapter);
2287         if (status) {
2288                 dev_err(&adapter->pdev->dev,
2289                         "INTx request IRQ failed - err %d\n", status);
2290                 return status;
2291         }
2292 done:
2293         adapter->isr_registered = true;
2294         return 0;
2295 }
2296
2297 static void be_irq_unregister(struct be_adapter *adapter)
2298 {
2299         struct net_device *netdev = adapter->netdev;
2300         struct be_eq_obj *eqo;
2301         int i;
2302
2303         if (!adapter->isr_registered)
2304                 return;
2305
2306         /* INTx */
2307         if (!msix_enabled(adapter)) {
2308                 free_irq(netdev->irq, adapter);
2309                 goto done;
2310         }
2311
2312         /* MSIx */
2313         for_all_evt_queues(adapter, eqo, i)
2314                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2315
2316 done:
2317         adapter->isr_registered = false;
2318 }
2319
2320 static void be_rx_qs_destroy(struct be_adapter *adapter)
2321 {
2322         struct be_queue_info *q;
2323         struct be_rx_obj *rxo;
2324         int i;
2325
2326         for_all_rx_queues(adapter, rxo, i) {
2327                 q = &rxo->q;
2328                 if (q->created) {
2329                         be_cmd_rxq_destroy(adapter, q);
2330                         /* After the rxq is invalidated, wait for a grace time
2331                          * of 1ms for all dma to end and the flush compl to
2332                          * arrive
2333                          */
2334                         mdelay(1);
2335                         be_rx_cq_clean(rxo);
2336                 }
2337                 be_queue_free(adapter, q);
2338         }
2339 }
2340
2341 static int be_close(struct net_device *netdev)
2342 {
2343         struct be_adapter *adapter = netdev_priv(netdev);
2344         struct be_eq_obj *eqo;
2345         int i;
2346
2347         be_roce_dev_close(adapter);
2348
2349         be_async_mcc_disable(adapter);
2350
2351         if (!lancer_chip(adapter))
2352                 be_intr_set(adapter, false);
2353
2354         for_all_evt_queues(adapter, eqo, i) {
2355                 napi_disable(&eqo->napi);
2356                 if (msix_enabled(adapter))
2357                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2358                 else
2359                         synchronize_irq(netdev->irq);
2360                 be_eq_clean(eqo);
2361         }
2362
2363         be_irq_unregister(adapter);
2364
2365         /* Wait for all pending tx completions to arrive so that
2366          * all tx skbs are freed.
2367          */
2368         be_tx_compl_clean(adapter);
2369
2370         be_rx_qs_destroy(adapter);
2371         return 0;
2372 }
2373
2374 static int be_rx_qs_create(struct be_adapter *adapter)
2375 {
2376         struct be_rx_obj *rxo;
2377         int rc, i, j;
2378         u8 rsstable[128];
2379
2380         for_all_rx_queues(adapter, rxo, i) {
2381                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2382                                     sizeof(struct be_eth_rx_d));
2383                 if (rc)
2384                         return rc;
2385         }
2386
2387         /* The FW would like the default RXQ to be created first */
2388         rxo = default_rxo(adapter);
2389         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2390                                adapter->if_handle, false, &rxo->rss_id);
2391         if (rc)
2392                 return rc;
2393
2394         for_all_rss_queues(adapter, rxo, i) {
2395                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2396                                        rx_frag_size, adapter->if_handle,
2397                                        true, &rxo->rss_id);
2398                 if (rc)
2399                         return rc;
2400         }
2401
2402         if (be_multi_rxq(adapter)) {
2403                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2404                         for_all_rss_queues(adapter, rxo, i) {
2405                                 if ((j + i) >= 128)
2406                                         break;
2407                                 rsstable[j + i] = rxo->rss_id;
2408                         }
2409                 }
2410                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2411                 if (rc)
2412                         return rc;
2413         }
2414
2415         /* First time posting */
2416         for_all_rx_queues(adapter, rxo, i)
2417                 be_post_rx_frags(rxo, GFP_KERNEL);
2418         return 0;
2419 }
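
/* Illustrative sketch, not part of the driver build: the RSS setup in
 * be_rx_qs_create() fills a 128-entry indirection table by striding over it
 * in steps of the RSS ring count, so ring ids repeat round-robin across all
 * slots.  The same fill standalone, with hypothetical ring ids:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned char rsstable[128];
	int num_rss = 4;	/* hypothetical RSS ring count */
	int i, j;

	for (j = 0; j < 128; j += num_rss)
		for (i = 0; i < num_rss && (j + i) < 128; i++)
			rsstable[j + i] = i;	/* ring id */

	for (i = 0; i < 8; i++)
		printf("%d ", rsstable[i]);	/* 0 1 2 3 0 1 2 3 */
	printf("...\n");
	return 0;
}
#endif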
2420
2421 static int be_open(struct net_device *netdev)
2422 {
2423         struct be_adapter *adapter = netdev_priv(netdev);
2424         struct be_eq_obj *eqo;
2425         struct be_rx_obj *rxo;
2426         struct be_tx_obj *txo;
2427         u8 link_status;
2428         int status, i;
2429
2430         status = be_rx_qs_create(adapter);
2431         if (status)
2432                 goto err;
2433
2434         be_irq_register(adapter);
2435
2436         if (!lancer_chip(adapter))
2437                 be_intr_set(adapter, true);
2438
2439         for_all_rx_queues(adapter, rxo, i)
2440                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2441
2442         for_all_tx_queues(adapter, txo, i)
2443                 be_cq_notify(adapter, txo->cq.id, true, 0);
2444
2445         be_async_mcc_enable(adapter);
2446
2447         for_all_evt_queues(adapter, eqo, i) {
2448                 napi_enable(&eqo->napi);
2449                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2450         }
2451
2452         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2453         if (!status)
2454                 be_link_status_update(adapter, link_status);
2455
2456         be_roce_dev_open(adapter);
2457         return 0;
2458 err:
2459         be_close(adapter->netdev);
2460         return -EIO;
2461 }
2462
2463 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2464 {
2465         struct be_dma_mem cmd;
2466         int status = 0;
2467         u8 mac[ETH_ALEN];
2468
2469         memset(mac, 0, ETH_ALEN);
2470
2471         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2472         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2473                                     GFP_KERNEL);
2474         if (cmd.va == NULL)
2475                 return -ENOMEM;
2476         memset(cmd.va, 0, cmd.size);
2477
2478         if (enable) {
2479                 status = pci_write_config_dword(adapter->pdev,
2480                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2481                 if (status) {
2482                         dev_err(&adapter->pdev->dev,
2483                                 "Could not enable Wake-on-lan\n");
2484                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2485                                           cmd.dma);
2486                         return status;
2487                 }
2488                 status = be_cmd_enable_magic_wol(adapter,
2489                                 adapter->netdev->dev_addr, &cmd);
2490                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2491                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2492         } else {
2493                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2494                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2495                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2496         }
2497
2498         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2499         return status;
2500 }
2501
2502 /*
2503  * Generate a seed MAC address from the PF MAC Address using jhash.
2504  * MAC addresses for VFs are assigned incrementally starting from the seed.
2505  * These addresses are programmed in the ASIC by the PF and the VF driver
2506  * queries for the MAC address during its probe.
2507  */
2508 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2509 {
2510         u32 vf;
2511         int status = 0;
2512         u8 mac[ETH_ALEN];
2513         struct be_vf_cfg *vf_cfg;
2514
2515         be_vf_eth_addr_generate(adapter, mac);
2516
2517         for_all_vfs(adapter, vf_cfg, vf) {
2518                 if (lancer_chip(adapter)) {
2519                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2520                 } else {
2521                         status = be_cmd_pmac_add(adapter, mac,
2522                                                  vf_cfg->if_handle,
2523                                                  &vf_cfg->pmac_id, vf + 1);
2524                 }
2525
2526                 if (status)
2527                         dev_err(&adapter->pdev->dev,
2528                         "Mac address assignment failed for VF %d\n", vf);
2529                 else
2530                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2531
2532                 mac[5] += 1;
2533         }
2534         return status;
2535 }
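
/* Illustrative sketch, not part of the driver build: VF MACs are derived by
 * bumping only the last octet of the seed (mac[5] += 1), which is fine for
 * the few VFs a function exposes but would wrap without carrying into
 * mac[4] after 256 increments:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0xfe };
	int vf;

	for (vf = 0; vf < 3; vf++) {
		printf("VF%d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		mac[5] += 1;	/* fe -> ff -> 00, no carry into mac[4] */
	}
	return 0;
}
#endif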
2536
2537 static void be_vf_clear(struct be_adapter *adapter)
2538 {
2539         struct be_vf_cfg *vf_cfg;
2540         u32 vf;
2541
2542         if (be_find_vfs(adapter, ASSIGNED)) {
2543                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2544                 goto done;
2545         }
2546
2547         for_all_vfs(adapter, vf_cfg, vf) {
2548                 if (lancer_chip(adapter))
2549                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2550                 else
2551                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2552                                         vf_cfg->pmac_id, vf + 1);
2553
2554                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2555         }
2556         pci_disable_sriov(adapter->pdev);
2557 done:
2558         kfree(adapter->vf_cfg);
2559         adapter->num_vfs = 0;
2560 }
2561
2562 static int be_clear(struct be_adapter *adapter)
2563 {
2564         int i = 1;
2565
2566         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2567                 cancel_delayed_work_sync(&adapter->work);
2568                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2569         }
2570
2571         if (sriov_enabled(adapter))
2572                 be_vf_clear(adapter);
2573
2574         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2575                 be_cmd_pmac_del(adapter, adapter->if_handle,
2576                         adapter->pmac_id[i], 0);
2577
2578         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2579
2580         be_mcc_queues_destroy(adapter);
2581         be_rx_cqs_destroy(adapter);
2582         be_tx_queues_destroy(adapter);
2583         be_evt_queues_destroy(adapter);
2584
2585         kfree(adapter->pmac_id);
2586         adapter->pmac_id = NULL;
2587
2588         be_msix_disable(adapter);
2589         return 0;
2590 }
2591
2592 static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2593                                    u32 *cap_flags, u8 domain)
2594 {
2595         bool profile_present = false;
2596         int status;
2597
2598         if (lancer_chip(adapter)) {
2599                 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2600                 if (!status)
2601                         profile_present = true;
2602         }
2603
2604         if (!profile_present)
2605                 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2606                              BE_IF_FLAGS_MULTICAST;
2607 }
2608
2609 static int be_vf_setup_init(struct be_adapter *adapter)
2610 {
2611         struct be_vf_cfg *vf_cfg;
2612         int vf;
2613
2614         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2615                                   GFP_KERNEL);
2616         if (!adapter->vf_cfg)
2617                 return -ENOMEM;
2618
2619         for_all_vfs(adapter, vf_cfg, vf) {
2620                 vf_cfg->if_handle = -1;
2621                 vf_cfg->pmac_id = -1;
2622         }
2623         return 0;
2624 }
2625
2626 static int be_vf_setup(struct be_adapter *adapter)
2627 {
2628         struct be_vf_cfg *vf_cfg;
2629         struct device *dev = &adapter->pdev->dev;
2630         u32 cap_flags, en_flags, vf;
2631         u16 def_vlan, lnk_speed;
2632         int status, enabled_vfs;
2633
2634         enabled_vfs = be_find_vfs(adapter, ENABLED);
2635         if (enabled_vfs) {
2636                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2637                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2638                 return 0;
2639         }
2640
2641         if (num_vfs > adapter->dev_num_vfs) {
2642                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2643                          adapter->dev_num_vfs, num_vfs);
2644                 num_vfs = adapter->dev_num_vfs;
2645         }
2646
2647         status = pci_enable_sriov(adapter->pdev, num_vfs);
2648         if (!status) {
2649                 adapter->num_vfs = num_vfs;
2650         } else {
2651                 /* Platform doesn't support SRIOV though device supports it */
2652                 dev_warn(dev, "SRIOV enable failed\n");
2653                 return 0;
2654         }
2655
2656         status = be_vf_setup_init(adapter);
2657         if (status)
2658                 goto err;
2659
2660         for_all_vfs(adapter, vf_cfg, vf) {
2661                 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2662
2663                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2664                                         BE_IF_FLAGS_BROADCAST |
2665                                         BE_IF_FLAGS_MULTICAST);
2666
2667                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2668                                           &vf_cfg->if_handle, vf + 1);
2669                 if (status)
2670                         goto err;
2671         }
2672
2673         if (!enabled_vfs) {
2674                 status = be_vf_eth_addr_config(adapter);
2675                 if (status)
2676                         goto err;
2677         }
2678
2679         for_all_vfs(adapter, vf_cfg, vf) {
2680                 lnk_speed = 1000;
2681                 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
2682                 if (status)
2683                         goto err;
2684                 vf_cfg->tx_rate = lnk_speed * 10;
2685
2686                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2687                                 vf + 1, vf_cfg->if_handle);
2688                 if (status)
2689                         goto err;
2690                 vf_cfg->def_vid = def_vlan;
2691         }
2692         return 0;
2693 err:
2694         return status;
2695 }
2696
2697 static void be_setup_init(struct be_adapter *adapter)
2698 {
2699         adapter->vlan_prio_bmap = 0xff;
2700         adapter->phy.link_speed = -1;
2701         adapter->if_handle = -1;
2702         adapter->be3_native = false;
2703         adapter->promiscuous = false;
2704         adapter->eq_next_idx = 0;
2705 }
2706
2707 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2708                            bool *active_mac, u32 *pmac_id)
2709 {
2710         int status = 0;
2711
2712         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2713                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2714                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2715                         *active_mac = true;
2716                 else
2717                         *active_mac = false;
2718
2719                 return status;
2720         }
2721
2722         if (lancer_chip(adapter)) {
2723                 status = be_cmd_get_mac_from_list(adapter, mac,
2724                                                   active_mac, pmac_id, 0);
2725                 if (*active_mac) {
2726                         status = be_cmd_mac_addr_query(adapter, mac, false,
2727                                                        if_handle, *pmac_id);
2728                 }
2729         } else if (be_physfn(adapter)) {
2730                 /* For BE3, for PF get permanent MAC */
2731                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2732                 *active_mac = false;
2733         } else {
2734                 /* For BE3, for VF get soft MAC assigned by PF */
2735                 status = be_cmd_mac_addr_query(adapter, mac, false,
2736                                                if_handle, 0);
2737                 *active_mac = true;
2738         }
2739         return status;
2740 }
2741
2742 static void be_get_resources(struct be_adapter *adapter)
2743 {
2744         int status;
2745         bool profile_present = false;
2746
2747         if (lancer_chip(adapter)) {
2748                 status = be_cmd_get_func_config(adapter);
2749
2750                 if (!status)
2751                         profile_present = true;
2752         }
2753
2754         if (profile_present) {
2755                 /* Sanity fixes for Lancer */
2756                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2757                                               BE_UC_PMAC_COUNT);
2758                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2759                                            BE_NUM_VLANS_SUPPORTED);
2760                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2761                                                BE_MAX_MC);
2762                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2763                                                MAX_TX_QS);
2764                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2765                                                 BE3_MAX_RSS_QS);
2766                 adapter->max_event_queues = min_t(u16,
2767                                                   adapter->max_event_queues,
2768                                                   BE3_MAX_RSS_QS);
2769
2770                 if (adapter->max_rss_queues &&
2771                     adapter->max_rss_queues == adapter->max_rx_queues)
2772                         adapter->max_rss_queues -= 1;
2773
2774                 if (adapter->max_event_queues < adapter->max_rss_queues)
2775                         adapter->max_rss_queues = adapter->max_event_queues;
2776
2777         } else {
2778                 if (be_physfn(adapter))
2779                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2780                 else
2781                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2782
2783                 if (adapter->function_mode & FLEX10_MODE)
2784                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2785                 else
2786                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2787
2788                 adapter->max_mcast_mac = BE_MAX_MC;
2789                 adapter->max_tx_queues = MAX_TX_QS;
2790                 adapter->max_rss_queues = (adapter->be3_native) ?
2791                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2792                 adapter->max_event_queues = BE3_MAX_RSS_QS;
2793
2794                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2795                                         BE_IF_FLAGS_BROADCAST |
2796                                         BE_IF_FLAGS_MULTICAST |
2797                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
2798                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
2799                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
2800                                         BE_IF_FLAGS_PROMISCUOUS;
2801
2802                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2803                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2804         }
2805 }
2806
2807 /* Routine to query per function resource limits */
2808 static int be_get_config(struct be_adapter *adapter)
2809 {
2810         int pos, status;
2811         u16 dev_num_vfs;
2812
2813         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2814                                      &adapter->function_mode,
2815                                      &adapter->function_caps);
2816         if (status)
2817                 goto err;
2818
2819         be_get_resources(adapter);
2820
2821         /* primary mac needs 1 pmac entry */
2822         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2823                                    sizeof(u32), GFP_KERNEL);
2824         if (!adapter->pmac_id) {
2825                 status = -ENOMEM;
2826                 goto err;
2827         }
2828
2829         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2830         if (pos) {
2831                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2832                                      &dev_num_vfs);
2833                 if (!lancer_chip(adapter))
2834                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2835                 adapter->dev_num_vfs = dev_num_vfs;
2836         }
2837 err:
2838         return status;
2839 }
2840
2841 static int be_setup(struct be_adapter *adapter)
2842 {
2843         struct device *dev = &adapter->pdev->dev;
2844         u32 en_flags;
2845         u32 tx_fc, rx_fc;
2846         int status;
2847         u8 mac[ETH_ALEN];
2848         bool active_mac;
2849
2850         be_setup_init(adapter);
2851
2852         if (!lancer_chip(adapter))
2853                 be_cmd_req_native_mode(adapter);
2854
2855         status = be_get_config(adapter);
2856         if (status)
2857                 goto err;
2858
2859         be_msix_enable(adapter);
2860
2861         status = be_evt_queues_create(adapter);
2862         if (status)
2863                 goto err;
2864
2865         status = be_tx_cqs_create(adapter);
2866         if (status)
2867                 goto err;
2868
2869         status = be_rx_cqs_create(adapter);
2870         if (status)
2871                 goto err;
2872
2873         status = be_mcc_queues_create(adapter);
2874         if (status)
2875                 goto err;
2876
2877         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2878                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2879
2880         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2881                 en_flags |= BE_IF_FLAGS_RSS;
2882
2883         en_flags &= adapter->if_cap_flags;
2884
2885         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
2886                                   &adapter->if_handle, 0);
2887         if (status != 0)
2888                 goto err;
2889
2890         memset(mac, 0, ETH_ALEN);
2891         active_mac = false;
2892         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2893                                  &active_mac, &adapter->pmac_id[0]);
2894         if (status != 0)
2895                 goto err;
2896
2897         if (!active_mac) {
2898                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2899                                          &adapter->pmac_id[0], 0);
2900                 if (status != 0)
2901                         goto err;
2902         }
2903
2904         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2905                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2906                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2907         }
2908
2909         status = be_tx_qs_create(adapter);
2910         if (status)
2911                 goto err;
2912
2913         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2914
2915         if (adapter->vlans_added)
2916                 be_vid_config(adapter);
2917
2918         be_set_rx_mode(adapter->netdev);
2919
2920         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2921
2922         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2923                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2924                                         adapter->rx_fc);
2925
2926         if (be_physfn(adapter) && num_vfs) {
2927                 if (adapter->dev_num_vfs)
2928                         be_vf_setup(adapter);
2929                 else
2930                         dev_warn(dev, "device doesn't support SRIOV\n");
2931         }
2932
2933         be_cmd_get_phy_info(adapter);
2934         if (be_pause_supported(adapter))
2935                 adapter->phy.fc_autoneg = 1;
2936
2937         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2938         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2939         return 0;
2940 err:
2941         be_clear(adapter);
2942         return status;
2943 }
2944
2945 #ifdef CONFIG_NET_POLL_CONTROLLER
2946 static void be_netpoll(struct net_device *netdev)
2947 {
2948         struct be_adapter *adapter = netdev_priv(netdev);
2949         struct be_eq_obj *eqo;
2950         int i;
2951
2952         for_all_evt_queues(adapter, eqo, i)
2953                 event_handle(eqo);
2956 }
2957 #endif
2958
2959 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2960 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2961
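/* Decide whether the boot code (redboot) section needs flashing.
 * The last 4 bytes of the image in the UFI file hold its CRC; FW is
 * asked for the CRC of the copy already in flash and the section is
 * skipped when the two match, so an unchanged redboot is not
 * rewritten.
 */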
2962 static bool be_flash_redboot(struct be_adapter *adapter,
2963                         const u8 *p, u32 img_start, int image_size,
2964                         int hdr_size)
2965 {
2966         u32 crc_offset;
2967         u8 flashed_crc[4];
2968         int status;
2969
2970         crc_offset = hdr_size + img_start + image_size - 4;
2971
2972         p += crc_offset;
2973
2974         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2975                         (image_size - 4));
2976         if (status) {
2977                 dev_err(&adapter->pdev->dev,
2978                         "could not get crc from flash, not flashing redboot\n");
2979                 return false;
2980         }
2981
2982         /* update redboot only if the CRC does not match */
2983         return memcmp(flashed_crc, p, 4) != 0;
2987 }
2988
2989 static bool phy_flashing_required(struct be_adapter *adapter)
2990 {
2991         return (adapter->phy.phy_type == TN_8022 &&
2992                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2993 }
2994
2995 static bool is_comp_in_ufi(struct be_adapter *adapter,
2996                            struct flash_section_info *fsec, int type)
2997 {
2998         int i = 0, img_type = 0;
2999         struct flash_section_info_g2 *fsec_g2 = NULL;
3000
3001         if (adapter->generation != BE_GEN3)
3002                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3003
3004         for (i = 0; i < MAX_FLASH_COMP; i++) {
3005                 if (fsec_g2)
3006                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3007                 else
3008                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3009
3010                 if (img_type == type)
3011                         return true;
3012         }
3013         return false;
3015 }
3016
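/* Locate the flash section directory in the UFI image by scanning, in
 * 32-byte strides, for the "*** SE FLASH DIRECTORY *** " cookie that
 * follows the file and image headers.
 */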
3017 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3018                                                 int header_size,
3019                                                 const struct firmware *fw)
3020 {
3021         struct flash_section_info *fsec = NULL;
3022         const u8 *p = fw->data;
3023
3024         p += header_size;
3025         while (p < (fw->data + fw->size)) {
3026                 fsec = (struct flash_section_info *)p;
3027                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3028                         return fsec;
3029                 p += 32;
3030         }
3031         return NULL;
3032 }
3033
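/* Flash every component present in the UFI file. The gen2/gen3 tables
 * below map each image type to its flash offset, maximum size and FW
 * optype. Data is pushed in 32KB chunks: intermediate chunks use a
 * SAVE op and the final chunk a FLASH op, which presumably tells FW to
 * commit the accumulated buffer to flash.
 */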
3034 static int be_flash_data(struct be_adapter *adapter,
3035                          const struct firmware *fw,
3036                          struct be_dma_mem *flash_cmd,
3037                          int num_of_images)
3039 {
3040         int status = 0, i, filehdr_size = 0;
3041         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3042         u32 total_bytes = 0, flash_op;
3043         int num_bytes;
3044         const u8 *p = fw->data;
3045         struct be_cmd_write_flashrom *req = flash_cmd->va;
3046         const struct flash_comp *pflashcomp;
3047         int num_comp, hdr_size;
3048         struct flash_section_info *fsec = NULL;
3049
3050         struct flash_comp gen3_flash_types[] = {
3051                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3052                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3053                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3054                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3055                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3056                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3057                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3058                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3059                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3060                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3061                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3062                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3063                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3064                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3065                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3066                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3067                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3068                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3069                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3070                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3071         };
3072
3073         struct flash_comp gen2_flash_types[] = {
3074                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3075                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3076                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3077                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3078                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3079                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3080                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3081                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3082                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3083                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3084                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3085                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3086                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3087                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3088                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3089                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3090         };
3091
3092         if (adapter->generation == BE_GEN3) {
3093                 pflashcomp = gen3_flash_types;
3094                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3095                 num_comp = ARRAY_SIZE(gen3_flash_types);
3096         } else {
3097                 pflashcomp = gen2_flash_types;
3098                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3099                 num_comp = ARRAY_SIZE(gen2_flash_types);
3100         }
3101         /* Get flash section info */
3102         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3103         if (!fsec) {
3104                 dev_err(&adapter->pdev->dev,
3105                         "Invalid Cookie. UFI corrupted?\n");
3106                 return -1;
3107         }
3108         for (i = 0; i < num_comp; i++) {
3109                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3110                         continue;
3111
3112                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3113                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3114                         continue;
3115
3116                 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
3117                         if (!phy_flashing_required(adapter))
3118                                 continue;
3119                 }
3120
3121                 hdr_size = filehdr_size +
3122                            (num_of_images * sizeof(struct image_hdr));
3123
3124                 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3125                     (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3126                                        pflashcomp[i].size, hdr_size)))
3127                         continue;
3128
3129                 /* Flash the component */
3130                 p = fw->data;
3131                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3132                 if (p + pflashcomp[i].size > fw->data + fw->size)
3133                         return -1;
3134                 total_bytes = pflashcomp[i].size;
3135                 while (total_bytes) {
3136                         if (total_bytes > 32*1024)
3137                                 num_bytes = 32*1024;
3138                         else
3139                                 num_bytes = total_bytes;
3140                         total_bytes -= num_bytes;
3141                         if (!total_bytes) {
3142                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3143                                         flash_op = FLASHROM_OPER_PHY_FLASH;
3144                                 else
3145                                         flash_op = FLASHROM_OPER_FLASH;
3146                         } else {
3147                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3148                                         flash_op = FLASHROM_OPER_PHY_SAVE;
3149                                 else
3150                                         flash_op = FLASHROM_OPER_SAVE;
3151                         }
3152                         memcpy(req->params.data_buf, p, num_bytes);
3153                         p += num_bytes;
3154                         status = be_cmd_write_flashrom(adapter, flash_cmd,
3155                                 pflashcomp[i].optype, flash_op, num_bytes);
3156                         if (status) {
3157                                 if ((status == ILLEGAL_IOCTL_REQ) &&
3158                                         (pflashcomp[i].optype ==
3159                                                 OPTYPE_PHY_FW))
3160                                         break;
3161                                 dev_err(&adapter->pdev->dev,
3162                                         "cmd to write to flash rom failed.\n");
3163                                 return -1;
3164                         }
3165                 }
3166         }
3167         return 0;
3168 }
3169
3170 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3171 {
3172         if (fhdr == NULL)
3173                 return 0;
3174         if (fhdr->build[0] == '3')
3175                 return BE_GEN3;
3176         else if (fhdr->build[0] == '2')
3177                 return BE_GEN2;
3178         else
3179                 return 0;
3180 }
3181
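/* Poll the PHYSDEV_CONTROL INP bit once a second, for up to
 * SLIPORT_IDLE_TIMEOUT (30) seconds, so that a FW reset is only issued
 * once the port has gone idle.
 */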
3182 static int lancer_wait_idle(struct be_adapter *adapter)
3183 {
3184 #define SLIPORT_IDLE_TIMEOUT 30
3185         u32 reg_val;
3186         int status = 0, i;
3187
3188         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3189                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3190                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3191                         break;
3192
3193                 ssleep(1);
3194         }
3195
3196         if (i == SLIPORT_IDLE_TIMEOUT)
3197                 status = -1;
3198
3199         return status;
3200 }
3201
3202 static int lancer_fw_reset(struct be_adapter *adapter)
3203 {
3204         int status = 0;
3205
3206         status = lancer_wait_idle(adapter);
3207         if (status)
3208                 return status;
3209
3210         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3211                   PHYSDEV_CONTROL_OFFSET);
3212
3213         return status;
3214 }
3215
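/* Lancer firmware download: the image is streamed to the "/prg" object
 * in 32KB chunks via WRITE_OBJECT commands and then committed with a
 * zero-length write at the final offset. Depending on the reported
 * change_status, the new image may also need a FW reset (done here) or
 * a full reboot before it becomes active.
 */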
3216 static int lancer_fw_download(struct be_adapter *adapter,
3217                                 const struct firmware *fw)
3218 {
3219 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3220 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3221         struct be_dma_mem flash_cmd;
3222         const u8 *data_ptr = NULL;
3223         u8 *dest_image_ptr = NULL;
3224         size_t image_size = 0;
3225         u32 chunk_size = 0;
3226         u32 data_written = 0;
3227         u32 offset = 0;
3228         int status = 0;
3229         u8 add_status = 0;
3230         u8 change_status;
3231
3232         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3233                 dev_err(&adapter->pdev->dev,
3234                         "FW Image not properly aligned. "
3235                         "Length must be 4-byte aligned.\n");
3236                 status = -EINVAL;
3237                 goto lancer_fw_exit;
3238         }
3239
3240         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3241                                 + LANCER_FW_DOWNLOAD_CHUNK;
3242         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3243                                                 &flash_cmd.dma, GFP_KERNEL);
3244         if (!flash_cmd.va) {
3245                 status = -ENOMEM;
3246                 dev_err(&adapter->pdev->dev,
3247                         "Memory allocation failure while flashing\n");
3248                 goto lancer_fw_exit;
3249         }
3250
3251         dest_image_ptr = flash_cmd.va +
3252                                 sizeof(struct lancer_cmd_req_write_object);
3253         image_size = fw->size;
3254         data_ptr = fw->data;
3255
3256         while (image_size) {
3257                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3258
3259                 /* Copy the image chunk content. */
3260                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3261
3262                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3263                                                  chunk_size, offset,
3264                                                  LANCER_FW_DOWNLOAD_LOCATION,
3265                                                  &data_written, &change_status,
3266                                                  &add_status);
3267                 if (status)
3268                         break;
3269
3270                 offset += data_written;
3271                 data_ptr += data_written;
3272                 image_size -= data_written;
3273         }
3274
3275         if (!status) {
3276                 /* Commit the FW written */
3277                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3278                                                  0, offset,
3279                                                  LANCER_FW_DOWNLOAD_LOCATION,
3280                                                  &data_written, &change_status,
3281                                                  &add_status);
3282         }
3283
3284         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3285                                 flash_cmd.dma);
3286         if (status) {
3287                 dev_err(&adapter->pdev->dev,
3288                         "Firmware load error. "
3289                         "Status code: 0x%x Additional Status: 0x%x\n",
3290                         status, add_status);
3291                 goto lancer_fw_exit;
3292         }
3293
3294         if (change_status == LANCER_FW_RESET_NEEDED) {
3295                 status = lancer_fw_reset(adapter);
3296                 if (status) {
3297                         dev_err(&adapter->pdev->dev,
3298                                 "Adapter busy for FW reset.\n"
3299                                 "New FW will not be active.\n");
3300                         goto lancer_fw_exit;
3301                 }
3302         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3303                 dev_err(&adapter->pdev->dev,
3304                         "System reboot required for new FW"
3305                         " to be active\n");
3306         }
3307
3308         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3309 lancer_fw_exit:
3310         return status;
3311 }
3312
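/* BE2/BE3 firmware download: the UFI generation encoded in the file
 * header must match the adapter's generation. Gen3 UFIs carry several
 * image headers; only entries with imageid 1 are flashed.
 */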
3313 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3314 {
3315         struct flash_file_hdr_g2 *fhdr;
3316         struct flash_file_hdr_g3 *fhdr3;
3317         struct image_hdr *img_hdr_ptr = NULL;
3318         struct be_dma_mem flash_cmd;
3319         const u8 *p;
3320         int status = 0, i = 0, num_imgs = 0;
3321
3322         p = fw->data;
3323         fhdr = (struct flash_file_hdr_g2 *) p;
3324
3325         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3326         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3327                                           &flash_cmd.dma, GFP_KERNEL);
3328         if (!flash_cmd.va) {
3329                 status = -ENOMEM;
3330                 dev_err(&adapter->pdev->dev,
3331                         "Memory allocation failure while flashing\n");
3332                 goto be_fw_exit;
3333         }
3334
3335         if ((adapter->generation == BE_GEN3) &&
3336                         (get_ufigen_type(fhdr) == BE_GEN3)) {
3337                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3338                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3339                 for (i = 0; i < num_imgs; i++) {
3340                         img_hdr_ptr = (struct image_hdr *) (fw->data +
3341                                         (sizeof(struct flash_file_hdr_g3) +
3342                                          i * sizeof(struct image_hdr)));
3343                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3344                                 status = be_flash_data(adapter, fw, &flash_cmd,
3345                                                         num_imgs);
3346                 }
3347         } else if ((adapter->generation == BE_GEN2) &&
3348                         (get_ufigen_type(fhdr) == BE_GEN2)) {
3349                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3350         } else {
3351                 dev_err(&adapter->pdev->dev,
3352                         "UFI and Interface are not compatible for flashing\n");
3353                 status = -1;
3354         }
3355
3356         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3357                           flash_cmd.dma);
3358         if (status) {
3359                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3360                 goto be_fw_exit;
3361         }
3362
3363         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3364
3365 be_fw_exit:
3366         return status;
3367 }
3368
3369 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3370 {
3371         const struct firmware *fw;
3372         int status;
3373
3374         if (!netif_running(adapter->netdev)) {
3375                 dev_err(&adapter->pdev->dev,
3376                         "Firmware load not allowed (interface is down)\n");
3377                 return -1;
3378         }
3379
3380         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3381         if (status)
3382                 goto fw_exit;
3383
3384         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3385
3386         if (lancer_chip(adapter))
3387                 status = lancer_fw_download(adapter, fw);
3388         else
3389                 status = be_fw_download(adapter, fw);
3390
3391 fw_exit:
3392         release_firmware(fw);
3393         return status;
3394 }
3395
3396 static const struct net_device_ops be_netdev_ops = {
3397         .ndo_open               = be_open,
3398         .ndo_stop               = be_close,
3399         .ndo_start_xmit         = be_xmit,
3400         .ndo_set_rx_mode        = be_set_rx_mode,
3401         .ndo_set_mac_address    = be_mac_addr_set,
3402         .ndo_change_mtu         = be_change_mtu,
3403         .ndo_get_stats64        = be_get_stats64,
3404         .ndo_validate_addr      = eth_validate_addr,
3405         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3406         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3407         .ndo_set_vf_mac         = be_set_vf_mac,
3408         .ndo_set_vf_vlan        = be_set_vf_vlan,
3409         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3410         .ndo_get_vf_config      = be_get_vf_config,
3411 #ifdef CONFIG_NET_POLL_CONTROLLER
3412         .ndo_poll_controller    = be_netpoll,
3413 #endif
3414 };
3415
3416 static void be_netdev_init(struct net_device *netdev)
3417 {
3418         struct be_adapter *adapter = netdev_priv(netdev);
3419         struct be_eq_obj *eqo;
3420         int i;
3421
3422         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3423                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3424                 NETIF_F_HW_VLAN_TX;
3425         if (be_multi_rxq(adapter))
3426                 netdev->hw_features |= NETIF_F_RXHASH;
3427
3428         netdev->features |= netdev->hw_features |
3429                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3430
3431         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3432                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3433
3434         netdev->priv_flags |= IFF_UNICAST_FLT;
3435
3436         netdev->flags |= IFF_MULTICAST;
3437
3438         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3439
3440         netdev->netdev_ops = &be_netdev_ops;
3441
3442         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3443
3444         for_all_evt_queues(adapter, eqo, i)
3445                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3446 }
3447
3448 static void be_unmap_pci_bars(struct be_adapter *adapter)
3449 {
3450         if (adapter->csr)
3451                 iounmap(adapter->csr);
3452         if (adapter->db)
3453                 iounmap(adapter->db);
3454         if (adapter->roce_db.base)
3455                 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3456 }
3457
3458 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3459 {
3460         struct pci_dev *pdev = adapter->pdev;
3461         u8 __iomem *addr;
3462
3463         addr = pci_iomap(pdev, 2, 0);
3464         if (addr == NULL)
3465                 return -ENOMEM;
3466
3467         adapter->roce_db.base = addr;
3468         adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3469         adapter->roce_db.size = 8192;
3470         adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3471         return 0;
3472 }
3473
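/* Map the PCI BARs this function needs. On Lancer only the doorbell
 * BAR (BAR 0) is mapped, plus the RoCE doorbell BAR for SLI interface
 * type 3. On BE2/BE3 the PF additionally maps the CSR area from BAR 2;
 * the doorbells live in BAR 4, except on BE3 VFs where they are in
 * BAR 0.
 */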
3474 static int be_map_pci_bars(struct be_adapter *adapter)
3475 {
3476         u8 __iomem *addr;
3477         int db_reg;
3478
3479         if (lancer_chip(adapter)) {
3480                 if (be_type_2_3(adapter)) {
3481                         addr = ioremap_nocache(
3482                                         pci_resource_start(adapter->pdev, 0),
3483                                         pci_resource_len(adapter->pdev, 0));
3484                         if (addr == NULL)
3485                                 return -ENOMEM;
3486                         adapter->db = addr;
3487                 }
3488                 if (adapter->if_type == SLI_INTF_TYPE_3) {
3489                         if (lancer_roce_map_pci_bars(adapter))
3490                                 goto pci_map_err;
3491                 }
3492                 return 0;
3493         }
3494
3495         if (be_physfn(adapter)) {
3496                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3497                                 pci_resource_len(adapter->pdev, 2));
3498                 if (addr == NULL)
3499                         return -ENOMEM;
3500                 adapter->csr = addr;
3501         }
3502
3503         if (adapter->generation == BE_GEN2) {
3504                 db_reg = 4;
3505         } else {
3506                 if (be_physfn(adapter))
3507                         db_reg = 4;
3508                 else
3509                         db_reg = 0;
3510         }
3511         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3512                                 pci_resource_len(adapter->pdev, db_reg));
3513         if (addr == NULL)
3514                 goto pci_map_err;
3515         adapter->db = addr;
3516         if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3517                 adapter->roce_db.size = 4096;
3518                 adapter->roce_db.io_addr =
3519                                 pci_resource_start(adapter->pdev, db_reg);
3520                 adapter->roce_db.total_size =
3521                                 pci_resource_len(adapter->pdev, db_reg);
3522         }
3523         return 0;
3524 pci_map_err:
3525         be_unmap_pci_bars(adapter);
3526         return -ENOMEM;
3527 }
3528
3529 static void be_ctrl_cleanup(struct be_adapter *adapter)
3530 {
3531         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3532
3533         be_unmap_pci_bars(adapter);
3534
3535         if (mem->va)
3536                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3537                                   mem->dma);
3538
3539         mem = &adapter->rx_filter;
3540         if (mem->va)
3541                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3542                                   mem->dma);
3543 }
3544
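/* The mailbox used to issue commands to FW must be 16-byte aligned, so
 * 16 extra bytes are allocated and both the CPU and DMA addresses are
 * rounded up with PTR_ALIGN. For example (addresses illustrative only):
 *
 *      alloced va = 0x...1008, size = sizeof(mailbox) + 16
 *      aligned va = PTR_ALIGN(0x...1008, 16) = 0x...1010
 *
 * The unaligned pointer is kept in mbox_mem_alloced so that the region
 * can be freed later.
 */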
3545 static int be_ctrl_init(struct be_adapter *adapter)
3546 {
3547         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3548         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3549         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3550         int status;
3551
3552         status = be_map_pci_bars(adapter);
3553         if (status)
3554                 goto done;
3555
3556         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3557         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3558                                                 mbox_mem_alloc->size,
3559                                                 &mbox_mem_alloc->dma,
3560                                                 GFP_KERNEL);
3561         if (!mbox_mem_alloc->va) {
3562                 status = -ENOMEM;
3563                 goto unmap_pci_bars;
3564         }
3565         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3566         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3567         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3568         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3569
3570         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3571         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3572                                         &rx_filter->dma, GFP_KERNEL);
3573         if (rx_filter->va == NULL) {
3574                 status = -ENOMEM;
3575                 goto free_mbox;
3576         }
3577         memset(rx_filter->va, 0, rx_filter->size);
3578         mutex_init(&adapter->mbox_lock);
3579         spin_lock_init(&adapter->mcc_lock);
3580         spin_lock_init(&adapter->mcc_cq_lock);
3581
3582         init_completion(&adapter->flash_compl);
3583         pci_save_state(adapter->pdev);
3584         return 0;
3585
3586 free_mbox:
3587         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3588                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3589
3590 unmap_pci_bars:
3591         be_unmap_pci_bars(adapter);
3592
3593 done:
3594         return status;
3595 }
3596
3597 static void be_stats_cleanup(struct be_adapter *adapter)
3598 {
3599         struct be_dma_mem *cmd = &adapter->stats_cmd;
3600
3601         if (cmd->va)
3602                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3603                                   cmd->va, cmd->dma);
3604 }
3605
3606 static int be_stats_init(struct be_adapter *adapter)
3607 {
3608         struct be_dma_mem *cmd = &adapter->stats_cmd;
3609
3610         if (adapter->generation == BE_GEN2) {
3611                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3612         } else {
3613                 if (lancer_chip(adapter))
3614                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3615                 else
3616                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3617         }
3618         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3619                                      GFP_KERNEL);
3620         if (cmd->va == NULL)
3621                 return -1;
3622         memset(cmd->va, 0, cmd->size);
3623         return 0;
3624 }
3625
3626 static void __devexit be_remove(struct pci_dev *pdev)
3627 {
3628         struct be_adapter *adapter = pci_get_drvdata(pdev);
3629
3630         if (!adapter)
3631                 return;
3632
3633         be_roce_dev_remove(adapter);
3634
3635         cancel_delayed_work_sync(&adapter->func_recovery_work);
3636
3637         unregister_netdev(adapter->netdev);
3638
3639         be_clear(adapter);
3640
3641         /* tell fw we're done with firing cmds */
3642         be_cmd_fw_clean(adapter);
3643
3644         be_stats_cleanup(adapter);
3645
3646         be_ctrl_cleanup(adapter);
3647
3648         pci_disable_pcie_error_reporting(pdev);
3649
3650         pci_set_drvdata(pdev, NULL);
3651         pci_release_regions(pdev);
3652         pci_disable_device(pdev);
3653
3654         free_netdev(adapter->netdev);
3655 }
3656
3657 bool be_is_wol_supported(struct be_adapter *adapter)
3658 {
3659         return (adapter->wol_cap & BE_WOL_CAP) &&
3660                 !be_is_wol_excluded(adapter);
3661 }
3662
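/* Query FW's extended FAT capabilities and return the debug level
 * configured for UART tracing on module 0; 0 is returned when the
 * query (or the DMA allocation for it) fails.
 */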
3663 u32 be_get_fw_log_level(struct be_adapter *adapter)
3664 {
3665         struct be_dma_mem extfat_cmd;
3666         struct be_fat_conf_params *cfgs;
3667         int status;
3668         u32 level = 0;
3669         int j;
3670
3671         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3672         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3673         extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
3674                         extfat_cmd.size, &extfat_cmd.dma, GFP_KERNEL);
3675
3676         if (!extfat_cmd.va) {
3677                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3678                         __func__);
3679                 goto err;
3680         }
3681
3682         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3683         if (!status) {
3684                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3685                                                 sizeof(struct be_cmd_resp_hdr));
3686                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3687                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3688                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3689                 }
3690         }
3691         dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size,
3692                           extfat_cmd.va, extfat_cmd.dma);
3693 err:
3694         return level;
3695 }
3696
3697 static int be_get_initial_config(struct be_adapter *adapter)
3698 {
3699         int status;
3700         u32 level;
3701
3702         status = be_cmd_get_cntl_attributes(adapter);
3703         if (status)
3704                 return status;
3705
3706         status = be_cmd_get_acpi_wol_cap(adapter);
3707         if (status) {
3708                 /* in case of a failure to get wol capabilities
3709                  * check the exclusion list to determine WOL capability */
3710                 if (!be_is_wol_excluded(adapter))
3711                         adapter->wol_cap |= BE_WOL_CAP;
3712         }
3713
3714         if (be_is_wol_supported(adapter))
3715                 adapter->wol = true;
3716
3717         /* Must be a power of 2 or else MODULO will BUG_ON */
3718         adapter->be_get_temp_freq = 64;
3719
3720         level = be_get_fw_log_level(adapter);
3721         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3722
3723         return 0;
3724 }
3725
3726 static int be_dev_type_check(struct be_adapter *adapter)
3727 {
3728         struct pci_dev *pdev = adapter->pdev;
3729         u32 sli_intf = 0;
3730
3731         switch (pdev->device) {
3732         case BE_DEVICE_ID1:
3733         case OC_DEVICE_ID1:
3734                 adapter->generation = BE_GEN2;
3735                 break;
3736         case BE_DEVICE_ID2:
3737         case OC_DEVICE_ID2:
3738                 adapter->generation = BE_GEN3;
3739                 break;
3740         case OC_DEVICE_ID3:
3741         case OC_DEVICE_ID4:
3742                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3743                 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3744                                                 SLI_INTF_IF_TYPE_SHIFT;
3747                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3748                         !be_type_2_3(adapter)) {
3749                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3750                         return -EINVAL;
3751                 }
3752                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3753                                          SLI_INTF_FAMILY_SHIFT);
3754                 adapter->generation = BE_GEN3;
3755                 break;
3756         case OC_DEVICE_ID5:
3757                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3758                 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3759                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3760                         return -EINVAL;
3761                 }
3762                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3763                                          SLI_INTF_FAMILY_SHIFT);
3764                 adapter->generation = BE_GEN3;
3765                 break;
3766         default:
3767                 adapter->generation = 0;
3768         }
3769
3770         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3771         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3772         return 0;
3773 }
3774
3775 static int lancer_recover_func(struct be_adapter *adapter)
3776 {
3777         int status;
3778
3779         status = lancer_test_and_set_rdy_state(adapter);
3780         if (status)
3781                 goto err;
3782
3783         if (netif_running(adapter->netdev))
3784                 be_close(adapter->netdev);
3785
3786         be_clear(adapter);
3787
3788         adapter->hw_error = false;
3789         adapter->fw_timeout = false;
3790
3791         status = be_setup(adapter);
3792         if (status)
3793                 goto err;
3794
3795         if (netif_running(adapter->netdev)) {
3796                 status = be_open(adapter->netdev);
3797                 if (status)
3798                         goto err;
3799         }
3800
3801         dev_info(&adapter->pdev->dev,
3802                  "Adapter SLIPORT recovery succeeded\n");
3803         return 0;
3804 err:
3805         dev_err(&adapter->pdev->dev,
3806                 "Adapter SLIPORT recovery failed\n");
3807
3808         return status;
3809 }
3810
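/* Periodic (1-second) recovery task. On Lancer chips a detected HW
 * error triggers in-place recovery: the netdev is detached, the
 * SLIPORT is reset and re-initialized by lancer_recover_func(), and
 * the netdev is re-attached on success. EEH errors are left to the
 * PCI error handlers further below.
 */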
3811 static void be_func_recovery_task(struct work_struct *work)
3812 {
3813         struct be_adapter *adapter =
3814                 container_of(work, struct be_adapter,  func_recovery_work.work);
3815         int status;
3816
3817         be_detect_error(adapter);
3818
3819         if (adapter->hw_error && lancer_chip(adapter)) {
3821                 if (adapter->eeh_error)
3822                         goto out;
3823
3824                 rtnl_lock();
3825                 netif_device_detach(adapter->netdev);
3826                 rtnl_unlock();
3827
3828                 status = lancer_recover_func(adapter);
3829
3830                 if (!status)
3831                         netif_device_attach(adapter->netdev);
3832         }
3833
3834 out:
3835         schedule_delayed_work(&adapter->func_recovery_work,
3836                               msecs_to_jiffies(1000));
3837 }
3838
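/* Periodic (1-second) housekeeping: reap MCC completions while the
 * interface is down, refresh HW stats, read the die temperature every
 * be_get_temp_freq iterations, replenish RX rings that ran dry and
 * adapt the interrupt delay of each event queue.
 */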
3839 static void be_worker(struct work_struct *work)
3840 {
3841         struct be_adapter *adapter =
3842                 container_of(work, struct be_adapter, work.work);
3843         struct be_rx_obj *rxo;
3844         struct be_eq_obj *eqo;
3845         int i;
3846
3847         /* when interrupts are not yet enabled, just reap any pending
3848          * mcc completions */
3849         if (!netif_running(adapter->netdev)) {
3850                 local_bh_disable();
3851                 be_process_mcc(adapter);
3852                 local_bh_enable();
3853                 goto reschedule;
3854         }
3855
3856         if (!adapter->stats_cmd_sent) {
3857                 if (lancer_chip(adapter))
3858                         lancer_cmd_get_pport_stats(adapter,
3859                                                 &adapter->stats_cmd);
3860                 else
3861                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3862         }
3863
3864         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3865                 be_cmd_get_die_temperature(adapter);
3866
3867         for_all_rx_queues(adapter, rxo, i) {
3868                 if (rxo->rx_post_starved) {
3869                         rxo->rx_post_starved = false;
3870                         be_post_rx_frags(rxo, GFP_KERNEL);
3871                 }
3872         }
3873
3874         for_all_evt_queues(adapter, eqo, i)
3875                 be_eqd_update(adapter, eqo);
3876
3877 reschedule:
3878         adapter->work_counter++;
3879         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3880 }
3881
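/* A function reset is skipped when VFs are already enabled -
 * presumably because resetting the PF here (e.g. when a kdump kernel
 * re-probes the device) would yank resources from VFs that may still
 * be in use.
 */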
3882 static bool be_reset_required(struct be_adapter *adapter)
3883 {
3884         return be_find_vfs(adapter, ENABLED) <= 0;
3885 }
3886
3887 static char *mc_name(struct be_adapter *adapter)
3888 {
3889         if (adapter->function_mode & FLEX10_MODE)
3890                 return "FLEX10";
3891         else if (adapter->function_mode & VNIC_MODE)
3892                 return "vNIC";
3893         else if (adapter->function_mode & UMC_ENABLED)
3894                 return "UMC";
3895         else
3896                 return "";
3897 }
3898
3899 static inline char *func_name(struct be_adapter *adapter)
3900 {
3901         return be_physfn(adapter) ? "PF" : "VF";
3902 }
3903
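/* Probe sequence: enable and map the device, identify the chip
 * generation from the PCI device id, set the DMA mask (64-bit with a
 * 32-bit fallback), set up the mailbox and stats DMA memory, sync with
 * FW, then be_setup() and register_netdev(). Error paths unwind in
 * reverse order through the labels at the bottom.
 */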
3904 static int __devinit be_probe(struct pci_dev *pdev,
3905                         const struct pci_device_id *pdev_id)
3906 {
3907         int status = 0;
3908         struct be_adapter *adapter;
3909         struct net_device *netdev;
3910         char port_name;
3911
3912         status = pci_enable_device(pdev);
3913         if (status)
3914                 goto do_none;
3915
3916         status = pci_request_regions(pdev, DRV_NAME);
3917         if (status)
3918                 goto disable_dev;
3919         pci_set_master(pdev);
3920
3921         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3922         if (netdev == NULL) {
3923                 status = -ENOMEM;
3924                 goto rel_reg;
3925         }
3926         adapter = netdev_priv(netdev);
3927         adapter->pdev = pdev;
3928         pci_set_drvdata(pdev, adapter);
3929
3930         status = be_dev_type_check(adapter);
3931         if (status)
3932                 goto free_netdev;
3933
3934         adapter->netdev = netdev;
3935         SET_NETDEV_DEV(netdev, &pdev->dev);
3936
3937         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3938         if (!status) {
3939                 netdev->features |= NETIF_F_HIGHDMA;
3940         } else {
3941                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3942                 if (status) {
3943                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3944                         goto free_netdev;
3945                 }
3946         }
3947
3948         status = pci_enable_pcie_error_reporting(pdev);
3949         if (status)
3950                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
3951
3952         status = be_ctrl_init(adapter);
3953         if (status)
3954                 goto free_netdev;
3955
3956         /* sync up with fw's ready state */
3957         if (be_physfn(adapter)) {
3958                 status = be_fw_wait_ready(adapter);
3959                 if (status)
3960                         goto ctrl_clean;
3961         }
3962
3963         /* tell fw we're ready to fire cmds */
3964         status = be_cmd_fw_init(adapter);
3965         if (status)
3966                 goto ctrl_clean;
3967
3968         if (be_reset_required(adapter)) {
3969                 status = be_cmd_reset_function(adapter);
3970                 if (status)
3971                         goto ctrl_clean;
3972         }
3973
3974         /* The INTR bit may be set in the card when probed by a kdump kernel
3975          * after a crash.
3976          */
3977         if (!lancer_chip(adapter))
3978                 be_intr_set(adapter, false);
3979
3980         status = be_stats_init(adapter);
3981         if (status)
3982                 goto ctrl_clean;
3983
3984         status = be_get_initial_config(adapter);
3985         if (status)
3986                 goto stats_clean;
3987
3988         INIT_DELAYED_WORK(&adapter->work, be_worker);
3989         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
3990         adapter->rx_fc = adapter->tx_fc = true;
3991
3992         status = be_setup(adapter);
3993         if (status)
3994                 goto stats_clean;
3995
3996         be_netdev_init(netdev);
3997         status = register_netdev(netdev);
3998         if (status != 0)
3999                 goto unsetup;
4000
4001         be_roce_dev_add(adapter);
4002
4003         schedule_delayed_work(&adapter->func_recovery_work,
4004                               msecs_to_jiffies(1000));
4005
4006         be_cmd_query_port_name(adapter, &port_name);
4007
4008         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4009                  func_name(adapter), mc_name(adapter), port_name);
4010
4011         return 0;
4012
4013 unsetup:
4014         be_clear(adapter);
4015 stats_clean:
4016         be_stats_cleanup(adapter);
4017 ctrl_clean:
4018         be_ctrl_cleanup(adapter);
4019 free_netdev:
4020         free_netdev(netdev);
4021         pci_set_drvdata(pdev, NULL);
4022 rel_reg:
4023         pci_release_regions(pdev);
4024 disable_dev:
4025         pci_disable_device(pdev);
4026 do_none:
4027         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4028         return status;
4029 }
4030
4031 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4032 {
4033         struct be_adapter *adapter = pci_get_drvdata(pdev);
4034         struct net_device *netdev =  adapter->netdev;
4035
4036         if (adapter->wol)
4037                 be_setup_wol(adapter, true);
4038
4039         cancel_delayed_work_sync(&adapter->func_recovery_work);
4040
4041         netif_device_detach(netdev);
4042         if (netif_running(netdev)) {
4043                 rtnl_lock();
4044                 be_close(netdev);
4045                 rtnl_unlock();
4046         }
4047         be_clear(adapter);
4048
4049         pci_save_state(pdev);
4050         pci_disable_device(pdev);
4051         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4052         return 0;
4053 }
4054
4055 static int be_resume(struct pci_dev *pdev)
4056 {
4057         int status = 0;
4058         struct be_adapter *adapter = pci_get_drvdata(pdev);
4059         struct net_device *netdev =  adapter->netdev;
4060
4061         netif_device_detach(netdev);
4062
4063         status = pci_enable_device(pdev);
4064         if (status)
4065                 return status;
4066
4067         pci_set_power_state(pdev, PCI_D0);
4068         pci_restore_state(pdev);
4069
4070         /* tell fw we're ready to fire cmds */
4071         status = be_cmd_fw_init(adapter);
4072         if (status)
4073                 return status;
4074
4075         be_setup(adapter);
4076         if (netif_running(netdev)) {
4077                 rtnl_lock();
4078                 be_open(netdev);
4079                 rtnl_unlock();
4080         }
4081
4082         schedule_delayed_work(&adapter->func_recovery_work,
4083                               msecs_to_jiffies(1000));
4084         netif_device_attach(netdev);
4085
4086         if (adapter->wol)
4087                 be_setup_wol(adapter, false);
4088
4089         return 0;
4090 }
4091
4092 /*
4093  * An FLR will stop BE from DMAing any data.
4094  */
4095 static void be_shutdown(struct pci_dev *pdev)
4096 {
4097         struct be_adapter *adapter = pci_get_drvdata(pdev);
4098
4099         if (!adapter)
4100                 return;
4101
4102         cancel_delayed_work_sync(&adapter->work);
4103         cancel_delayed_work_sync(&adapter->func_recovery_work);
4104
4105         netif_device_detach(adapter->netdev);
4106
4107         if (adapter->wol)
4108                 be_setup_wol(adapter, true);
4109
4110         be_cmd_reset_function(adapter);
4111
4112         pci_disable_device(pdev);
4113 }
4114
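/* PCI/EEH error handling: when an error is detected the netdev is
 * detached and all HW resources are torn down; slot_reset re-enables
 * the device and waits for FW readiness; resume re-runs FW init and
 * be_setup() before re-attaching the netdev.
 */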
4115 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4116                                 pci_channel_state_t state)
4117 {
4118         struct be_adapter *adapter = pci_get_drvdata(pdev);
4119         struct net_device *netdev =  adapter->netdev;
4120
4121         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4122
4123         adapter->eeh_error = true;
4124
4125         cancel_delayed_work_sync(&adapter->func_recovery_work);
4126
4127         rtnl_lock();
4128         netif_device_detach(netdev);
4129         rtnl_unlock();
4130
4131         if (netif_running(netdev)) {
4132                 rtnl_lock();
4133                 be_close(netdev);
4134                 rtnl_unlock();
4135         }
4136         be_clear(adapter);
4137
4138         if (state == pci_channel_io_perm_failure)
4139                 return PCI_ERS_RESULT_DISCONNECT;
4140
4141         pci_disable_device(pdev);
4142
4143         /* The error could cause the FW to trigger a flash debug dump.
4144          * Resetting the card while flash dump is in progress
4145          * can cause it not to recover; wait for it to finish
4146          */
4147         ssleep(30);
4148         return PCI_ERS_RESULT_NEED_RESET;
4149 }
4150
4151 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4152 {
4153         struct be_adapter *adapter = pci_get_drvdata(pdev);
4154         int status;
4155
4156         dev_info(&adapter->pdev->dev, "EEH reset\n");
4157         be_clear_all_error(adapter);
4158
4159         status = pci_enable_device(pdev);
4160         if (status)
4161                 return PCI_ERS_RESULT_DISCONNECT;
4162
4163         pci_set_master(pdev);
4164         pci_set_power_state(pdev, PCI_D0);
4165         pci_restore_state(pdev);
4166
4167         /* Check if card is ok and fw is ready */
4168         status = be_fw_wait_ready(adapter);
4169         if (status)
4170                 return PCI_ERS_RESULT_DISCONNECT;
4171
4172         pci_cleanup_aer_uncorrect_error_status(pdev);
4173         return PCI_ERS_RESULT_RECOVERED;
4174 }
4175
4176 static void be_eeh_resume(struct pci_dev *pdev)
4177 {
4178         int status = 0;
4179         struct be_adapter *adapter = pci_get_drvdata(pdev);
4180         struct net_device *netdev =  adapter->netdev;
4181
4182         dev_info(&adapter->pdev->dev, "EEH resume\n");
4183
4184         pci_save_state(pdev);
4185
4186         /* tell fw we're ready to fire cmds */
4187         status = be_cmd_fw_init(adapter);
4188         if (status)
4189                 goto err;
4190
4191         status = be_cmd_reset_function(adapter);
4192         if (status)
4193                 goto err;
4194
4195         status = be_setup(adapter);
4196         if (status)
4197                 goto err;
4198
4199         if (netif_running(netdev)) {
4200                 status = be_open(netdev);
4201                 if (status)
4202                         goto err;
4203         }
4204
4205         schedule_delayed_work(&adapter->func_recovery_work,
4206                               msecs_to_jiffies(1000));
4207         netif_device_attach(netdev);
4208         return;
4209 err:
4210         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4211 }
4212
4213 static const struct pci_error_handlers be_eeh_handlers = {
4214         .error_detected = be_eeh_err_detected,
4215         .slot_reset = be_eeh_reset,
4216         .resume = be_eeh_resume,
4217 };
4218
4219 static struct pci_driver be_driver = {
4220         .name = DRV_NAME,
4221         .id_table = be_dev_ids,
4222         .probe = be_probe,
4223         .remove = be_remove,
4224         .suspend = be_suspend,
4225         .resume = be_resume,
4226         .shutdown = be_shutdown,
4227         .err_handler = &be_eeh_handlers
4228 };
4229
4230 static int __init be_init_module(void)
4231 {
4232         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4233             rx_frag_size != 2048) {
4234                 printk(KERN_WARNING DRV_NAME
4235                         " : Module param rx_frag_size must be 2048/4096/8192."
4236                         " Using 2048\n");
4237                 rx_frag_size = 2048;
4238         }
4239
4240         return pci_register_driver(&be_driver);
4241 }
4242 module_init(be_init_module);
4243
4244 static void __exit be_exit_module(void)
4245 {
4246         pci_unregister_driver(&be_driver);
4247 }
4248 module_exit(be_exit_module);