/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds received data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

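/* Enable/disable host interrupt delivery by flipping the hostintr bit
 * in the MEMBAR control register in PCI config space.
 */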
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

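/* Notify the chip of newly posted RX buffer descriptors by ringing the
 * RQ doorbell with the queue id and the number of entries posted.
 */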
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC is successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

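/* Fold a 16-bit h/w counter into a 32-bit accumulator, detecting a wrap
 * when the new reading is smaller than the accumulated low word.
 * E.g. with *acc == 0x0001FFFE a new reading of 0x0002 implies one wrap:
 * the result is 0x00010002 + 65536 == 0x00020002.
 */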
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* this erx HW counter wraps around after 65535; the
                 * driver accumulates it into a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

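/* Convert the chip/firmware specific stats layout (v0, v1 or Lancer
 * pport) into the common adapter->drv_stats format.
 */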
void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

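/* The u64_stats_fetch_begin/retry pairs below implement seqcount-style
 * retry loops so that 64-bit counters are read consistently even on
 * 32-bit hosts, where such reads are not atomic.
 */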
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* rx_drops_no_pbuf is not a per-interface counter; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

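/* Fill a data WRB with the DMA address and length of one tx fragment */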
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If the vlan priority provided by the OS is NOT in the available
         * bitmap, use the recommended priority instead.
         */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

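/* Populate the header WRB: checksum/LSO/VLAN offload flags, the number
 * of WRBs that follow and the total payload length.
 */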
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

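/* DMA-map the skb head and fragments and write the header and data WRBs
 * into the tx queue. Returns the number of bytes queued, or 0 on a
 * mapping error, in which case all mappings made so far are unwound.
 */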
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

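/* Insert the VLAN tag (and the QnQ outer tag, if any) into the packet
 * data itself instead of relying on h/w tag insertion.
 */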
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* f/w workaround: setting skip_hw_vlan = 1 informs the f/w
                 * to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
         * may cause a transmit stall on that port. So the work-around is to
         * pad such packets to a 36-byte length.
         */
        if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
         * For padded packets, Lancer computes incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

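/* Main transmit entry point (ndo_start_xmit) */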
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb)
                return NETDEV_TX_OK;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is a new value, program it; else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

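/* Adaptive interrupt coalescing: recompute the EQ delay from the RX
 * packet rate observed over the last second and program it into the
 * chip if it changed.
 */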
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

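/* Return the page_info for the given rx frag index; the backing page is
 * DMA-unmapped only when its last sharing fragment is consumed.
 */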
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

1373 /*
1374  * skb_fill_rx_data forms a complete skb for an ether frame
1375  * indicated by rxcp.
1376  */
1377 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1378                              struct be_rx_compl_info *rxcp)
1379 {
1380         struct be_queue_info *rxq = &rxo->q;
1381         struct be_rx_page_info *page_info;
1382         u16 i, j;
1383         u16 hdr_len, curr_frag_len, remaining;
1384         u8 *start;
1385
1386         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1387         start = page_address(page_info->page) + page_info->page_offset;
1388         prefetch(start);
1389
1390         /* Copy data in the first descriptor of this completion */
1391         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1392
1393         skb->len = curr_frag_len;
1394         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1395                 memcpy(skb->data, start, curr_frag_len);
1396                 /* Complete packet has now been moved to data */
1397                 put_page(page_info->page);
1398                 skb->data_len = 0;
1399                 skb->tail += curr_frag_len;
1400         } else {
1401                 hdr_len = ETH_HLEN;
1402                 memcpy(skb->data, start, hdr_len);
1403                 skb_shinfo(skb)->nr_frags = 1;
1404                 skb_frag_set_page(skb, 0, page_info->page);
1405                 skb_shinfo(skb)->frags[0].page_offset =
1406                                         page_info->page_offset + hdr_len;
1407                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1408                 skb->data_len = curr_frag_len - hdr_len;
1409                 skb->truesize += rx_frag_size;
1410                 skb->tail += hdr_len;
1411         }
1412         page_info->page = NULL;
1413
1414         if (rxcp->pkt_size <= rx_frag_size) {
1415                 BUG_ON(rxcp->num_rcvd != 1);
1416                 return;
1417         }
1418
1419         /* More frags present for this completion */
1420         index_inc(&rxcp->rxq_idx, rxq->len);
1421         remaining = rxcp->pkt_size - curr_frag_len;
1422         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1423                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1424                 curr_frag_len = min(remaining, rx_frag_size);
1425
1426                 /* Coalesce all frags from the same physical page in one slot */
1427                 if (page_info->page_offset == 0) {
1428                         /* Fresh page */
1429                         j++;
1430                         skb_frag_set_page(skb, j, page_info->page);
1431                         skb_shinfo(skb)->frags[j].page_offset =
1432                                                         page_info->page_offset;
1433                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1434                         skb_shinfo(skb)->nr_frags++;
1435                 } else {
1436                         put_page(page_info->page);
1437                 }
1438
1439                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1440                 skb->len += curr_frag_len;
1441                 skb->data_len += curr_frag_len;
1442                 skb->truesize += rx_frag_size;
1443                 remaining -= curr_frag_len;
1444                 index_inc(&rxcp->rxq_idx, rxq->len);
1445                 page_info->page = NULL;
1446         }
1447         BUG_ON(j >= MAX_SKB_FRAGS);
1448 }
1449
1450 /* Process the RX completion indicated by rxcp when GRO is disabled */
1451 static void be_rx_compl_process(struct be_rx_obj *rxo,
1452                                 struct be_rx_compl_info *rxcp)
1453 {
1454         struct be_adapter *adapter = rxo->adapter;
1455         struct net_device *netdev = adapter->netdev;
1456         struct sk_buff *skb;
1457
1458         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1459         if (unlikely(!skb)) {
1460                 rx_stats(rxo)->rx_drops_no_skbs++;
1461                 be_rx_compl_discard(rxo, rxcp);
1462                 return;
1463         }
1464
1465         skb_fill_rx_data(rxo, skb, rxcp);
1466
1467         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1468                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1469         else
1470                 skb_checksum_none_assert(skb);
1471
1472         skb->protocol = eth_type_trans(skb, netdev);
1473         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1474         if (netdev->features & NETIF_F_RXHASH)
1475                 skb->rxhash = rxcp->rss_hash;
1476
1478         if (rxcp->vlanf)
1479                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1480
1481         netif_receive_skb(skb);
1482 }
1483
1484 /* Process the RX completion indicated by rxcp when GRO is enabled */
1485 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1486                                     struct napi_struct *napi,
1487                                     struct be_rx_compl_info *rxcp)
1488 {
1489         struct be_adapter *adapter = rxo->adapter;
1490         struct be_rx_page_info *page_info;
1491         struct sk_buff *skb = NULL;
1492         struct be_queue_info *rxq = &rxo->q;
1493         u16 remaining, curr_frag_len;
1494         u16 i, j;
1495
1496         skb = napi_get_frags(napi);
1497         if (!skb) {
1498                 be_rx_compl_discard(rxo, rxcp);
1499                 return;
1500         }
1501
1502         remaining = rxcp->pkt_size;
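             /* Note: j is a u16 initialized to -1; this relies on unsigned
              * wraparound so that the first "fresh page" j++ below lands on
              * frag index 0.
              */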
1503         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1504                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1505
1506                 curr_frag_len = min(remaining, rx_frag_size);
1507
1508                 /* Coalesce all frags from the same physical page in one slot */
1509                 if (i == 0 || page_info->page_offset == 0) {
1510                         /* First frag or Fresh page */
1511                         j++;
1512                         skb_frag_set_page(skb, j, page_info->page);
1513                         skb_shinfo(skb)->frags[j].page_offset =
1514                                                         page_info->page_offset;
1515                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1516                 } else {
1517                         put_page(page_info->page);
1518                 }
1519                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1520                 skb->truesize += rx_frag_size;
1521                 remaining -= curr_frag_len;
1522                 index_inc(&rxcp->rxq_idx, rxq->len);
1523                 memset(page_info, 0, sizeof(*page_info));
1524         }
1525         BUG_ON(j >= MAX_SKB_FRAGS);
1526
1527         skb_shinfo(skb)->nr_frags = j + 1;
1528         skb->len = rxcp->pkt_size;
1529         skb->data_len = rxcp->pkt_size;
1530         skb->ip_summed = CHECKSUM_UNNECESSARY;
1531         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1532         if (adapter->netdev->features & NETIF_F_RXHASH)
1533                 skb->rxhash = rxcp->rss_hash;
1534
1535         if (rxcp->vlanf)
1536                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1537
1538         napi_gro_frags(napi);
1539 }
1540
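     /* The two parse helpers below extract each field of the completion
      * descriptor with AMAP_GET_BITS(), using the amap_eth_rx_compl_v1/v0
      * template structs to locate the named bit-field.
      */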
1541 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1542                                  struct be_rx_compl_info *rxcp)
1543 {
1544         rxcp->pkt_size =
1545                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1546         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1547         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1548         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1549         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1550         rxcp->ip_csum =
1551                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1552         rxcp->l4_csum =
1553                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1554         rxcp->ipv6 =
1555                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1556         rxcp->rxq_idx =
1557                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1558         rxcp->num_rcvd =
1559                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1560         rxcp->pkt_type =
1561                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1562         rxcp->rss_hash =
1563                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1564         if (rxcp->vlanf) {
1565                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1566                                           compl);
1567                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1568                                                compl);
1569         }
1570         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1571 }
1572
1573 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1574                                  struct be_rx_compl_info *rxcp)
1575 {
1576         rxcp->pkt_size =
1577                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1578         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1579         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1580         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1581         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1582         rxcp->ip_csum =
1583                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1584         rxcp->l4_csum =
1585                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1586         rxcp->ipv6 =
1587                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1588         rxcp->rxq_idx =
1589                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1590         rxcp->num_rcvd =
1591                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1592         rxcp->pkt_type =
1593                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1594         rxcp->rss_hash =
1595                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1596         if (rxcp->vlanf) {
1597                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1598                                           compl);
1599                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1600                                                compl);
1601         }
1602         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1603         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1604                                       ip_frag, compl);
1605 }
1606
1607 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1608 {
1609         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1610         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1611         struct be_adapter *adapter = rxo->adapter;
1612
1613         /* For checking the valid bit it is OK to use either definition as the
1614          * valid bit is at the same position in both v0 and v1 Rx compl */
1615         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1616                 return NULL;
1617
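             /* rmb() ensures the rest of the completion entry is read only
              * after the valid bit has been observed set.
              */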
1618         rmb();
1619         be_dws_le_to_cpu(compl, sizeof(*compl));
1620
1621         if (adapter->be3_native)
1622                 be_parse_rx_compl_v1(compl, rxcp);
1623         else
1624                 be_parse_rx_compl_v0(compl, rxcp);
1625
1626         if (rxcp->ip_frag)
1627                 rxcp->l4_csum = 0;
1628
1629         if (rxcp->vlanf) {
1630                 /* vlanf could be wrongly set in some cards.
1631                  * Ignore it if vtm is not set */
1632                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1633                         rxcp->vlanf = 0;
1634
1635                 if (!lancer_chip(adapter))
1636                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1637
1638                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1639                     !adapter->vlan_tag[rxcp->vlan_tag])
1640                         rxcp->vlanf = 0;
1641         }
1642
1643         /* As the compl has been parsed, reset it; we won't touch it again */
1644         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1645
1646         queue_tail_inc(&rxo->cq);
1647         return rxcp;
1648 }
1649
1650 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1651 {
1652         u32 order = get_order(size);
1653
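             /* Higher-order allocations are made compound so that the
              * per-fragment get_page()/put_page() references operate on the
              * head page of the allocation.
              */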
1654         if (order > 0)
1655                 gfp |= __GFP_COMP;
1656         return  alloc_pages(gfp, order);
1657 }
1658
1659 /*
1660  * Allocate a page, split it to fragments of size rx_frag_size and post as
1661  * receive buffers to BE
1662  */
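     /* For example, assuming 4 KiB pages and the default rx_frag_size of
      * 2048: big_page_size works out to 4096, each page is carved into two
      * fragments, and the second fragment is marked last_page_user so the
      * DMA mapping is released only once both fragments are consumed.
      */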
1663 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1664 {
1665         struct be_adapter *adapter = rxo->adapter;
1666         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1667         struct be_queue_info *rxq = &rxo->q;
1668         struct page *pagep = NULL;
1669         struct be_eth_rx_d *rxd;
1670         u64 page_dmaaddr = 0, frag_dmaaddr;
1671         u32 posted, page_offset = 0;
1672
1673         page_info = &rxo->page_info_tbl[rxq->head];
1674         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1675                 if (!pagep) {
1676                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1677                         if (unlikely(!pagep)) {
1678                                 rx_stats(rxo)->rx_post_fail++;
1679                                 break;
1680                         }
1681                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1682                                                     0, adapter->big_page_size,
1683                                                     DMA_FROM_DEVICE);
1684                         page_info->page_offset = 0;
1685                 } else {
1686                         get_page(pagep);
1687                         page_info->page_offset = page_offset + rx_frag_size;
1688                 }
1689                 page_offset = page_info->page_offset;
1690                 page_info->page = pagep;
1691                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1692                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1693
1694                 rxd = queue_head_node(rxq);
1695                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1696                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1697
1698                 /* Any space left in the current big page for another frag? */
1699                 if ((page_offset + rx_frag_size + rx_frag_size) >
1700                                         adapter->big_page_size) {
1701                         pagep = NULL;
1702                         page_info->last_page_user = true;
1703                 }
1704
1705                 prev_page_info = page_info;
1706                 queue_head_inc(rxq);
1707                 page_info = &rxo->page_info_tbl[rxq->head];
1708         }
1709         if (pagep)
1710                 prev_page_info->last_page_user = true;
1711
1712         if (posted) {
1713                 atomic_add(posted, &rxq->used);
1714                 be_rxq_notify(adapter, rxq->id, posted);
1715         } else if (atomic_read(&rxq->used) == 0) {
1716                 /* Let be_worker replenish when memory is available */
1717                 rxo->rx_post_starved = true;
1718         }
1719 }
1720
1721 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1722 {
1723         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1724
1725         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1726                 return NULL;
1727
1728         rmb();
1729         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1730
1731         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1732
1733         queue_tail_inc(tx_cq);
1734         return txcp;
1735 }
1736
1737 static u16 be_tx_compl_process(struct be_adapter *adapter,
1738                 struct be_tx_obj *txo, u16 last_index)
1739 {
1740         struct be_queue_info *txq = &txo->q;
1741         struct be_eth_wrb *wrb;
1742         struct sk_buff **sent_skbs = txo->sent_skb_list;
1743         struct sk_buff *sent_skb;
1744         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1745         bool unmap_skb_hdr = true;
1746
1747         sent_skb = sent_skbs[txq->tail];
1748         BUG_ON(!sent_skb);
1749         sent_skbs[txq->tail] = NULL;
1750
1751         /* skip header wrb */
1752         queue_tail_inc(txq);
1753
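             /* Walk the ring, unmapping every frag wrb posted for this skb,
              * until the completion's last wrb index is reached.
              */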
1754         do {
1755                 cur_index = txq->tail;
1756                 wrb = queue_tail_node(txq);
1757                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1758                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1759                 unmap_skb_hdr = false;
1760
1761                 num_wrbs++;
1762                 queue_tail_inc(txq);
1763         } while (cur_index != last_index);
1764
1765         kfree_skb(sent_skb);
1766         return num_wrbs;
1767 }
1768
1769 /* Return the number of events in the event queue */
1770 static inline int events_get(struct be_eq_obj *eqo)
1771 {
1772         struct be_eq_entry *eqe;
1773         int num = 0;
1774
1775         do {
1776                 eqe = queue_tail_node(&eqo->q);
1777                 if (eqe->evt == 0)
1778                         break;
1779
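                     /* rmb() keeps the event entry from being consumed
                      * before the non-zero evt word has actually been seen.
                      */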
1780                 rmb();
1781                 eqe->evt = 0;
1782                 num++;
1783                 queue_tail_inc(&eqo->q);
1784         } while (true);
1785
1786         return num;
1787 }
1788
1789 /* Leaves the EQ in a disarmed state */
1790 static void be_eq_clean(struct be_eq_obj *eqo)
1791 {
1792         int num = events_get(eqo);
1793
1794         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1795 }
1796
1797 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1798 {
1799         struct be_rx_page_info *page_info;
1800         struct be_queue_info *rxq = &rxo->q;
1801         struct be_queue_info *rx_cq = &rxo->cq;
1802         struct be_rx_compl_info *rxcp;
1803         struct be_adapter *adapter = rxo->adapter;
1804         int flush_wait = 0;
1805         u16 tail;
1806
1807         /* Consume pending rx completions.
1808          * Wait for the flush completion (identified by zero num_rcvd)
1809          * to arrive. Notify CQ even when there are no more CQ entries
1810          * for HW to flush partially coalesced CQ entries.
1811          * In Lancer, there is no need to wait for flush compl.
1812          */
1813         for (;;) {
1814                 rxcp = be_rx_compl_get(rxo);
1815                 if (rxcp == NULL) {
1816                         if (lancer_chip(adapter))
1817                                 break;
1818
1819                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1820                                 dev_warn(&adapter->pdev->dev,
1821                                          "did not receive flush compl\n");
1822                                 break;
1823                         }
1824                         be_cq_notify(adapter, rx_cq->id, true, 0);
1825                         mdelay(1);
1826                 } else {
1827                         be_rx_compl_discard(rxo, rxcp);
1828                         be_cq_notify(adapter, rx_cq->id, false, 1);
1829                         if (rxcp->num_rcvd == 0)
1830                                 break;
1831                 }
1832         }
1833
1834         /* After cleanup, leave the CQ in unarmed state */
1835         be_cq_notify(adapter, rx_cq->id, false, 0);
1836
1837         /* Then free posted rx buffers that were not used */
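             /* The oldest posted buffer sits 'used' slots behind the head:
              * e.g. head = 5, len = 8, used = 3 gives tail = (5 + 8 - 3) % 8 = 2.
              */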
1838         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1839         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1840                 page_info = get_rx_page_info(rxo, tail);
1841                 put_page(page_info->page);
1842                 memset(page_info, 0, sizeof(*page_info));
1843         }
1844         BUG_ON(atomic_read(&rxq->used));
1845         rxq->tail = rxq->head = 0;
1846 }
1847
1848 static void be_tx_compl_clean(struct be_adapter *adapter)
1849 {
1850         struct be_tx_obj *txo;
1851         struct be_queue_info *txq;
1852         struct be_eth_tx_compl *txcp;
1853         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1854         struct sk_buff *sent_skb;
1855         bool dummy_wrb;
1856         int i, pending_txqs;
1857
1858         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1859         do {
1860                 pending_txqs = adapter->num_tx_qs;
1861
1862                 for_all_tx_queues(adapter, txo, i) {
1863                         txq = &txo->q;
1864                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1865                                 end_idx =
1866                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1867                                                       wrb_index, txcp);
1868                                 num_wrbs += be_tx_compl_process(adapter, txo,
1869                                                                 end_idx);
1870                                 cmpl++;
1871                         }
1872                         if (cmpl) {
1873                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1874                                 atomic_sub(num_wrbs, &txq->used);
1875                                 cmpl = 0;
1876                                 num_wrbs = 0;
1877                         }
1878                         if (atomic_read(&txq->used) == 0)
1879                                 pending_txqs--;
1880                 }
1881
1882                 if (pending_txqs == 0 || ++timeo > 200)
1883                         break;
1884
1885                 mdelay(1);
1886         } while (true);
1887
1888         for_all_tx_queues(adapter, txo, i) {
1889                 txq = &txo->q;
1890                 if (atomic_read(&txq->used))
1891                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1892                                 atomic_read(&txq->used));
1893
1894                 /* free posted tx for which compls will never arrive */
1895                 while (atomic_read(&txq->used)) {
1896                         sent_skb = txo->sent_skb_list[txq->tail];
1897                         end_idx = txq->tail;
1898                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1899                                                    &dummy_wrb);
1900                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1901                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1902                         atomic_sub(num_wrbs, &txq->used);
1903                 }
1904         }
1905 }
1906
1907 static void be_evt_queues_destroy(struct be_adapter *adapter)
1908 {
1909         struct be_eq_obj *eqo;
1910         int i;
1911
1912         for_all_evt_queues(adapter, eqo, i) {
1913                 if (eqo->q.created) {
1914                         be_eq_clean(eqo);
1915                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1916                 }
1917                 be_queue_free(adapter, &eqo->q);
1918         }
1919 }
1920
1921 static int be_evt_queues_create(struct be_adapter *adapter)
1922 {
1923         struct be_queue_info *eq;
1924         struct be_eq_obj *eqo;
1925         int i, rc;
1926
1927         adapter->num_evt_qs = num_irqs(adapter);
1928
1929         for_all_evt_queues(adapter, eqo, i) {
1930                 eqo->adapter = adapter;
1931                 eqo->tx_budget = BE_TX_BUDGET;
1932                 eqo->idx = i;
1933                 eqo->max_eqd = BE_MAX_EQD;
1934                 eqo->enable_aic = true;
1935
1936                 eq = &eqo->q;
1937                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1938                                         sizeof(struct be_eq_entry));
1939                 if (rc)
1940                         return rc;
1941
1942                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1943                 if (rc)
1944                         return rc;
1945         }
1946         return 0;
1947 }
1948
1949 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1950 {
1951         struct be_queue_info *q;
1952
1953         q = &adapter->mcc_obj.q;
1954         if (q->created)
1955                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1956         be_queue_free(adapter, q);
1957
1958         q = &adapter->mcc_obj.cq;
1959         if (q->created)
1960                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1961         be_queue_free(adapter, q);
1962 }
1963
1964 /* Must be called only after TX qs are created as MCC shares TX EQ */
1965 static int be_mcc_queues_create(struct be_adapter *adapter)
1966 {
1967         struct be_queue_info *q, *cq;
1968
1969         cq = &adapter->mcc_obj.cq;
1970         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1971                         sizeof(struct be_mcc_compl)))
1972                 goto err;
1973
1974         /* Use the default EQ for MCC completions */
1975         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1976                 goto mcc_cq_free;
1977
1978         q = &adapter->mcc_obj.q;
1979         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1980                 goto mcc_cq_destroy;
1981
1982         if (be_cmd_mccq_create(adapter, q, cq))
1983                 goto mcc_q_free;
1984
1985         return 0;
1986
1987 mcc_q_free:
1988         be_queue_free(adapter, q);
1989 mcc_cq_destroy:
1990         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1991 mcc_cq_free:
1992         be_queue_free(adapter, cq);
1993 err:
1994         return -1;
1995 }
1996
1997 static void be_tx_queues_destroy(struct be_adapter *adapter)
1998 {
1999         struct be_queue_info *q;
2000         struct be_tx_obj *txo;
2001         u8 i;
2002
2003         for_all_tx_queues(adapter, txo, i) {
2004                 q = &txo->q;
2005                 if (q->created)
2006                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2007                 be_queue_free(adapter, q);
2008
2009                 q = &txo->cq;
2010                 if (q->created)
2011                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2012                 be_queue_free(adapter, q);
2013         }
2014 }
2015
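     /* SR-IOV capable non-Lancer functions, multi-channel configs,
      * non-Lancer VFs and BE2 chips are limited to a single TXQ; all other
      * configurations may use up to max_tx_queues.
      */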
2016 static int be_num_txqs_want(struct be_adapter *adapter)
2017 {
2018         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2019             be_is_mc(adapter) ||
2020             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2021             BE2_chip(adapter))
2022                 return 1;
2023         else
2024                 return adapter->max_tx_queues;
2025 }
2026
2027 static int be_tx_cqs_create(struct be_adapter *adapter)
2028 {
2029         struct be_queue_info *cq, *eq;
2030         int status;
2031         struct be_tx_obj *txo;
2032         u8 i;
2033
2034         adapter->num_tx_qs = be_num_txqs_want(adapter);
2035         if (adapter->num_tx_qs != MAX_TX_QS) {
2036                 rtnl_lock();
2037                 netif_set_real_num_tx_queues(adapter->netdev,
2038                         adapter->num_tx_qs);
2039                 rtnl_unlock();
2040         }
2041
2042         for_all_tx_queues(adapter, txo, i) {
2043                 cq = &txo->cq;
2044                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2045                                         sizeof(struct be_eth_tx_compl));
2046                 if (status)
2047                         return status;
2048
2049                 /* If num_evt_qs is less than num_tx_qs, then more than
2050                  * one txq shares an eq
2051                  */
2052                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2053                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2054                 if (status)
2055                         return status;
2056         }
2057         return 0;
2058 }
2059
2060 static int be_tx_qs_create(struct be_adapter *adapter)
2061 {
2062         struct be_tx_obj *txo;
2063         int i, status;
2064
2065         for_all_tx_queues(adapter, txo, i) {
2066                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2067                                         sizeof(struct be_eth_wrb));
2068                 if (status)
2069                         return status;
2070
2071                 status = be_cmd_txq_create(adapter, txo);
2072                 if (status)
2073                         return status;
2074         }
2075
2076         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2077                  adapter->num_tx_qs);
2078         return 0;
2079 }
2080
2081 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2082 {
2083         struct be_queue_info *q;
2084         struct be_rx_obj *rxo;
2085         int i;
2086
2087         for_all_rx_queues(adapter, rxo, i) {
2088                 q = &rxo->cq;
2089                 if (q->created)
2090                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2091                 be_queue_free(adapter, q);
2092         }
2093 }
2094
2095 static int be_rx_cqs_create(struct be_adapter *adapter)
2096 {
2097         struct be_queue_info *eq, *cq;
2098         struct be_rx_obj *rxo;
2099         int rc, i;
2100
2101         /* We'll create as many RSS rings as there are irqs, plus one
2102          * default non-RSS RX queue; when there's only one irq, there's
2103          * no use creating RSS rings at all. */
2104         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2105                                 num_irqs(adapter) + 1 : 1;
2106         if (adapter->num_rx_qs != MAX_RX_QS) {
2107                 rtnl_lock();
2108                 netif_set_real_num_rx_queues(adapter->netdev,
2109                                              adapter->num_rx_qs);
2110                 rtnl_unlock();
2111         }
2112
2113         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2114         for_all_rx_queues(adapter, rxo, i) {
2115                 rxo->adapter = adapter;
2116                 cq = &rxo->cq;
2117                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2118                                 sizeof(struct be_eth_rx_compl));
2119                 if (rc)
2120                         return rc;
2121
2122                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2123                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2124                 if (rc)
2125                         return rc;
2126         }
2127
2128         dev_info(&adapter->pdev->dev,
2129                  "created %d RSS queue(s) and 1 default RX queue\n",
2130                  adapter->num_rx_qs - 1);
2131         return 0;
2132 }
2133
2134 static irqreturn_t be_intx(int irq, void *dev)
2135 {
2136         struct be_eq_obj *eqo = dev;
2137         struct be_adapter *adapter = eqo->adapter;
2138         int num_evts = 0;
2139
2140         /* IRQ is not expected when NAPI is scheduled as the EQ
2141          * will not be armed.
2142          * But, this can happen on Lancer INTx where it takes
2143          * a while to de-assert INTx or in BE2 where occasionally
2144          * an interrupt may be raised even when the EQ is unarmed.
2145          * If NAPI is already scheduled, then counting & notifying
2146          * events will orphan them.
2147          */
2148         if (napi_schedule_prep(&eqo->napi)) {
2149                 num_evts = events_get(eqo);
2150                 __napi_schedule(&eqo->napi);
2151                 if (num_evts)
2152                         eqo->spurious_intr = 0;
2153         }
2154         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2155
2156         /* Return IRQ_HANDLED only for the first spurious intr
2157          * after a valid intr to stop the kernel from branding
2158          * this irq as a bad one!
2159          */
2160         if (num_evts || eqo->spurious_intr++ == 0)
2161                 return IRQ_HANDLED;
2162         else
2163                 return IRQ_NONE;
2164 }
2165
2166 static irqreturn_t be_msix(int irq, void *dev)
2167 {
2168         struct be_eq_obj *eqo = dev;
2169
2170         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2171         napi_schedule(&eqo->napi);
2172         return IRQ_HANDLED;
2173 }
2174
2175 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2176 {
2177         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2178 }
2179
2180 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2181                         int budget)
2182 {
2183         struct be_adapter *adapter = rxo->adapter;
2184         struct be_queue_info *rx_cq = &rxo->cq;
2185         struct be_rx_compl_info *rxcp;
2186         u32 work_done;
2187
2188         for (work_done = 0; work_done < budget; work_done++) {
2189                 rxcp = be_rx_compl_get(rxo);
2190                 if (!rxcp)
2191                         break;
2192
2193                 /* Is it a flush compl that has no data? */
2194                 if (unlikely(rxcp->num_rcvd == 0))
2195                         goto loop_continue;
2196
2197                 /* Discard compl with partial DMA Lancer B0 */
2198                 if (unlikely(!rxcp->pkt_size)) {
2199                         be_rx_compl_discard(rxo, rxcp);
2200                         goto loop_continue;
2201                 }
2202
2203                 /* On BE drop pkts that arrive due to imperfect filtering in
2204                  * promiscuous mode on some SKUs
2205                  */
2206                 if (unlikely(rxcp->port != adapter->port_num &&
2207                                 !lancer_chip(adapter))) {
2208                         be_rx_compl_discard(rxo, rxcp);
2209                         goto loop_continue;
2210                 }
2211
2212                 if (do_gro(rxcp))
2213                         be_rx_compl_process_gro(rxo, napi, rxcp);
2214                 else
2215                         be_rx_compl_process(rxo, rxcp);
2216 loop_continue:
2217                 be_rx_stats_update(rxo, rxcp);
2218         }
2219
2220         if (work_done) {
2221                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2222
2223                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2224                         be_post_rx_frags(rxo, GFP_ATOMIC);
2225         }
2226
2227         return work_done;
2228 }
2229
2230 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2231                           int budget, int idx)
2232 {
2233         struct be_eth_tx_compl *txcp;
2234         int num_wrbs = 0, work_done;
2235
2236         for (work_done = 0; work_done < budget; work_done++) {
2237                 txcp = be_tx_compl_get(&txo->cq);
2238                 if (!txcp)
2239                         break;
2240                 num_wrbs += be_tx_compl_process(adapter, txo,
2241                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2242                                         wrb_index, txcp));
2243         }
2244
2245         if (work_done) {
2246                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2247                 atomic_sub(num_wrbs, &txo->q.used);
2248
2249                 /* As Tx wrbs have been freed up, wake up netdev queue
2250                  * if it was stopped due to lack of tx wrbs.  */
2251                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2252                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2253                         netif_wake_subqueue(adapter->netdev, idx);
2254                 }
2255
2256                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2257                 tx_stats(txo)->tx_compl += work_done;
2258                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2259         }
2260         return (work_done < budget); /* Done */
2261 }
2262
2263 static int be_poll(struct napi_struct *napi, int budget)
2264 {
2265         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2266         struct be_adapter *adapter = eqo->adapter;
2267         int max_work = 0, work, i, num_evts;
2268         bool tx_done;
2269
2270         num_evts = events_get(eqo);
2271
2272         /* Process all TXQs serviced by this EQ */
2273         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2274                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2275                                         eqo->tx_budget, i);
2276                 if (!tx_done)
2277                         max_work = budget;
2278         }
2279
2280         /* This loop iterates twice for EQ0, where completions of the
2281          * last RXQ (the default one) are also processed. For other EQs
2282          * the loop iterates only once.
2283          */
2284         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2285                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2286                 max_work = max(work, max_work);
2287         }
2288
2289         if (is_mcc_eqo(eqo))
2290                 be_process_mcc(adapter);
2291
2292         if (max_work < budget) {
2293                 napi_complete(napi);
2294                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2295         } else {
2296                 /* As we'll continue in polling mode, count and clear events */
2297                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2298         }
2299         return max_work;
2300 }
2301
2302 void be_detect_error(struct be_adapter *adapter)
2303 {
2304         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2305         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2306         u32 i;
2307
2308         if (be_hw_error(adapter))
2309                 return;
2310
2311         if (lancer_chip(adapter)) {
2312                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2313                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2314                         sliport_err1 = ioread32(adapter->db +
2315                                         SLIPORT_ERROR1_OFFSET);
2316                         sliport_err2 = ioread32(adapter->db +
2317                                         SLIPORT_ERROR2_OFFSET);
2318                 }
2319         } else {
2320                 pci_read_config_dword(adapter->pdev,
2321                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2322                 pci_read_config_dword(adapter->pdev,
2323                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2324                 pci_read_config_dword(adapter->pdev,
2325                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2326                 pci_read_config_dword(adapter->pdev,
2327                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2328
2329                 ue_lo = (ue_lo & ~ue_lo_mask);
2330                 ue_hi = (ue_hi & ~ue_hi_mask);
2331         }
2332
2333         /* On certain platforms BE hardware can indicate spurious UEs.
2334          * Allow the h/w to stop working completely in case of a real UE.
2335          * Hence hw_error is not set for UE detection.
2336          */
2337         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2338                 adapter->hw_error = true;
2339                 dev_err(&adapter->pdev->dev,
2340                         "Error detected in the card\n");
2341                 dev_err(&adapter->pdev->dev,
2342                         "ERR: sliport status 0x%x\n", sliport_status);
2343                 dev_err(&adapter->pdev->dev,
2344                         "ERR: sliport error1 0x%x\n", sliport_err1);
2345                 dev_err(&adapter->pdev->dev,
2346                         "ERR: sliport error2 0x%x\n", sliport_err2);
2347         }
2351
2352         if (ue_lo) {
2353                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2354                         if (ue_lo & 1)
2355                                 dev_err(&adapter->pdev->dev,
2356                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2357                 }
2358         }
2359
2360         if (ue_hi) {
2361                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2362                         if (ue_hi & 1)
2363                                 dev_err(&adapter->pdev->dev,
2364                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2365                 }
2366         }
2368 }
2369
2370 static void be_msix_disable(struct be_adapter *adapter)
2371 {
2372         if (msix_enabled(adapter)) {
2373                 pci_disable_msix(adapter->pdev);
2374                 adapter->num_msix_vec = 0;
2375         }
2376 }
2377
2378 static uint be_num_rss_want(struct be_adapter *adapter)
2379 {
2380         u32 num = 0;
2381
2382         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2383             (lancer_chip(adapter) ||
2384              (!sriov_want(adapter) && be_physfn(adapter)))) {
2385                 num = adapter->max_rss_queues;
2386                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2387         }
2388         return num;
2389 }
2390
2391 static int be_msix_enable(struct be_adapter *adapter)
2392 {
2393 #define BE_MIN_MSIX_VECTORS             1
2394         int i, status, num_vec, num_roce_vec = 0;
2395         struct device *dev = &adapter->pdev->dev;
2396
2397         /* If RSS queues are not used, need a vec for default RX Q */
2398         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2399         if (be_roce_supported(adapter)) {
2400                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2401                                         (num_online_cpus() + 1));
2402                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2403                 num_vec += num_roce_vec;
2404                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2405         }
2406         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2407
2408         for (i = 0; i < num_vec; i++)
2409                 adapter->msix_entries[i].entry = i;
2410
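             /* With the legacy pci_enable_msix() semantics, a positive return
              * value is the number of vectors the platform can actually
              * provide; the fallback below retries with that smaller count.
              */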
2411         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2412         if (status == 0) {
2413                 goto done;
2414         } else if (status >= BE_MIN_MSIX_VECTORS) {
2415                 num_vec = status;
2416                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2417                                          num_vec);
2418                 if (!status)
2419                         goto done;
2420         }
2421
2422         dev_warn(dev, "MSIx enable failed\n");
2423         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2424         if (!be_physfn(adapter))
2425                 return status;
2426         return 0;
2427 done:
2428         if (be_roce_supported(adapter)) {
2429                 if (num_vec > num_roce_vec) {
2430                         adapter->num_msix_vec = num_vec - num_roce_vec;
2431                         adapter->num_msix_roce_vec =
2432                                 num_vec - adapter->num_msix_vec;
2433                 } else {
2434                         adapter->num_msix_vec = num_vec;
2435                         adapter->num_msix_roce_vec = 0;
2436                 }
2437         } else
2438                 adapter->num_msix_vec = num_vec;
2439         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2440         return 0;
2441 }
2442
2443 static inline int be_msix_vec_get(struct be_adapter *adapter,
2444                                 struct be_eq_obj *eqo)
2445 {
2446         return adapter->msix_entries[eqo->idx].vector;
2447 }
2448
2449 static int be_msix_register(struct be_adapter *adapter)
2450 {
2451         struct net_device *netdev = adapter->netdev;
2452         struct be_eq_obj *eqo;
2453         int status, i, vec;
2454
2455         for_all_evt_queues(adapter, eqo, i) {
2456                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2457                 vec = be_msix_vec_get(adapter, eqo);
2458                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2459                 if (status)
2460                         goto err_msix;
2461         }
2462
2463         return 0;
2464 err_msix:
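             /* Unwind: free the vectors that were successfully requested, in
              * reverse order.
              */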
2465         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2466                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2467         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2468                 status);
2469         be_msix_disable(adapter);
2470         return status;
2471 }
2472
2473 static int be_irq_register(struct be_adapter *adapter)
2474 {
2475         struct net_device *netdev = adapter->netdev;
2476         int status;
2477
2478         if (msix_enabled(adapter)) {
2479                 status = be_msix_register(adapter);
2480                 if (status == 0)
2481                         goto done;
2482                 /* INTx is not supported for VF */
2483                 if (!be_physfn(adapter))
2484                         return status;
2485         }
2486
2487         /* INTx: only the first EQ is used */
2488         netdev->irq = adapter->pdev->irq;
2489         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2490                              &adapter->eq_obj[0]);
2491         if (status) {
2492                 dev_err(&adapter->pdev->dev,
2493                         "INTx request IRQ failed - err %d\n", status);
2494                 return status;
2495         }
2496 done:
2497         adapter->isr_registered = true;
2498         return 0;
2499 }
2500
2501 static void be_irq_unregister(struct be_adapter *adapter)
2502 {
2503         struct net_device *netdev = adapter->netdev;
2504         struct be_eq_obj *eqo;
2505         int i;
2506
2507         if (!adapter->isr_registered)
2508                 return;
2509
2510         /* INTx */
2511         if (!msix_enabled(adapter)) {
2512                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2513                 goto done;
2514         }
2515
2516         /* MSIx */
2517         for_all_evt_queues(adapter, eqo, i)
2518                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2519
2520 done:
2521         adapter->isr_registered = false;
2522 }
2523
2524 static void be_rx_qs_destroy(struct be_adapter *adapter)
2525 {
2526         struct be_queue_info *q;
2527         struct be_rx_obj *rxo;
2528         int i;
2529
2530         for_all_rx_queues(adapter, rxo, i) {
2531                 q = &rxo->q;
2532                 if (q->created) {
2533                         be_cmd_rxq_destroy(adapter, q);
2534                         be_rx_cq_clean(rxo);
2535                 }
2536                 be_queue_free(adapter, q);
2537         }
2538 }
2539
2540 static int be_close(struct net_device *netdev)
2541 {
2542         struct be_adapter *adapter = netdev_priv(netdev);
2543         struct be_eq_obj *eqo;
2544         int i;
2545
2546         be_roce_dev_close(adapter);
2547
2548         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2549                 for_all_evt_queues(adapter, eqo, i)
2550                         napi_disable(&eqo->napi);
2551                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2552         }
2553
2554         be_async_mcc_disable(adapter);
2555
2556         /* Wait for all pending tx completions to arrive so that
2557          * all tx skbs are freed.
2558          */
2559         netif_tx_disable(netdev);
2560         be_tx_compl_clean(adapter);
2561
2562         be_rx_qs_destroy(adapter);
2563
2564         for_all_evt_queues(adapter, eqo, i) {
2565                 if (msix_enabled(adapter))
2566                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2567                 else
2568                         synchronize_irq(netdev->irq);
2569                 be_eq_clean(eqo);
2570         }
2571
2572         be_irq_unregister(adapter);
2573
2574         return 0;
2575 }
2576
2577 static int be_rx_qs_create(struct be_adapter *adapter)
2578 {
2579         struct be_rx_obj *rxo;
2580         int rc, i, j;
2581         u8 rsstable[128];
2582
2583         for_all_rx_queues(adapter, rxo, i) {
2584                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2585                                     sizeof(struct be_eth_rx_d));
2586                 if (rc)
2587                         return rc;
2588         }
2589
2590         /* The FW would like the default RXQ to be created first */
2591         rxo = default_rxo(adapter);
2592         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2593                                adapter->if_handle, false, &rxo->rss_id);
2594         if (rc)
2595                 return rc;
2596
2597         for_all_rss_queues(adapter, rxo, i) {
2598                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2599                                        rx_frag_size, adapter->if_handle,
2600                                        true, &rxo->rss_id);
2601                 if (rc)
2602                         return rc;
2603         }
2604
2605         if (be_multi_rxq(adapter)) {
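                     /* Stripe the rss_ids of all RSS rings across the
                      * 128-entry RSS indirection table.
                      */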
2606                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2607                         for_all_rss_queues(adapter, rxo, i) {
2608                                 if ((j + i) >= 128)
2609                                         break;
2610                                 rsstable[j + i] = rxo->rss_id;
2611                         }
2612                 }
2613                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2614                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2615
2616                 if (!BEx_chip(adapter))
2617                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2618                                                 RSS_ENABLE_UDP_IPV6;
2619
2620                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2621                                        128);
2622                 if (rc) {
2623                         adapter->rss_flags = 0;
2624                         return rc;
2625                 }
2626         }
2627
2628         /* First time posting */
2629         for_all_rx_queues(adapter, rxo, i)
2630                 be_post_rx_frags(rxo, GFP_KERNEL);
2631         return 0;
2632 }
2633
2634 static int be_open(struct net_device *netdev)
2635 {
2636         struct be_adapter *adapter = netdev_priv(netdev);
2637         struct be_eq_obj *eqo;
2638         struct be_rx_obj *rxo;
2639         struct be_tx_obj *txo;
2640         u8 link_status;
2641         int status, i;
2642
2643         status = be_rx_qs_create(adapter);
2644         if (status)
2645                 goto err;
2646
2647         status = be_irq_register(adapter);
2648         if (status)
2649                 goto err;
2650
2651         for_all_rx_queues(adapter, rxo, i)
2652                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2653
2654         for_all_tx_queues(adapter, txo, i)
2655                 be_cq_notify(adapter, txo->cq.id, true, 0);
2656
2657         be_async_mcc_enable(adapter);
2658
2659         for_all_evt_queues(adapter, eqo, i) {
2660                 napi_enable(&eqo->napi);
2661                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2662         }
2663         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2664
2665         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2666         if (!status)
2667                 be_link_status_update(adapter, link_status);
2668
2669         netif_tx_start_all_queues(netdev);
2670         be_roce_dev_open(adapter);
2671         return 0;
2672 err:
2673         be_close(adapter->netdev);
2674         return -EIO;
2675 }
2676
2677 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2678 {
2679         struct be_dma_mem cmd;
2680         int status = 0;
2681         u8 mac[ETH_ALEN];
2682
2683         memset(mac, 0, ETH_ALEN);
2684
2685         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2686         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2687                                     GFP_KERNEL | __GFP_ZERO);
2688         if (cmd.va == NULL)
2689                 return -1;
2690
2691         if (enable) {
2692                 status = pci_write_config_dword(adapter->pdev,
2693                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2694                 if (status) {
2695                         dev_err(&adapter->pdev->dev,
2696                                 "Could not enable Wake-on-lan\n");
2697                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2698                                           cmd.dma);
2699                         return status;
2700                 }
2701                 status = be_cmd_enable_magic_wol(adapter,
2702                                 adapter->netdev->dev_addr, &cmd);
2703                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2704                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2705         } else {
2706                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2707                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2708                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2709         }
2710
2711         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2712         return status;
2713 }
2714
2715 /*
2716  * Generate a seed MAC address from the PF MAC Address using jhash.
2717  * MAC addresses for VFs are assigned incrementally starting from the seed.
2718  * These addresses are programmed in the ASIC by the PF and the VF driver
2719  * queries for the MAC address during its probe.
2720  */
2721 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2722 {
2723         u32 vf;
2724         int status = 0;
2725         u8 mac[ETH_ALEN];
2726         struct be_vf_cfg *vf_cfg;
2727
2728         be_vf_eth_addr_generate(adapter, mac);
2729
2730         for_all_vfs(adapter, vf_cfg, vf) {
2731                 if (BEx_chip(adapter))
2732                         status = be_cmd_pmac_add(adapter, mac,
2733                                                  vf_cfg->if_handle,
2734                                                  &vf_cfg->pmac_id, vf + 1);
2735                 else
2736                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2737                                                 vf + 1);
2738
2739                 if (status)
2740                         dev_err(&adapter->pdev->dev,
2741                         "MAC address assignment failed for VF %d\n", vf);
2742                 else
2743                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2744
2745                 mac[5] += 1;
2746         }
2747         return status;
2748 }
2749
2750 static int be_vfs_mac_query(struct be_adapter *adapter)
2751 {
2752         int status, vf;
2753         u8 mac[ETH_ALEN];
2754         struct be_vf_cfg *vf_cfg;
2755         bool active = false;
2756
2757         for_all_vfs(adapter, vf_cfg, vf) {
2758                 be_cmd_get_mac_from_list(adapter, mac, &active,
2759                                          &vf_cfg->pmac_id, 0);
2760
2761                 status = be_cmd_mac_addr_query(adapter, mac, false,
2762                                                vf_cfg->if_handle, 0);
2763                 if (status)
2764                         return status;
2765                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2766         }
2767         return 0;
2768 }
2769
2770 static void be_vf_clear(struct be_adapter *adapter)
2771 {
2772         struct be_vf_cfg *vf_cfg;
2773         u32 vf;
2774
2775         if (pci_vfs_assigned(adapter->pdev)) {
2776                 dev_warn(&adapter->pdev->dev,
2777                          "VFs are assigned to VMs: not disabling VFs\n");
2778                 goto done;
2779         }
2780
2781         pci_disable_sriov(adapter->pdev);
2782
2783         for_all_vfs(adapter, vf_cfg, vf) {
2784                 if (BEx_chip(adapter))
2785                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2786                                         vf_cfg->pmac_id, vf + 1);
2787                 else
2788                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2789                                        vf + 1);
2790
2791                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2792         }
2793 done:
2794         kfree(adapter->vf_cfg);
2795         adapter->num_vfs = 0;
2796 }
2797
2798 static int be_clear(struct be_adapter *adapter)
2799 {
2800         int i;
2801
2802         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2803                 cancel_delayed_work_sync(&adapter->work);
2804                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2805         }
2806
2807         if (sriov_enabled(adapter))
2808                 be_vf_clear(adapter);
2809
2810         /* delete the primary mac along with the uc-mac list */
2811         for (i = 0; i < (adapter->uc_macs + 1); i++)
2812                 be_cmd_pmac_del(adapter, adapter->if_handle,
2813                                 adapter->pmac_id[i], 0);
2814         adapter->uc_macs = 0;
2815
2816         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2817
2818         be_mcc_queues_destroy(adapter);
2819         be_rx_cqs_destroy(adapter);
2820         be_tx_queues_destroy(adapter);
2821         be_evt_queues_destroy(adapter);
2822
2823         kfree(adapter->pmac_id);
2824         adapter->pmac_id = NULL;
2825
2826         be_msix_disable(adapter);
2827         return 0;
2828 }
2829
2830 static int be_vfs_if_create(struct be_adapter *adapter)
2831 {
2832         struct be_vf_cfg *vf_cfg;
2833         u32 cap_flags, en_flags, vf;
2834         int status = 0;	/* avoid returning garbage when there are no VFs */
2835
2836         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2837                     BE_IF_FLAGS_MULTICAST;
2838
2839         for_all_vfs(adapter, vf_cfg, vf) {
2840                 if (!BE3_chip(adapter))
2841                         be_cmd_get_profile_config(adapter, &cap_flags,
2842                                                   NULL, vf + 1);
2843
2844                 /* If a FW profile exists, then cap_flags are updated */
2845                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2846                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2847                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2848                                           &vf_cfg->if_handle, vf + 1);
2849                 if (status)
2850                         goto err;
2851         }
2852 err:
2853         return status;
2854 }
2855
2856 static int be_vf_setup_init(struct be_adapter *adapter)
2857 {
2858         struct be_vf_cfg *vf_cfg;
2859         int vf;
2860
2861         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2862                                   GFP_KERNEL);
2863         if (!adapter->vf_cfg)
2864                 return -ENOMEM;
2865
2866         for_all_vfs(adapter, vf_cfg, vf) {
2867                 vf_cfg->if_handle = -1;
2868                 vf_cfg->pmac_id = -1;
2869         }
2870         return 0;
2871 }
2872
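/* Bring up SR-IOV: either adopt VFs that are already enabled (reusing their
 * if_handles and MACs) or create interfaces and assign MACs for num_vfs new
 * ones. Each VF is then granted FILTMGMT privilege where possible, lifted
 * from the BE3 default 100Mbps TX-rate cap, and enabled; pci_enable_sriov()
 * runs last so VFs only appear once fully provisioned.
 */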
2873 static int be_vf_setup(struct be_adapter *adapter)
2874 {
2875         struct be_vf_cfg *vf_cfg;
2876         u16 def_vlan, lnk_speed;
2877         int status, old_vfs, vf;
2878         struct device *dev = &adapter->pdev->dev;
2879         u32 privileges;
2880
2881         old_vfs = pci_num_vf(adapter->pdev);
2882         if (old_vfs) {
2883                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2884                 if (old_vfs != num_vfs)
2885                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2886                 adapter->num_vfs = old_vfs;
2887         } else {
2888                 if (num_vfs > adapter->dev_num_vfs)
2889                         dev_info(dev, "Device supports %d VFs and not %d\n",
2890                                  adapter->dev_num_vfs, num_vfs);
2891                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2892                 if (!adapter->num_vfs)
2893                         return 0;
2894         }
2895
2896         status = be_vf_setup_init(adapter);
2897         if (status)
2898                 goto err;
2899
2900         if (old_vfs) {
2901                 for_all_vfs(adapter, vf_cfg, vf) {
2902                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2903                         if (status)
2904                                 goto err;
2905                 }
2906         } else {
2907                 status = be_vfs_if_create(adapter);
2908                 if (status)
2909                         goto err;
2910         }
2911
2912         if (old_vfs) {
2913                 status = be_vfs_mac_query(adapter);
2914                 if (status)
2915                         goto err;
2916         } else {
2917                 status = be_vf_eth_addr_config(adapter);
2918                 if (status)
2919                         goto err;
2920         }
2921
2922         for_all_vfs(adapter, vf_cfg, vf) {
2923                 /* Allow VFs to program MAC/VLAN filters */
2924                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2925                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2926                         status = be_cmd_set_fn_privileges(adapter,
2927                                                           privileges |
2928                                                           BE_PRIV_FILTMGMT,
2929                                                           vf + 1);
2930                         if (!status)
2931                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
2932                                          vf);
2933                 }
2934
2935                 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2936                  * Allow full available bandwidth
2937                  */
2938                 if (BE3_chip(adapter) && !old_vfs)
2939                         be_cmd_set_qos(adapter, 1000, vf + 1);
2940
2941                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2942                                                   NULL, vf + 1);
2943                 if (!status)
2944                         vf_cfg->tx_rate = lnk_speed;
2945
2946                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2947                                                vf + 1, vf_cfg->if_handle);
2948                 if (status)
2949                         goto err;
2950                 vf_cfg->def_vid = def_vlan;
2951
2952                 be_cmd_enable_vf(adapter, vf + 1);
2953         }
2954
2955         if (!old_vfs) {
2956                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2957                 if (status) {
2958                         dev_err(dev, "SRIOV enable failed\n");
2959                         adapter->num_vfs = 0;
2960                         goto err;
2961                 }
2962         }
2963         return 0;
2964 err:
2965         dev_err(dev, "VF setup failed\n");
2966         be_vf_clear(adapter);
2967         return status;
2968 }
2969
2970 static void be_setup_init(struct be_adapter *adapter)
2971 {
2972         adapter->vlan_prio_bmap = 0xff;
2973         adapter->phy.link_speed = -1;
2974         adapter->if_handle = -1;
2975         adapter->be3_native = false;
2976         adapter->promiscuous = false;
2977         if (be_physfn(adapter))
2978                 adapter->cmd_privileges = MAX_PRIVILEGES;
2979         else
2980                 adapter->cmd_privileges = MIN_PRIVILEGES;
2981 }
2982
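/* Discover per-function resource limits: on chips newer than BE2/BE3 the
 * limits come from the FW function profile; on BE2/BE3 they are derived from
 * fixed chip defaults (a BE3 PF additionally queries its TXQ count). The
 * SR-IOV capability is read at the end to learn the device's total VF count.
 */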
2983 static void be_get_resources(struct be_adapter *adapter)
2984 {
2985         u16 dev_num_vfs;
2986         int pos, status;
2987         bool profile_present = false;
2988         u16 txq_count = 0;
2989
2990         if (!BEx_chip(adapter)) {
2991                 status = be_cmd_get_func_config(adapter);
2992                 if (!status)
2993                         profile_present = true;
2994         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
2995                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
2996         }
2997
2998         if (profile_present) {
2999                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3000                                                MAX_TX_QS);
3001                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3002                                                 BE3_MAX_RSS_QS);
3003                 adapter->max_event_queues = min_t(u16,
3004                                                   adapter->max_event_queues,
3005                                                   BE3_MAX_RSS_QS);
3006
3007                 if (adapter->max_rss_queues &&
3008                     adapter->max_rss_queues == adapter->max_rx_queues)
3009                         adapter->max_rss_queues -= 1;
3010
3011                 if (adapter->max_event_queues < adapter->max_rss_queues)
3012                         adapter->max_rss_queues = adapter->max_event_queues;
3013
3014         } else {
3015                 if (be_physfn(adapter))
3016                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3017                 else
3018                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3019
3020                 if (adapter->function_mode & FLEX10_MODE)
3021                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3022                 else
3023                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3024
3025                 adapter->max_mcast_mac = BE_MAX_MC;
3026                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3027                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3028                                                MAX_TX_QS);
3029                 adapter->max_rss_queues = (adapter->be3_native) ?
3030                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3031                 adapter->max_event_queues = BE3_MAX_RSS_QS;
3032
3033                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3034                                         BE_IF_FLAGS_BROADCAST |
3035                                         BE_IF_FLAGS_MULTICAST |
3036                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
3037                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
3038                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
3039                                         BE_IF_FLAGS_PROMISCUOUS;
3040
3041                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3042                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3043         }
3044
3045         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3046         if (pos) {
3047                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3048                                      &dev_num_vfs);
3049                 if (BE3_chip(adapter))
3050                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3051                 adapter->dev_num_vfs = dev_num_vfs;
3052         }
3053 }
3054
3055 /* Routine to query per function resource limits */
3056 static int be_get_config(struct be_adapter *adapter)
3057 {
3058         int status;
3059
3060         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3061                                      &adapter->function_mode,
3062                                      &adapter->function_caps,
3063                                      &adapter->asic_rev);
3064         if (status)
3065                 goto err;
3066
3067         be_get_resources(adapter);
3068
3069         /* primary mac needs 1 pmac entry */
3070         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3071                                    sizeof(u32), GFP_KERNEL);
3072         if (!adapter->pmac_id) {
3073                 status = -ENOMEM;
3074                 goto err;
3075         }
3076
3077 err:
3078         return status;
3079 }
3080
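/* Program the primary MAC: when dev_addr is still zero (first setup) the
 * FW's permanent MAC is queried and adopted; otherwise the current dev_addr
 * is re-programmed (e.g. after a HW reset, as noted below).
 */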
3081 static int be_mac_setup(struct be_adapter *adapter)
3082 {
3083         u8 mac[ETH_ALEN];
3084         int status;
3085
3086         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3087                 status = be_cmd_get_perm_mac(adapter, mac);
3088                 if (status)
3089                         return status;
3090
3091                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3092                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3093         } else {
3094                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3095                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3096         }
3097
3098         /* On BE3 VFs this cmd may fail due to lack of privilege.
3099          * Ignore the failure as in this case pmac_id is fetched
3100          * in the IFACE_CREATE cmd.
3101          */
3102         be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3103                         &adapter->pmac_id[0], 0);
3104         return 0;
3105 }
3106
3107 static int be_setup(struct be_adapter *adapter)
3108 {
3109         struct device *dev = &adapter->pdev->dev;
3110         u32 en_flags;
3111         u32 tx_fc, rx_fc;
3112         int status;
3113
3114         be_setup_init(adapter);
3115
3116         if (!lancer_chip(adapter))
3117                 be_cmd_req_native_mode(adapter);
3118
3119         status = be_get_config(adapter);
3120         if (status)
3121                 goto err;
3122
3123         status = be_msix_enable(adapter);
3124         if (status)
3125                 goto err;
3126
3127         status = be_evt_queues_create(adapter);
3128         if (status)
3129                 goto err;
3130
3131         status = be_tx_cqs_create(adapter);
3132         if (status)
3133                 goto err;
3134
3135         status = be_rx_cqs_create(adapter);
3136         if (status)
3137                 goto err;
3138
3139         status = be_mcc_queues_create(adapter);
3140         if (status)
3141                 goto err;
3142
3143         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3144         /* In UMC mode FW does not return right privileges.
3145          * Override with correct privilege equivalent to PF.
3146          */
3147         if (be_is_mc(adapter))
3148                 adapter->cmd_privileges = MAX_PRIVILEGES;
3149
3150         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3151                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3152         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3153                 en_flags |= BE_IF_FLAGS_RSS;
3154         en_flags &= adapter->if_cap_flags;
3155         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3156                                   &adapter->if_handle, 0);
3157         if (status != 0)
3158                 goto err;
3159
3160         status = be_mac_setup(adapter);
3161         if (status)
3162                 goto err;
3163
3164         status = be_tx_qs_create(adapter);
3165         if (status)
3166                 goto err;
3167
3168         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3169
3170         if (adapter->vlans_added)
3171                 be_vid_config(adapter);
3172
3173         be_set_rx_mode(adapter->netdev);
3174
3175         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3176
3177         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3178                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3179                                         adapter->rx_fc);
3180
3181         if (be_physfn(adapter)) {
3182                 if (adapter->dev_num_vfs)
3183                         be_vf_setup(adapter);
3184                 else
3185                         dev_warn(dev, "device doesn't support SRIOV\n");
3186         }
3187
3188         status = be_cmd_get_phy_info(adapter);
3189         if (!status && be_pause_supported(adapter))
3190                 adapter->phy.fc_autoneg = 1;
3191
3192         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3193         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3194         return 0;
3195 err:
3196         be_clear(adapter);
3197         return status;
3198 }
3199
3200 #ifdef CONFIG_NET_POLL_CONTROLLER
3201 static void be_netpoll(struct net_device *netdev)
3202 {
3203         struct be_adapter *adapter = netdev_priv(netdev);
3204         struct be_eq_obj *eqo;
3205         int i;
3206
3207         for_all_evt_queues(adapter, eqo, i) {
3208                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3209                 napi_schedule(&eqo->napi);
3210         }
3213 }
3214 #endif
3215
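/* The flash-directory cookie is kept as two 16-byte entries ("*** SE FLAS"
 * and "H DIRECTORY *** "); get_fsec_info() below matches the whole 32-byte
 * block, padding included, with a single memcmp.
 */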
3216 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3217 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3218
3219 static bool be_flash_redboot(struct be_adapter *adapter,
3220                         const u8 *p, u32 img_start, int image_size,
3221                         int hdr_size)
3222 {
3223         u32 crc_offset;
3224         u8 flashed_crc[4];
3225         int status;
3226
3227         crc_offset = hdr_size + img_start + image_size - 4;
3228
3229         p += crc_offset;
3230
3231         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3232                         (image_size - 4));
3233         if (status) {
3234                 dev_err(&adapter->pdev->dev,
3235                 "could not get crc from flash, not flashing redboot\n");
3236                 return false;
3237         }
3238
3239         /* update redboot only if crc does not match */
3240         if (!memcmp(flashed_crc, p, 4))
3241                 return false;
3242         else
3243                 return true;
3244 }
3245
3246 static bool phy_flashing_required(struct be_adapter *adapter)
3247 {
3248         return (adapter->phy.phy_type == TN_8022 &&
3249                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3250 }
3251
3252 static bool is_comp_in_ufi(struct be_adapter *adapter,
3253                            struct flash_section_info *fsec, int type)
3254 {
3255         int i = 0, img_type = 0;
3256         struct flash_section_info_g2 *fsec_g2 = NULL;
3257
3258         if (BE2_chip(adapter))
3259                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3260
3261         for (i = 0; i < MAX_FLASH_COMP; i++) {
3262                 if (fsec_g2)
3263                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3264                 else
3265                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3266
3267                 if (img_type == type)
3268                         return true;
3269         }
3270         return false;
3272 }
3273
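/* Walk the UFI image past the file header in 32-byte steps until a section
 * whose cookie matches flash_cookie is found; NULL means the image carries
 * no valid flash directory.
 */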
3274 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3275                                          int header_size,
3276                                          const struct firmware *fw)
3277 {
3278         struct flash_section_info *fsec = NULL;
3279         const u8 *p = fw->data;
3280
3281         p += header_size;
3282         while (p < (fw->data + fw->size)) {
3283                 fsec = (struct flash_section_info *)p;
3284                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3285                         return fsec;
3286                 p += 32;
3287         }
3288         return NULL;
3289 }
3290
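/* Write one image to flash in 32KB chunks. Every chunk except the last is
 * sent with a SAVE op (the FW buffers it); the final chunk uses a FLASH op,
 * which commits the whole image. E.g. a 100KB image goes out as three 32KB
 * SAVE ops followed by one 4KB FLASH op. An ILLEGAL_IOCTL_REQ on a PHY-FW
 * write is tolerated rather than treated as fatal (see the break below).
 */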
3291 static int be_flash(struct be_adapter *adapter, const u8 *img,
3292                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3293 {
3294         u32 total_bytes = 0, flash_op, num_bytes = 0;
3295         int status = 0;
3296         struct be_cmd_write_flashrom *req = flash_cmd->va;
3297
3298         total_bytes = img_size;
3299         while (total_bytes) {
3300                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3301
3302                 total_bytes -= num_bytes;
3303
3304                 if (!total_bytes) {
3305                         if (optype == OPTYPE_PHY_FW)
3306                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3307                         else
3308                                 flash_op = FLASHROM_OPER_FLASH;
3309                 } else {
3310                         if (optype == OPTYPE_PHY_FW)
3311                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3312                         else
3313                                 flash_op = FLASHROM_OPER_SAVE;
3314                 }
3315
3316                 memcpy(req->data_buf, img, num_bytes);
3317                 img += num_bytes;
3318                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3319                                                 flash_op, num_bytes);
3320                 if (status) {
3321                         if (status == ILLEGAL_IOCTL_REQ &&
3322                             optype == OPTYPE_PHY_FW)
3323                                 break;
3324                         dev_err(&adapter->pdev->dev,
3325                                 "cmd to write to flash rom failed.\n");
3326                         return status;
3327                 }
3328         }
3329         return 0;
3330 }
3331
3332 /* For BE2, BE3 and BE3-R */
3333 static int be_flash_BEx(struct be_adapter *adapter,
3334                          const struct firmware *fw,
3335                          struct be_dma_mem *flash_cmd,
3336                          int num_of_images)
3338 {
3339         int status = 0, i, filehdr_size = 0;
3340         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3341         const u8 *p = fw->data;
3342         const struct flash_comp *pflashcomp;
3343         int num_comp, redboot;
3344         struct flash_section_info *fsec = NULL;
3345
3346         struct flash_comp gen3_flash_types[] = {
3347                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3348                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3349                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3350                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3351                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3352                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3353                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3354                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3355                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3356                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3357                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3358                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3359                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3360                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3361                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3362                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3363                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3364                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3365                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3366                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3367         };
3368
3369         struct flash_comp gen2_flash_types[] = {
3370                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3371                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3372                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3373                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3374                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3375                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3376                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3377                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3378                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3379                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3380                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3381                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3382                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3383                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3384                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3385                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3386         };
3387
3388         if (BE3_chip(adapter)) {
3389                 pflashcomp = gen3_flash_types;
3390                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3391                 num_comp = ARRAY_SIZE(gen3_flash_types);
3392         } else {
3393                 pflashcomp = gen2_flash_types;
3394                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3395                 num_comp = ARRAY_SIZE(gen2_flash_types);
3396         }
3397
3398         /* Get flash section info */
3399         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3400         if (!fsec) {
3401                 dev_err(&adapter->pdev->dev,
3402                         "Invalid Cookie. UFI corrupted?\n");
3403                 return -EINVAL;
3404         }
3405         for (i = 0; i < num_comp; i++) {
3406                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3407                         continue;
3408
3409                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3410                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3411                         continue;
3412
3413                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3414                     !phy_flashing_required(adapter))
3415                         continue;
3416
3417                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3418                         redboot = be_flash_redboot(adapter, fw->data,
3419                                 pflashcomp[i].offset, pflashcomp[i].size,
3420                                 filehdr_size + img_hdrs_size);
3421                         if (!redboot)
3422                                 continue;
3423                 }
3424
3425                 p = fw->data;
3426                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3427                 if (p + pflashcomp[i].size > fw->data + fw->size)
3428                         return -EINVAL;
3429
3430                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3431                                         pflashcomp[i].size);
3432                 if (status) {
3433                         dev_err(&adapter->pdev->dev,
3434                                 "Flashing section type %d failed.\n",
3435                                 pflashcomp[i].img_type);
3436                         return status;
3437                 }
3438         }
3439         return 0;
3440 }
3441
3442 static int be_flash_skyhawk(struct be_adapter *adapter,
3443                 const struct firmware *fw,
3444                 struct be_dma_mem *flash_cmd, int num_of_images)
3445 {
3446         int status = 0, i, filehdr_size = 0;
3447         int img_offset, img_size, img_optype, redboot;
3448         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3449         const u8 *p = fw->data;
3450         struct flash_section_info *fsec = NULL;
3451
3452         filehdr_size = sizeof(struct flash_file_hdr_g3);
3453         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3454         if (!fsec) {
3455                 dev_err(&adapter->pdev->dev,
3456                         "Invalid Cookie. UFI corrupted?\n");
3457                 return -EINVAL;
3458         }
3459
3460         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3461                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3462                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3463
3464                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3465                 case IMAGE_FIRMWARE_iSCSI:
3466                         img_optype = OPTYPE_ISCSI_ACTIVE;
3467                         break;
3468                 case IMAGE_BOOT_CODE:
3469                         img_optype = OPTYPE_REDBOOT;
3470                         break;
3471                 case IMAGE_OPTION_ROM_ISCSI:
3472                         img_optype = OPTYPE_BIOS;
3473                         break;
3474                 case IMAGE_OPTION_ROM_PXE:
3475                         img_optype = OPTYPE_PXE_BIOS;
3476                         break;
3477                 case IMAGE_OPTION_ROM_FCoE:
3478                         img_optype = OPTYPE_FCOE_BIOS;
3479                         break;
3480                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3481                         img_optype = OPTYPE_ISCSI_BACKUP;
3482                         break;
3483                 case IMAGE_NCSI:
3484                         img_optype = OPTYPE_NCSI_FW;
3485                         break;
3486                 default:
3487                         continue;
3488                 }
3489
3490                 if (img_optype == OPTYPE_REDBOOT) {
3491                         redboot = be_flash_redboot(adapter, fw->data,
3492                                         img_offset, img_size,
3493                                         filehdr_size + img_hdrs_size);
3494                         if (!redboot)
3495                                 continue;
3496                 }
3497
3498                 p = fw->data;
3499                 p += filehdr_size + img_offset + img_hdrs_size;
3500                 if (p + img_size > fw->data + fw->size)
3501                         return -EINVAL;
3502
3503                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3504                 if (status) {
3505                         dev_err(&adapter->pdev->dev,
3506                                 "Flashing section type %d failed.\n",
3507                                 le32_to_cpu(fsec->fsec_entry[i].type));
3508                         return status;
3509                 }
3510         }
3511         return 0;
3512 }
3513
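/* Lancer flashing protocol: stream the image to the "/prg" object in 32KB
 * WRITE_OBJECT chunks, then issue a zero-length write at the final offset to
 * commit it. Depending on change_status the new FW then needs either a FW
 * reset (issued here) or a full system reboot to become active.
 */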
3514 static int lancer_fw_download(struct be_adapter *adapter,
3515                                 const struct firmware *fw)
3516 {
3517 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3518 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3519         struct be_dma_mem flash_cmd;
3520         const u8 *data_ptr = NULL;
3521         u8 *dest_image_ptr = NULL;
3522         size_t image_size = 0;
3523         u32 chunk_size = 0;
3524         u32 data_written = 0;
3525         u32 offset = 0;
3526         int status = 0;
3527         u8 add_status = 0;
3528         u8 change_status;
3529
3530         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3531                 dev_err(&adapter->pdev->dev,
3532                         "FW Image not properly aligned. Length must be 4 byte aligned.\n");
3534                 status = -EINVAL;
3535                 goto lancer_fw_exit;
3536         }
3537
3538         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3539                                 + LANCER_FW_DOWNLOAD_CHUNK;
3540         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3541                                           &flash_cmd.dma, GFP_KERNEL);
3542         if (!flash_cmd.va) {
3543                 status = -ENOMEM;
3544                 goto lancer_fw_exit;
3545         }
3546
3547         dest_image_ptr = flash_cmd.va +
3548                                 sizeof(struct lancer_cmd_req_write_object);
3549         image_size = fw->size;
3550         data_ptr = fw->data;
3551
3552         while (image_size) {
3553                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3554
3555                 /* Copy the image chunk content. */
3556                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3557
3558                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3559                                                  chunk_size, offset,
3560                                                  LANCER_FW_DOWNLOAD_LOCATION,
3561                                                  &data_written, &change_status,
3562                                                  &add_status);
3563                 if (status)
3564                         break;
3565
3566                 offset += data_written;
3567                 data_ptr += data_written;
3568                 image_size -= data_written;
3569         }
3570
3571         if (!status) {
3572                 /* Commit the FW written */
3573                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3574                                                  0, offset,
3575                                                  LANCER_FW_DOWNLOAD_LOCATION,
3576                                                  &data_written, &change_status,
3577                                                  &add_status);
3578         }
3579
3580         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3581                                 flash_cmd.dma);
3582         if (status) {
3583                 dev_err(&adapter->pdev->dev,
3584                         "Firmware load error. Status code: 0x%x Additional Status: 0x%x\n",
3586                         status, add_status);
3587                 goto lancer_fw_exit;
3588         }
3589
3590         if (change_status == LANCER_FW_RESET_NEEDED) {
3591                 status = lancer_physdev_ctrl(adapter,
3592                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3593                 if (status) {
3594                         dev_err(&adapter->pdev->dev,
3595                                 "Adapter busy for FW reset.\n"
3596                                 "New FW will not be active.\n");
3597                         goto lancer_fw_exit;
3598                 }
3599         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3600                 dev_err(&adapter->pdev->dev,
3601                         "System reboot required for new FW to be active\n");
3603         }
3604
3605         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3606 lancer_fw_exit:
3607         return status;
3608 }
3609
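/* UFI image types as encoded in the file header: build '2' => BE2, build
 * '3' => BE3 (a header asic_type_rev of 0x10 marks a BE3-R image), build
 * '4' => Skyhawk. be_get_ufi_type() rejects images that don't match the
 * adapter in hand.
 */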
3610 #define UFI_TYPE2               2
3611 #define UFI_TYPE3               3
3612 #define UFI_TYPE3R              10
3613 #define UFI_TYPE4               4
3614 static int be_get_ufi_type(struct be_adapter *adapter,
3615                            struct flash_file_hdr_g3 *fhdr)
3616 {
3617         if (fhdr == NULL)
3618                 goto be_get_ufi_exit;
3619
3620         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3621                 return UFI_TYPE4;
3622         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3623                 if (fhdr->asic_type_rev == 0x10)
3624                         return UFI_TYPE3R;
3625                 else
3626                         return UFI_TYPE3;
3627         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3628                 return UFI_TYPE2;
3629
3630 be_get_ufi_exit:
3631         dev_err(&adapter->pdev->dev,
3632                 "UFI and Interface are not compatible for flashing\n");
3633         return -1;
3634 }
3635
3636 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3637 {
3638         struct flash_file_hdr_g3 *fhdr3;
3639         struct image_hdr *img_hdr_ptr = NULL;
3640         struct be_dma_mem flash_cmd;
3641         const u8 *p;
3642         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3643
3644         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3645         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3646                                           &flash_cmd.dma, GFP_KERNEL);
3647         if (!flash_cmd.va) {
3648                 status = -ENOMEM;
3649                 goto be_fw_exit;
3650         }
3651
3652         p = fw->data;
3653         fhdr3 = (struct flash_file_hdr_g3 *)p;
3654
3655         ufi_type = be_get_ufi_type(adapter, fhdr3);
3656
3657         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3658         for (i = 0; i < num_imgs; i++) {
3659                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3660                                 (sizeof(struct flash_file_hdr_g3) +
3661                                  i * sizeof(struct image_hdr)));
3662                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3663                         switch (ufi_type) {
3664                         case UFI_TYPE4:
3665                                 status = be_flash_skyhawk(adapter, fw,
3666                                                         &flash_cmd, num_imgs);
3667                                 break;
3668                         case UFI_TYPE3R:
3669                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3670                                                       num_imgs);
3671                                 break;
3672                         case UFI_TYPE3:
3673                                 /* Do not flash this ufi on BE3-R cards */
3674                                 if (adapter->asic_rev < 0x10)
3675                                         status = be_flash_BEx(adapter, fw,
3676                                                               &flash_cmd,
3677                                                               num_imgs);
3678                                 else {
3679                                         status = -1;
3680                                         dev_err(&adapter->pdev->dev,
3681                                                 "Can't load BE3 UFI on BE3R\n");
3682                                 }
3683                         }
3684                 }
3685         }
3686
3687         if (ufi_type == UFI_TYPE2)
3688                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3689         else if (ufi_type == -1)
3690                 status = -1;
3691
3692         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3693                           flash_cmd.dma);
3694         if (status) {
3695                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3696                 goto be_fw_exit;
3697         }
3698
3699         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3700
3701 be_fw_exit:
3702         return status;
3703 }
3704
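/* Entry point for user-initiated flashing; typically reached through the
 * ethtool flash-device op (e.g. "ethtool -f ethX <image>.ufi"), assuming the
 * standard ethtool plumbing in be_ethtool.c.
 */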
3705 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3706 {
3707         const struct firmware *fw;
3708         int status;
3709
3710         if (!netif_running(adapter->netdev)) {
3711                 dev_err(&adapter->pdev->dev,
3712                         "Firmware load not allowed (interface is down)\n");
3713                 return -ENETDOWN;
3714         }
3715
3716         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3717         if (status)
3718                 goto fw_exit;
3719
3720         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3721
3722         if (lancer_chip(adapter))
3723                 status = lancer_fw_download(adapter, fw);
3724         else
3725                 status = be_fw_download(adapter, fw);
3726
3727         if (!status)
3728                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3729                                   adapter->fw_on_flash);
3730
3731 fw_exit:
3732         release_firmware(fw);
3733         return status;
3734 }
3735
3736 static const struct net_device_ops be_netdev_ops = {
3737         .ndo_open               = be_open,
3738         .ndo_stop               = be_close,
3739         .ndo_start_xmit         = be_xmit,
3740         .ndo_set_rx_mode        = be_set_rx_mode,
3741         .ndo_set_mac_address    = be_mac_addr_set,
3742         .ndo_change_mtu         = be_change_mtu,
3743         .ndo_get_stats64        = be_get_stats64,
3744         .ndo_validate_addr      = eth_validate_addr,
3745         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3746         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3747         .ndo_set_vf_mac         = be_set_vf_mac,
3748         .ndo_set_vf_vlan        = be_set_vf_vlan,
3749         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3750         .ndo_get_vf_config      = be_get_vf_config,
3751 #ifdef CONFIG_NET_POLL_CONTROLLER
3752         .ndo_poll_controller    = be_netpoll,
3753 #endif
3754 };
3755
3756 static void be_netdev_init(struct net_device *netdev)
3757 {
3758         struct be_adapter *adapter = netdev_priv(netdev);
3759         struct be_eq_obj *eqo;
3760         int i;
3761
3762         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3763                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3764                 NETIF_F_HW_VLAN_CTAG_TX;
3765         if (be_multi_rxq(adapter))
3766                 netdev->hw_features |= NETIF_F_RXHASH;
3767
3768         netdev->features |= netdev->hw_features |
3769                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3770
3771         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3772                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3773
3774         netdev->priv_flags |= IFF_UNICAST_FLT;
3775
3776         netdev->flags |= IFF_MULTICAST;
3777
3778         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3779
3780         netdev->netdev_ops = &be_netdev_ops;
3781
3782         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3783
3784         for_all_evt_queues(adapter, eqo, i)
3785                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3786 }
3787
3788 static void be_unmap_pci_bars(struct be_adapter *adapter)
3789 {
3790         if (adapter->csr)
3791                 pci_iounmap(adapter->pdev, adapter->csr);
3792         if (adapter->db)
3793                 pci_iounmap(adapter->pdev, adapter->db);
3794 }
3795
3796 static int db_bar(struct be_adapter *adapter)
3797 {
3798         if (lancer_chip(adapter) || !be_physfn(adapter))
3799                 return 0;
3800         else
3801                 return 4;
3802 }
3803
3804 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3805 {
3806         if (skyhawk_chip(adapter)) {
3807                 adapter->roce_db.size = 4096;
3808                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3809                                                               db_bar(adapter));
3810                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3811                                                                db_bar(adapter));
3812         }
3813         return 0;
3814 }
3815
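/* Map the PCI BARs: the CSR window (BAR 2) exists only on BE2/BE3 PFs; the
 * doorbell BAR is 0 on Lancer and on VFs, else 4 (see db_bar() above).
 */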
3816 static int be_map_pci_bars(struct be_adapter *adapter)
3817 {
3818         u8 __iomem *addr;
3819         u32 sli_intf;
3820
3821         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3822         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3823                                 SLI_INTF_IF_TYPE_SHIFT;
3824
3825         if (BEx_chip(adapter) && be_physfn(adapter)) {
3826                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3827                 if (adapter->csr == NULL)
3828                         return -ENOMEM;
3829         }
3830
3831         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3832         if (addr == NULL)
3833                 goto pci_map_err;
3834         adapter->db = addr;
3835
3836         be_roce_map_pci_bars(adapter);
3837         return 0;
3838
3839 pci_map_err:
3840         be_unmap_pci_bars(adapter);
3841         return -ENOMEM;
3842 }
3843
3844 static void be_ctrl_cleanup(struct be_adapter *adapter)
3845 {
3846         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3847
3848         be_unmap_pci_bars(adapter);
3849
3850         if (mem->va)
3851                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3852                                   mem->dma);
3853
3854         mem = &adapter->rx_filter;
3855         if (mem->va)
3856                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3857                                   mem->dma);
3858 }
3859
3860 static int be_ctrl_init(struct be_adapter *adapter)
3861 {
3862         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3863         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3864         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3865         u32 sli_intf;
3866         int status;
3867
3868         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3869         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3870                                  SLI_INTF_FAMILY_SHIFT;
3871         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3872
3873         status = be_map_pci_bars(adapter);
3874         if (status)
3875                 goto done;
3876
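        /* Over-allocate the mailbox by 16 bytes so that a 16-byte-aligned
         * view of it (mbox_mem_align, via PTR_ALIGN below) always fits
         * inside the allocation; the FW mailbox is accessed only through
         * the aligned copy.
         */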
3877         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3878         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3879                                                 mbox_mem_alloc->size,
3880                                                 &mbox_mem_alloc->dma,
3881                                                 GFP_KERNEL);
3882         if (!mbox_mem_alloc->va) {
3883                 status = -ENOMEM;
3884                 goto unmap_pci_bars;
3885         }
3886         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3887         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3888         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3889         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3890
3891         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3892         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3893                                            &rx_filter->dma,
3894                                            GFP_KERNEL | __GFP_ZERO);
3895         if (rx_filter->va == NULL) {
3896                 status = -ENOMEM;
3897                 goto free_mbox;
3898         }
3899
3900         mutex_init(&adapter->mbox_lock);
3901         spin_lock_init(&adapter->mcc_lock);
3902         spin_lock_init(&adapter->mcc_cq_lock);
3903
3904         init_completion(&adapter->flash_compl);
3905         pci_save_state(adapter->pdev);
3906         return 0;
3907
3908 free_mbox:
3909         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3910                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3911
3912 unmap_pci_bars:
3913         be_unmap_pci_bars(adapter);
3914
3915 done:
3916         return status;
3917 }
3918
3919 static void be_stats_cleanup(struct be_adapter *adapter)
3920 {
3921         struct be_dma_mem *cmd = &adapter->stats_cmd;
3922
3923         if (cmd->va)
3924                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3925                                   cmd->va, cmd->dma);
3926 }
3927
3928 static int be_stats_init(struct be_adapter *adapter)
3929 {
3930         struct be_dma_mem *cmd = &adapter->stats_cmd;
3931
3932         if (lancer_chip(adapter))
3933                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3934         else if (BE2_chip(adapter))
3935                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3936         else
3937                 /* BE3 and Skyhawk */
3938                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3939
3940         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3941                                      GFP_KERNEL | __GFP_ZERO);
3942         if (cmd->va == NULL)
3943                 return -ENOMEM;
3944         return 0;
3945 }
3946
3947 static void be_remove(struct pci_dev *pdev)
3948 {
3949         struct be_adapter *adapter = pci_get_drvdata(pdev);
3950
3951         if (!adapter)
3952                 return;
3953
3954         be_roce_dev_remove(adapter);
3955         be_intr_set(adapter, false);
3956
3957         cancel_delayed_work_sync(&adapter->func_recovery_work);
3958
3959         unregister_netdev(adapter->netdev);
3960
3961         be_clear(adapter);
3962
3963         /* tell fw we're done with firing cmds */
3964         be_cmd_fw_clean(adapter);
3965
3966         be_stats_cleanup(adapter);
3967
3968         be_ctrl_cleanup(adapter);
3969
3970         pci_disable_pcie_error_reporting(pdev);
3971
3972         pci_set_drvdata(pdev, NULL);
3973         pci_release_regions(pdev);
3974         pci_disable_device(pdev);
3975
3976         free_netdev(adapter->netdev);
3977 }
3978
3979 bool be_is_wol_supported(struct be_adapter *adapter)
3980 {
3981         return (adapter->wol_cap & BE_WOL_CAP) &&
3982                 !be_is_wol_excluded(adapter);
3983 }
3984
3985 u32 be_get_fw_log_level(struct be_adapter *adapter)
3986 {
3987         struct be_dma_mem extfat_cmd;
3988         struct be_fat_conf_params *cfgs;
3989         int status;
3990         u32 level = 0;
3991         int j;
3992
3993         if (lancer_chip(adapter))
3994                 return 0;
3995
3996         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3997         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3998         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3999                                              &extfat_cmd.dma);
4000
4001         if (!extfat_cmd.va) {
4002                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4003                         __func__);
4004                 goto err;
4005         }
4006
4007         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4008         if (!status) {
4009                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4010                                                 sizeof(struct be_cmd_resp_hdr));
4011                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4012                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4013                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4014                 }
4015         }
4016         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4017                             extfat_cmd.dma);
4018 err:
4019         return level;
4020 }
4021
4022 static int be_get_initial_config(struct be_adapter *adapter)
4023 {
4024         int status;
4025         u32 level;
4026
4027         status = be_cmd_get_cntl_attributes(adapter);
4028         if (status)
4029                 return status;
4030
4031         status = be_cmd_get_acpi_wol_cap(adapter);
4032         if (status) {
4033                 /* in case of a failure to get WoL capabilities,
4034                  * check the exclusion list to determine WoL capability */
4035                 if (!be_is_wol_excluded(adapter))
4036                         adapter->wol_cap |= BE_WOL_CAP;
4037         }
4038
4039         if (be_is_wol_supported(adapter))
4040                 adapter->wol = true;
4041
4042         /* Must be a power of 2 or else MODULO will BUG_ON */
4043         adapter->be_get_temp_freq = 64;
4044
4045         level = be_get_fw_log_level(adapter);
4046         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4047
4048         return 0;
4049 }
4050
4051 static int lancer_recover_func(struct be_adapter *adapter)
4052 {
4053         struct device *dev = &adapter->pdev->dev;
4054         int status;
4055
4056         status = lancer_test_and_set_rdy_state(adapter);
4057         if (status)
4058                 goto err;
4059
4060         if (netif_running(adapter->netdev))
4061                 be_close(adapter->netdev);
4062
4063         be_clear(adapter);
4064
4065         be_clear_all_error(adapter);
4066
4067         status = be_setup(adapter);
4068         if (status)
4069                 goto err;
4070
4071         if (netif_running(adapter->netdev)) {
4072                 status = be_open(adapter->netdev);
4073                 if (status)
4074                         goto err;
4075         }
4076
4077         dev_info(dev, "Error recovery successful\n");
4078         return 0;
4079 err:
4080         if (status == -EAGAIN)
4081                 dev_err(dev, "Waiting for resource provisioning\n");
4082         else
4083                 dev_err(dev, "Error recovery failed\n");
4084
4085         return status;
4086 }
4087
4088 static void be_func_recovery_task(struct work_struct *work)
4089 {
4090         struct be_adapter *adapter =
4091                 container_of(work, struct be_adapter,  func_recovery_work.work);
4092         int status = 0;
4093
4094         be_detect_error(adapter);
4095
4096         if (adapter->hw_error && lancer_chip(adapter)) {
4098                 rtnl_lock();
4099                 netif_device_detach(adapter->netdev);
4100                 rtnl_unlock();
4101
4102                 status = lancer_recover_func(adapter);
4103                 if (!status)
4104                         netif_device_attach(adapter->netdev);
4105         }
4106
4107         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4108          * no need to attempt further recovery.
4109          */
4110         if (!status || status == -EAGAIN)
4111                 schedule_delayed_work(&adapter->func_recovery_work,
4112                                       msecs_to_jiffies(1000));
4113 }
4114
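/* Periodic (1s) housekeeping: with the interface down only pending MCC
 * completions are reaped; otherwise it also kicks off a stats query, samples
 * die temperature every be_get_temp_freq ticks, replenishes RX queues that
 * starved of posted buffers, and re-tunes the EQ delays.
 */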
4115 static void be_worker(struct work_struct *work)
4116 {
4117         struct be_adapter *adapter =
4118                 container_of(work, struct be_adapter, work.work);
4119         struct be_rx_obj *rxo;
4120         struct be_eq_obj *eqo;
4121         int i;
4122
4123         /* when interrupts are not yet enabled, just reap any pending
4124          * mcc completions */
4125         if (!netif_running(adapter->netdev)) {
4126                 local_bh_disable();
4127                 be_process_mcc(adapter);
4128                 local_bh_enable();
4129                 goto reschedule;
4130         }
4131
4132         if (!adapter->stats_cmd_sent) {
4133                 if (lancer_chip(adapter))
4134                         lancer_cmd_get_pport_stats(adapter,
4135                                                 &adapter->stats_cmd);
4136                 else
4137                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4138         }
4139
4140         if (be_physfn(adapter) &&
4141             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4142                 be_cmd_get_die_temperature(adapter);
4143
4144         for_all_rx_queues(adapter, rxo, i) {
4145                 if (rxo->rx_post_starved) {
4146                         rxo->rx_post_starved = false;
4147                         be_post_rx_frags(rxo, GFP_KERNEL);
4148                 }
4149         }
4150
4151         for_all_evt_queues(adapter, eqo, i)
4152                 be_eqd_update(adapter, eqo);
4153
4154 reschedule:
4155         adapter->work_counter++;
4156         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4157 }
4158
4159 /* If any VFs are already enabled don't FLR the PF */
4160 static bool be_reset_required(struct be_adapter *adapter)
4161 {
4162         return !pci_num_vf(adapter->pdev);
4163 }
4164
4165 static char *mc_name(struct be_adapter *adapter)
4166 {
4167         if (adapter->function_mode & FLEX10_MODE)
4168                 return "FLEX10";
4169         else if (adapter->function_mode & VNIC_MODE)
4170                 return "vNIC";
4171         else if (adapter->function_mode & UMC_ENABLED)
4172                 return "UMC";
4173         else
4174                 return "";
4175 }
4176
4177 static inline char *func_name(struct be_adapter *adapter)
4178 {
4179         return be_physfn(adapter) ? "PF" : "VF";
4180 }
4181
4182 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4183 {
4184         int status = 0;
4185         struct be_adapter *adapter;
4186         struct net_device *netdev;
4187         char port_name;
4188
4189         status = pci_enable_device(pdev);
4190         if (status)
4191                 goto do_none;
4192
4193         status = pci_request_regions(pdev, DRV_NAME);
4194         if (status)
4195                 goto disable_dev;
4196         pci_set_master(pdev);
4197
4198         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4199         if (netdev == NULL) {
4200                 status = -ENOMEM;
4201                 goto rel_reg;
4202         }
4203         adapter = netdev_priv(netdev);
4204         adapter->pdev = pdev;
4205         pci_set_drvdata(pdev, adapter);
4206         adapter->netdev = netdev;
4207         SET_NETDEV_DEV(netdev, &pdev->dev);
4208
4209         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4210         if (!status) {
4211                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4212                 if (status < 0) {
4213                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4214                         goto free_netdev;
4215                 }
4216                 netdev->features |= NETIF_F_HIGHDMA;
4217         } else {
4218                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4219                 if (!status)
4220                         status = dma_set_coherent_mask(&pdev->dev,
4221                                                        DMA_BIT_MASK(32));
4222                 if (status) {
4223                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4224                         goto free_netdev;
4225                 }
4226         }
4227
4228         status = pci_enable_pcie_error_reporting(pdev);
4229         if (status)
4230                 dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
4231
4232         status = be_ctrl_init(adapter);
4233         if (status)
4234                 goto free_netdev;
4235
4236         /* sync up with fw's ready state */
4237         if (be_physfn(adapter)) {
4238                 status = be_fw_wait_ready(adapter);
4239                 if (status)
4240                         goto ctrl_clean;
4241         }
4242
4243         if (be_reset_required(adapter)) {
4244                 status = be_cmd_reset_function(adapter);
4245                 if (status)
4246                         goto ctrl_clean;
4247
4248                 /* Wait for interrupts to quiesce after an FLR */
4249                 msleep(100);
4250         }
4251
        /* Allow interrupts for other ULPs running on NIC function */
        be_intr_set(adapter, true);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_initial_config(adapter);
        if (status)
                goto stats_clean;

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
        adapter->rx_fc = adapter->tx_fc = true;

        status = be_setup(adapter);
        if (status)
                goto stats_clean;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status)
                goto unsetup;

        be_roce_dev_add(adapter);

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));

        be_cmd_query_port_name(adapter, &port_name);

        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), port_name);

        return 0;

unsetup:
        be_clear(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

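/* Legacy PCI PM suspend: arm WoL if configured, stop the recovery
 * worker, tear down rings and interrupts via be_clear() and put the
 * device into the requested low-power state.
 */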
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (adapter->wol)
                be_setup_wol(adapter, true);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

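/* Mirror image of be_suspend(): restore PCI state, re-init the FW,
 * rebuild resources with be_setup() and restart the recovery worker.
 */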
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        status = be_setup(adapter);
        if (status)
                return status;

        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        return 0;
}

/* An FLR will stop BE from DMAing any data */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

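/* EEH/AER recovery: the PCI channel is frozen, so detach the netdev
 * and tear down the function, then tell the EEH core whether a slot
 * reset may recover the device.
 */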
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        if (!adapter->eeh_error) {
                adapter->eeh_error = true;

                cancel_delayed_work_sync(&adapter->func_recovery_work);

                rtnl_lock();
                netif_device_detach(netdev);
                if (netif_running(netdev))
                        be_close(netdev);
                rtnl_unlock();

                be_clear(adapter);
        }

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while a flash dump is in progress can cause
         * it not to recover, so wait for the dump to finish.  Waiting is
         * needed only once per adapter, hence the check for function 0.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

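/* Called after the slot has been reset: re-enable the device and
 * report recovery only once the FW is ready again.
 */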
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        be_clear_all_error(adapter);
        return PCI_ERS_RESULT_RECOVERED;
}

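/* Final stage of EEH recovery: redo the function-level init that
 * be_probe() performed and re-attach the netdev.
 */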
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

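/* The EEH core invokes these in order: error_detected, then slot_reset,
 * then resume (the optional mmio_enabled stage is not implemented here).
 */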
static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

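/* Module entry: sanitize rx_frag_size before registering with the PCI
 * core.  Load-time example (module name assumed to match DRV_NAME,
 * i.e. "be2net"):
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 */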
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                pr_warn(DRV_NAME " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);