/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}
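
/* Usage sketch (illustrative, not from this file): a ring of 1024 16-byte
 * entries allocates a single 16 KB zeroed DMA-coherent buffer:
 *
 *        struct be_queue_info q;
 *
 *        if (be_queue_alloc(adapter, &q, 1024, 16))
 *                return -ENOMEM;         // mem->size = 1024 * 16
 *        ...
 *        be_queue_free(adapter, &q);     // safe even after a failed alloc,
 *                                        // since q was memset and va is NULL
 */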

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
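
/* Doorbell sketch: each notify helper packs the ring id, the rearm and
 * clear-interrupt flags, and the number of consumed entries into a single
 * 32-bit doorbell write (bit positions come from the DB_*_SHIFT/MASK macros
 * in be_hw.h). Typical callers, shown here for illustration only: re-arm an
 * EQ after servicing it,
 *
 *        be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
 *
 * or acknowledge CQ entries without re-arming while still polling,
 *
 *        be_cq_notify(adapter, cq->id, false, work_done);
 */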

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* The PMAC_ADD cmd may fail if the VF doesn't have the FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. That failure is OK only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide whether the new MAC was successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered = port_stats->rx_address_filtered +
                                    port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered = pport_stats->rx_address_filtered +
                                    pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}
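
/* Worked example (illustrative): with *acc == 0x0001FFF0 and val == 0x0005,
 * lo(*acc) is 0xFFF0, so val < lo(*acc) means the 16-bit HW counter wrapped.
 * newacc = hi(*acc) + val = 0x00010005, plus 65536 for the wrap, giving
 * 0x00020005: the upper half keeps counting wraps while the lower half
 * tracks the current HW counter value.
 */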

static void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* The erx HW counter below can wrap around after 65535;
                 * the driver accumulates it into a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
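
/* Example (illustrative): an skb with linear data plus two page frags needs
 * 1 (head) + 2 (frags) + 1 (hdr wrb) = 4 WRBs; 4 is even, so no dummy is
 * added. With one frag the count would be 3, so a dummy WRB pads it to 4 on
 * BE-x chips; Lancer has no even-count requirement.
 */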

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If the vlan priority provided by the OS is NOT in the available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}
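
/* Example (illustrative): for a TCI of 0x6005 the priority bits are
 * (0x6005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT == 3. If bit 3 is clear in
 * adapter->vlan_prio_bmap, the priority field is replaced with
 * adapter->recommended_prio while the VID bits (0x005) are preserved.
 */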

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
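
/* Error-path sketch: on a DMA mapping failure make_tx_wrbs() rewinds
 * txq->head to map_head and walks the WRBs it has already filled, unmapping
 * each one (only the first can be a single-buffer mapping; the rest are
 * page frags) until the running 'copied' byte count reaches zero, then
 * returns 0 so the caller drops the skb without ringing the doorbell.
 */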

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* f/w workaround: setting skip_hw_vlan = 1 informs the F/W
                 * to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}
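
/* QinQ sketch (illustrative): in QnQ mode the inner tag (the OS-supplied
 * TCI, or adapter->pvid as a fallback) is written into the frame by the
 * first __vlan_put_tag() call above; the second call then prepends the
 * adapter->qnq_vid tag, which becomes the outer tag, so the frame leaves
 * the driver double-tagged with HW insertion suppressed via *skip_hw_vlan.
 */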

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* The Lancer ASIC has a bug wherein packets that are 32 bytes or less
         * may cause a transmit stall on that port. The workaround is to
         * pad such packets to a 36-byte length.
         */
        if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert the VLAN in the pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lock up when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts a VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in the driver,
         * and set the event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb)
                return NETDEV_TX_OK;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}
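
/* Bounds sketch (illustrative): the upper MTU limit is the max jumbo frame
 * size minus the L2 overhead. Assuming BE_MAX_JUMBO_FRAME_SIZE == 9018,
 * with ETH_HLEN == 14 and ETH_FCS_LEN == 4 the largest accepted MTU would
 * be 9018 - (14 + 4) = 9000 bytes.
 */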

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct the VLAN table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}
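
/* Filtering sketch: adapter->vlan_tag[] is a VLAN_N_VID-entry membership
 * map indexed by VID; be_vid_config() compacts the set VIDs into the vids[]
 * array (little-endian, as the FW expects) and hands it to the FW. Once
 * more VIDs are added than be_max_vlans() allows, filtering is abandoned
 * in favor of VLAN promiscuous mode rather than silently dropping VLANs.
 */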

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= be_max_vlans(adapter))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is a new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle, 0);
                }
        } else {
                /* Reset transparent VLAN tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle, 0);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}
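
/* Units sketch (assumption): 'rate' is taken in Mbps and the FW interface
 * appears to work in 10 Mbps units, hence the 'rate / 10' conversion; e.g.
 * a request of 5000 Mbps would be programmed as 500. Values outside
 * 100..10000 Mbps are rejected before any FW command is issued.
 */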

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}
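
/* Decision sketch: the completion is trusted only when (a) the frame was
 * parsed as TCP or UDP, (b) the HW L4 checksum passed, and (c) either the
 * IP checksum passed or the frame is IPv6 (which has no IP header
 * checksum). Only then does the RX path mark CHECKSUM_UNNECESSARY;
 * otherwise the stack re-verifies the checksums in software.
 */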

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}
1450
1451 /* Process the RX completion indicated by rxcp when GRO is disabled */
1452 static void be_rx_compl_process(struct be_rx_obj *rxo,
1453                                 struct be_rx_compl_info *rxcp)
1454 {
1455         struct be_adapter *adapter = rxo->adapter;
1456         struct net_device *netdev = adapter->netdev;
1457         struct sk_buff *skb;
1458
1459         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1460         if (unlikely(!skb)) {
1461                 rx_stats(rxo)->rx_drops_no_skbs++;
1462                 be_rx_compl_discard(rxo, rxcp);
1463                 return;
1464         }
1465
1466         skb_fill_rx_data(rxo, skb, rxcp);
1467
1468         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1469                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1470         else
1471                 skb_checksum_none_assert(skb);
1472
1473         skb->protocol = eth_type_trans(skb, netdev);
1474         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1475         if (netdev->features & NETIF_F_RXHASH)
1476                 skb->rxhash = rxcp->rss_hash;
1477
1479         if (rxcp->vlanf)
1480                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1481
1482         netif_receive_skb(skb);
1483 }
1484
1485 /* Process the RX completion indicated by rxcp when GRO is enabled */
1486 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1487                                     struct napi_struct *napi,
1488                                     struct be_rx_compl_info *rxcp)
1489 {
1490         struct be_adapter *adapter = rxo->adapter;
1491         struct be_rx_page_info *page_info;
1492         struct sk_buff *skb = NULL;
1493         struct be_queue_info *rxq = &rxo->q;
1494         u16 remaining, curr_frag_len;
1495         u16 i, j;
1496
1497         skb = napi_get_frags(napi);
1498         if (!skb) {
1499                 be_rx_compl_discard(rxo, rxcp);
1500                 return;
1501         }
1502
1503         remaining = rxcp->pkt_size;
1504         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1505                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1506
1507                 curr_frag_len = min(remaining, rx_frag_size);
1508
1509                 /* Coalesce all frags from the same physical page in one slot */
1510                 if (i == 0 || page_info->page_offset == 0) {
1511                         /* First frag or Fresh page */
1512                         j++;
1513                         skb_frag_set_page(skb, j, page_info->page);
1514                         skb_shinfo(skb)->frags[j].page_offset =
1515                                                         page_info->page_offset;
1516                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1517                 } else {
1518                         put_page(page_info->page);
1519                 }
1520                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1521                 skb->truesize += rx_frag_size;
1522                 remaining -= curr_frag_len;
1523                 index_inc(&rxcp->rxq_idx, rxq->len);
1524                 memset(page_info, 0, sizeof(*page_info));
1525         }
1526         BUG_ON(j > MAX_SKB_FRAGS);
1527
1528         skb_shinfo(skb)->nr_frags = j + 1;
1529         skb->len = rxcp->pkt_size;
1530         skb->data_len = rxcp->pkt_size;
1531         skb->ip_summed = CHECKSUM_UNNECESSARY;
1532         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1533         if (adapter->netdev->features & NETIF_F_RXHASH)
1534                 skb->rxhash = rxcp->rss_hash;
1535
1536         if (rxcp->vlanf)
1537                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1538
1539         napi_gro_frags(napi);
1540 }
1541
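/* Decode a v1 (BE3 native mode) Rx completion entry into the
 * chip-agnostic rxcp format consumed by the Rx path.
 */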
1542 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1543                                  struct be_rx_compl_info *rxcp)
1544 {
1545         rxcp->pkt_size =
1546                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1547         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1548         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1549         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1550         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1551         rxcp->ip_csum =
1552                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1553         rxcp->l4_csum =
1554                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1555         rxcp->ipv6 =
1556                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1557         rxcp->rxq_idx =
1558                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1559         rxcp->num_rcvd =
1560                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1561         rxcp->pkt_type =
1562                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1563         rxcp->rss_hash =
1564                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1565         if (rxcp->vlanf) {
1566                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1567                                           compl);
1568                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1569                                                compl);
1570         }
1571         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1572 }
1573
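/* Decode a v0 Rx completion entry; unlike v1, this format also reports
 * the IP-fragment bit, which is used to invalidate the L4 checksum result.
 */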
1574 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1575                                  struct be_rx_compl_info *rxcp)
1576 {
1577         rxcp->pkt_size =
1578                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1579         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1580         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1581         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1582         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1583         rxcp->ip_csum =
1584                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1585         rxcp->l4_csum =
1586                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1587         rxcp->ipv6 =
1588                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1589         rxcp->rxq_idx =
1590                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1591         rxcp->num_rcvd =
1592                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1593         rxcp->pkt_type =
1594                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1595         rxcp->rss_hash =
1596                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1597         if (rxcp->vlanf) {
1598                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1599                                           compl);
1600                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1601                                                compl);
1602         }
1603         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1604         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1605                                       ip_frag, compl);
1606 }
1607
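/* Return the next valid Rx completion on this queue, or NULL if none is
 * pending. The entry is byte-swapped to CPU order, parsed into rxo->rxcp
 * and then invalidated so that it is consumed exactly once.
 */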
1608 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1609 {
1610         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1611         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1612         struct be_adapter *adapter = rxo->adapter;
1613
1614         /* For checking the valid bit it is OK to use either definition as the
1615          * valid bit is at the same position in both v0 and v1 Rx compl */
1616         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1617                 return NULL;
1618
1619         rmb();
1620         be_dws_le_to_cpu(compl, sizeof(*compl));
1621
1622         if (adapter->be3_native)
1623                 be_parse_rx_compl_v1(compl, rxcp);
1624         else
1625                 be_parse_rx_compl_v0(compl, rxcp);
1626
1627         if (rxcp->ip_frag)
1628                 rxcp->l4_csum = 0;
1629
1630         if (rxcp->vlanf) {
1631                 /* vlanf could be wrongly set in some cards.
1632                  * ignore if vtm is not set */
1633                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1634                         rxcp->vlanf = 0;
1635
1636                 if (!lancer_chip(adapter))
1637                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1638
1639                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1640                     !adapter->vlan_tag[rxcp->vlan_tag])
1641                         rxcp->vlanf = 0;
1642         }
1643
1644         /* As the compl has been parsed, reset it; we won't touch it again */
1645         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1646
1647         queue_tail_inc(&rxo->cq);
1648         return rxcp;
1649 }
1650
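/* Rx buffers may come from a multi-order page shared by several fragments;
 * __GFP_COMP makes such a page a compound page so that the per-fragment
 * get_page()/put_page() refcounting operates on the page as a whole.
 */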
1651 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1652 {
1653         u32 order = get_order(size);
1654
1655         if (order > 0)
1656                 gfp |= __GFP_COMP;
1657         return  alloc_pages(gfp, order);
1658 }
1659
1660 /*
1661  * Allocate a page, split it into fragments of size rx_frag_size and post
1662  * them as receive buffers to BE
1663  */
1664 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1665 {
1666         struct be_adapter *adapter = rxo->adapter;
1667         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1668         struct be_queue_info *rxq = &rxo->q;
1669         struct page *pagep = NULL;
1670         struct be_eth_rx_d *rxd;
1671         u64 page_dmaaddr = 0, frag_dmaaddr;
1672         u32 posted, page_offset = 0;
1673
1674         page_info = &rxo->page_info_tbl[rxq->head];
1675         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1676                 if (!pagep) {
1677                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1678                         if (unlikely(!pagep)) {
1679                                 rx_stats(rxo)->rx_post_fail++;
1680                                 break;
1681                         }
1682                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1683                                                     0, adapter->big_page_size,
1684                                                     DMA_FROM_DEVICE);
1685                         page_info->page_offset = 0;
1686                 } else {
1687                         get_page(pagep);
1688                         page_info->page_offset = page_offset + rx_frag_size;
1689                 }
1690                 page_offset = page_info->page_offset;
1691                 page_info->page = pagep;
1692                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1693                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1694
1695                 rxd = queue_head_node(rxq);
1696                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1697                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1698
1699                 /* Any space left in the current big page for another frag? */
1700                 if ((page_offset + rx_frag_size + rx_frag_size) >
1701                                         adapter->big_page_size) {
1702                         pagep = NULL;
1703                         page_info->last_page_user = true;
1704                 }
1705
1706                 prev_page_info = page_info;
1707                 queue_head_inc(rxq);
1708                 page_info = &rxo->page_info_tbl[rxq->head];
1709         }
1710         if (pagep)
1711                 prev_page_info->last_page_user = true;
1712
1713         if (posted) {
1714                 atomic_add(posted, &rxq->used);
1715                 be_rxq_notify(adapter, rxq->id, posted);
1716         } else if (atomic_read(&rxq->used) == 0) {
1717                 /* Let be_worker replenish when memory is available */
1718                 rxo->rx_post_starved = true;
1719         }
1720 }
1721
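/* Return the next valid Tx completion, or NULL if none is pending; the
 * valid bit is cleared after parsing so the entry is not processed twice.
 */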
1722 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1723 {
1724         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1725
1726         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1727                 return NULL;
1728
1729         rmb();
1730         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1731
1732         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1733
1734         queue_tail_inc(tx_cq);
1735         return txcp;
1736 }
1737
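/* Unmap and free the skb whose WRBs end at last_index and return the
 * number of WRBs (including the header WRB) that the TXQ can reclaim.
 */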
1738 static u16 be_tx_compl_process(struct be_adapter *adapter,
1739                 struct be_tx_obj *txo, u16 last_index)
1740 {
1741         struct be_queue_info *txq = &txo->q;
1742         struct be_eth_wrb *wrb;
1743         struct sk_buff **sent_skbs = txo->sent_skb_list;
1744         struct sk_buff *sent_skb;
1745         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1746         bool unmap_skb_hdr = true;
1747
1748         sent_skb = sent_skbs[txq->tail];
1749         BUG_ON(!sent_skb);
1750         sent_skbs[txq->tail] = NULL;
1751
1752         /* skip header wrb */
1753         queue_tail_inc(txq);
1754
1755         do {
1756                 cur_index = txq->tail;
1757                 wrb = queue_tail_node(txq);
1758                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1759                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1760                 unmap_skb_hdr = false;
1761
1762                 num_wrbs++;
1763                 queue_tail_inc(txq);
1764         } while (cur_index != last_index);
1765
1766         kfree_skb(sent_skb);
1767         return num_wrbs;
1768 }
1769
1770 /* Return the number of events in the event queue */
1771 static inline int events_get(struct be_eq_obj *eqo)
1772 {
1773         struct be_eq_entry *eqe;
1774         int num = 0;
1775
1776         do {
1777                 eqe = queue_tail_node(&eqo->q);
1778                 if (eqe->evt == 0)
1779                         break;
1780
1781                 rmb();
1782                 eqe->evt = 0;
1783                 num++;
1784                 queue_tail_inc(&eqo->q);
1785         } while (true);
1786
1787         return num;
1788 }
1789
1790 /* Leaves the EQ in a disarmed state */
1791 static void be_eq_clean(struct be_eq_obj *eqo)
1792 {
1793         int num = events_get(eqo);
1794
1795         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1796 }
1797
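/* Drain all pending Rx completions and free any posted but unconsumed Rx
 * buffers; used only while an RXQ is being torn down.
 */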
1798 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1799 {
1800         struct be_rx_page_info *page_info;
1801         struct be_queue_info *rxq = &rxo->q;
1802         struct be_queue_info *rx_cq = &rxo->cq;
1803         struct be_rx_compl_info *rxcp;
1804         struct be_adapter *adapter = rxo->adapter;
1805         int flush_wait = 0;
1806         u16 tail;
1807
1808         /* Consume pending rx completions.
1809          * Wait for the flush completion (identified by zero num_rcvd)
1810          * to arrive. Notify CQ even when there are no more CQ entries
1811          * for HW to flush partially coalesced CQ entries.
1812          * In Lancer, there is no need to wait for flush compl.
1813          */
1814         for (;;) {
1815                 rxcp = be_rx_compl_get(rxo);
1816                 if (rxcp == NULL) {
1817                         if (lancer_chip(adapter))
1818                                 break;
1819
1820                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1821                                 dev_warn(&adapter->pdev->dev,
1822                                          "did not receive flush compl\n");
1823                                 break;
1824                         }
1825                         be_cq_notify(adapter, rx_cq->id, true, 0);
1826                         mdelay(1);
1827                 } else {
1828                         be_rx_compl_discard(rxo, rxcp);
1829                         be_cq_notify(adapter, rx_cq->id, false, 1);
1830                         if (rxcp->num_rcvd == 0)
1831                                 break;
1832                 }
1833         }
1834
1835         /* After cleanup, leave the CQ in unarmed state */
1836         be_cq_notify(adapter, rx_cq->id, false, 0);
1837
1838         /* Then free posted rx buffers that were not used */
1839         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1840         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1841                 page_info = get_rx_page_info(rxo, tail);
1842                 put_page(page_info->page);
1843                 memset(page_info, 0, sizeof(*page_info));
1844         }
1845         BUG_ON(atomic_read(&rxq->used));
1846         rxq->tail = rxq->head = 0;
1847 }
1848
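/* Reap all outstanding Tx completions; skbs whose completions do not
 * arrive within ~200ms are unmapped and freed by force.
 */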
1849 static void be_tx_compl_clean(struct be_adapter *adapter)
1850 {
1851         struct be_tx_obj *txo;
1852         struct be_queue_info *txq;
1853         struct be_eth_tx_compl *txcp;
1854         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1855         struct sk_buff *sent_skb;
1856         bool dummy_wrb;
1857         int i, pending_txqs;
1858
1859         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1860         do {
1861                 pending_txqs = adapter->num_tx_qs;
1862
1863                 for_all_tx_queues(adapter, txo, i) {
1864                         txq = &txo->q;
1865                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1866                                 end_idx =
1867                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1868                                                       wrb_index, txcp);
1869                                 num_wrbs += be_tx_compl_process(adapter, txo,
1870                                                                 end_idx);
1871                                 cmpl++;
1872                         }
1873                         if (cmpl) {
1874                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1875                                 atomic_sub(num_wrbs, &txq->used);
1876                                 cmpl = 0;
1877                                 num_wrbs = 0;
1878                         }
1879                         if (atomic_read(&txq->used) == 0)
1880                                 pending_txqs--;
1881                 }
1882
1883                 if (pending_txqs == 0 || ++timeo > 200)
1884                         break;
1885
1886                 mdelay(1);
1887         } while (true);
1888
1889         for_all_tx_queues(adapter, txo, i) {
1890                 txq = &txo->q;
1891                 if (atomic_read(&txq->used))
1892                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1893                                 atomic_read(&txq->used));
1894
1895                 /* free posted tx for which compls will never arrive */
1896                 while (atomic_read(&txq->used)) {
1897                         sent_skb = txo->sent_skb_list[txq->tail];
1898                         end_idx = txq->tail;
1899                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1900                                                    &dummy_wrb);
1901                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1902                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1903                         atomic_sub(num_wrbs, &txq->used);
1904                 }
1905         }
1906 }
1907
1908 static void be_evt_queues_destroy(struct be_adapter *adapter)
1909 {
1910         struct be_eq_obj *eqo;
1911         int i;
1912
1913         for_all_evt_queues(adapter, eqo, i) {
1914                 if (eqo->q.created) {
1915                         be_eq_clean(eqo);
1916                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1917                         netif_napi_del(&eqo->napi);
1918                 }
1919                 be_queue_free(adapter, &eqo->q);
1920         }
1921 }
1922
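/* Create one event queue per available IRQ, capped by the configured
 * number of queues; each EQ gets its own NAPI context.
 */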
1923 static int be_evt_queues_create(struct be_adapter *adapter)
1924 {
1925         struct be_queue_info *eq;
1926         struct be_eq_obj *eqo;
1927         int i, rc;
1928
1929         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
1930                                     adapter->cfg_num_qs);
1931
1932         for_all_evt_queues(adapter, eqo, i) {
1933                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
1934                                BE_NAPI_WEIGHT);
1935                 eqo->adapter = adapter;
1936                 eqo->tx_budget = BE_TX_BUDGET;
1937                 eqo->idx = i;
1938                 eqo->max_eqd = BE_MAX_EQD;
1939                 eqo->enable_aic = true;
1940
1941                 eq = &eqo->q;
1942                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1943                                         sizeof(struct be_eq_entry));
1944                 if (rc)
1945                         return rc;
1946
1947                 rc = be_cmd_eq_create(adapter, eqo);
1948                 if (rc)
1949                         return rc;
1950         }
1951         return 0;
1952 }
1953
1954 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1955 {
1956         struct be_queue_info *q;
1957
1958         q = &adapter->mcc_obj.q;
1959         if (q->created)
1960                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1961         be_queue_free(adapter, q);
1962
1963         q = &adapter->mcc_obj.cq;
1964         if (q->created)
1965                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1966         be_queue_free(adapter, q);
1967 }
1968
1969 /* Must be called only after TX qs are created as MCC shares TX EQ */
1970 static int be_mcc_queues_create(struct be_adapter *adapter)
1971 {
1972         struct be_queue_info *q, *cq;
1973
1974         cq = &adapter->mcc_obj.cq;
1975         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1976                         sizeof(struct be_mcc_compl)))
1977                 goto err;
1978
1979         /* Use the default EQ for MCC completions */
1980         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1981                 goto mcc_cq_free;
1982
1983         q = &adapter->mcc_obj.q;
1984         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1985                 goto mcc_cq_destroy;
1986
1987         if (be_cmd_mccq_create(adapter, q, cq))
1988                 goto mcc_q_free;
1989
1990         return 0;
1991
1992 mcc_q_free:
1993         be_queue_free(adapter, q);
1994 mcc_cq_destroy:
1995         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1996 mcc_cq_free:
1997         be_queue_free(adapter, cq);
1998 err:
1999         return -1;
2000 }
2001
2002 static void be_tx_queues_destroy(struct be_adapter *adapter)
2003 {
2004         struct be_queue_info *q;
2005         struct be_tx_obj *txo;
2006         u8 i;
2007
2008         for_all_tx_queues(adapter, txo, i) {
2009                 q = &txo->q;
2010                 if (q->created)
2011                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2012                 be_queue_free(adapter, q);
2013
2014                 q = &txo->cq;
2015                 if (q->created)
2016                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2017                 be_queue_free(adapter, q);
2018         }
2019 }
2020
2021 static int be_tx_qs_create(struct be_adapter *adapter)
2022 {
2023         struct be_queue_info *cq, *eq;
2024         struct be_tx_obj *txo;
2025         int status, i;
2026
2027         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2028
2029         for_all_tx_queues(adapter, txo, i) {
2030                 cq = &txo->cq;
2031                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2032                                         sizeof(struct be_eth_tx_compl));
2033                 if (status)
2034                         return status;
2035
2036                 /* If num_evt_qs is less than num_tx_qs, then more than
2037                  * one txq shares an eq
2038                  */
2039                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2040                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2041                 if (status)
2042                         return status;
2043
2044                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2045                                         sizeof(struct be_eth_wrb));
2046                 if (status)
2047                         return status;
2048
2049                 status = be_cmd_txq_create(adapter, txo);
2050                 if (status)
2051                         return status;
2052         }
2053
2054         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2055                  adapter->num_tx_qs);
2056         return 0;
2057 }
2058
2059 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2060 {
2061         struct be_queue_info *q;
2062         struct be_rx_obj *rxo;
2063         int i;
2064
2065         for_all_rx_queues(adapter, rxo, i) {
2066                 q = &rxo->cq;
2067                 if (q->created)
2068                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2069                 be_queue_free(adapter, q);
2070         }
2071 }
2072
2073 static int be_rx_cqs_create(struct be_adapter *adapter)
2074 {
2075         struct be_queue_info *eq, *cq;
2076         struct be_rx_obj *rxo;
2077         int rc, i;
2078
2079         /* We can create as many RSS rings as there are EQs. */
2080         adapter->num_rx_qs = adapter->num_evt_qs;
2081
2082         /* We'll use RSS only if at least 2 RSS rings are supported.
2083          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2084          */
2085         if (adapter->num_rx_qs > 1)
2086                 adapter->num_rx_qs++;
2087
2088         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2089         for_all_rx_queues(adapter, rxo, i) {
2090                 rxo->adapter = adapter;
2091                 cq = &rxo->cq;
2092                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2093                                 sizeof(struct be_eth_rx_compl));
2094                 if (rc)
2095                         return rc;
2096
2097                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2098                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2099                 if (rc)
2100                         return rc;
2101         }
2102
2103         dev_info(&adapter->pdev->dev,
2104                  "created %d RSS queue(s) and 1 default RX queue\n",
2105                  adapter->num_rx_qs - 1);
2106         return 0;
2107 }
2108
2109 static irqreturn_t be_intx(int irq, void *dev)
2110 {
2111         struct be_eq_obj *eqo = dev;
2112         struct be_adapter *adapter = eqo->adapter;
2113         int num_evts = 0;
2114
2115         /* IRQ is not expected when NAPI is scheduled as the EQ
2116          * will not be armed.
2117          * But, this can happen on Lancer INTx where it takes
2118          * a while to de-assert INTx or in BE2 where occasionally
2119          * an interrupt may be raised even when EQ is unarmed.
2120          * If NAPI is already scheduled, then counting & notifying
2121          * events will orphan them.
2122          */
2123         if (napi_schedule_prep(&eqo->napi)) {
2124                 num_evts = events_get(eqo);
2125                 __napi_schedule(&eqo->napi);
2126                 if (num_evts)
2127                         eqo->spurious_intr = 0;
2128         }
2129         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2130
2131         /* Return IRQ_HANDLED only for the first spurious intr
2132          * after a valid intr to stop the kernel from branding
2133          * this irq as a bad one!
2134          */
2135         if (num_evts || eqo->spurious_intr++ == 0)
2136                 return IRQ_HANDLED;
2137         else
2138                 return IRQ_NONE;
2139 }
2140
2141 static irqreturn_t be_msix(int irq, void *dev)
2142 {
2143         struct be_eq_obj *eqo = dev;
2144
2145         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2146         napi_schedule(&eqo->napi);
2147         return IRQ_HANDLED;
2148 }
2149
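/* GRO is attempted only for error-free TCP frames whose L4 checksum was
 * validated by hardware.
 */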
2150 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2151 {
2152         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2153 }
2154
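/* Consume up to 'budget' Rx completions on this queue and replenish the
 * Rx buffers once the queue falls below the refill watermark.
 */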
2155 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2156                         int budget)
2157 {
2158         struct be_adapter *adapter = rxo->adapter;
2159         struct be_queue_info *rx_cq = &rxo->cq;
2160         struct be_rx_compl_info *rxcp;
2161         u32 work_done;
2162
2163         for (work_done = 0; work_done < budget; work_done++) {
2164                 rxcp = be_rx_compl_get(rxo);
2165                 if (!rxcp)
2166                         break;
2167
2168                 /* Is it a flush compl that has no data */
2169                 if (unlikely(rxcp->num_rcvd == 0))
2170                         goto loop_continue;
2171
2172                 /* Discard compl with partial DMA Lancer B0 */
2173                 if (unlikely(!rxcp->pkt_size)) {
2174                         be_rx_compl_discard(rxo, rxcp);
2175                         goto loop_continue;
2176                 }
2177
2178                 /* On BE drop pkts that arrive due to imperfect filtering in
2179                  * promiscuous mode on some SKUs
2180                  */
2181                 if (unlikely(rxcp->port != adapter->port_num &&
2182                                 !lancer_chip(adapter))) {
2183                         be_rx_compl_discard(rxo, rxcp);
2184                         goto loop_continue;
2185                 }
2186
2187                 if (do_gro(rxcp))
2188                         be_rx_compl_process_gro(rxo, napi, rxcp);
2189                 else
2190                         be_rx_compl_process(rxo, rxcp);
2191 loop_continue:
2192                 be_rx_stats_update(rxo, rxcp);
2193         }
2194
2195         if (work_done) {
2196                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2197
2198                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2199                         be_post_rx_frags(rxo, GFP_ATOMIC);
2200         }
2201
2202         return work_done;
2203 }
2204
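/* Reap up to 'budget' Tx completions on this queue, waking the netdev
 * subqueue once at least half of the TXQ is free again.
 */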
2205 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2206                           int budget, int idx)
2207 {
2208         struct be_eth_tx_compl *txcp;
2209         int num_wrbs = 0, work_done;
2210
2211         for (work_done = 0; work_done < budget; work_done++) {
2212                 txcp = be_tx_compl_get(&txo->cq);
2213                 if (!txcp)
2214                         break;
2215                 num_wrbs += be_tx_compl_process(adapter, txo,
2216                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2217                                         wrb_index, txcp));
2218         }
2219
2220         if (work_done) {
2221                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2222                 atomic_sub(num_wrbs, &txo->q.used);
2223
2224                 /* As Tx wrbs have been freed up, wake up netdev queue
2225                  * if it was stopped due to lack of tx wrbs.  */
2226                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2227                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2228                         netif_wake_subqueue(adapter->netdev, idx);
2229                 }
2230
2231                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2232                 tx_stats(txo)->tx_compl += work_done;
2233                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2234         }
2235         return (work_done < budget); /* Done */
2236 }
2237
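/* NAPI poll handler: services every TXQ and RXQ mapped to this EQ (plus
 * the MCC queue on its designated EQ) and re-arms the EQ only when all
 * the work fit in the budget; otherwise events are counted/cleared and
 * polling continues.
 */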
2238 int be_poll(struct napi_struct *napi, int budget)
2239 {
2240         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2241         struct be_adapter *adapter = eqo->adapter;
2242         int max_work = 0, work, i, num_evts;
2243         bool tx_done;
2244
2245         num_evts = events_get(eqo);
2246
2247         /* Process all TXQs serviced by this EQ */
2248         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2249                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2250                                         eqo->tx_budget, i);
2251                 if (!tx_done)
2252                         max_work = budget;
2253         }
2254
2255         /* This loop iterates twice for EQ0, on which completions of the
2256          * last RXQ (the default one) are also processed.
2257          * For other EQs the loop iterates only once.
2258          */
2259         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2260                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2261                 max_work = max(work, max_work);
2262         }
2263
2264         if (is_mcc_eqo(eqo))
2265                 be_process_mcc(adapter);
2266
2267         if (max_work < budget) {
2268                 napi_complete(napi);
2269                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2270         } else {
2271                 /* As we'll continue in polling mode, count and clear events */
2272                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2273         }
2274         return max_work;
2275 }
2276
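/* Check the adapter for unrecoverable errors: the SLIPORT status registers
 * on Lancer, or the (masked) UE status registers on other chips.
 */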
2277 void be_detect_error(struct be_adapter *adapter)
2278 {
2279         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2280         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2281         u32 i;
2282
2283         if (be_hw_error(adapter))
2284                 return;
2285
2286         if (lancer_chip(adapter)) {
2287                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2288                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2289                         sliport_err1 = ioread32(adapter->db +
2290                                         SLIPORT_ERROR1_OFFSET);
2291                         sliport_err2 = ioread32(adapter->db +
2292                                         SLIPORT_ERROR2_OFFSET);
2293                 }
2294         } else {
2295                 pci_read_config_dword(adapter->pdev,
2296                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2297                 pci_read_config_dword(adapter->pdev,
2298                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2299                 pci_read_config_dword(adapter->pdev,
2300                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2301                 pci_read_config_dword(adapter->pdev,
2302                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2303
2304                 ue_lo = (ue_lo & ~ue_lo_mask);
2305                 ue_hi = (ue_hi & ~ue_hi_mask);
2306         }
2307
2308         /* On certain platforms BE hardware can indicate spurious UEs.
2309          * In case of a real UE the h/w is allowed to stop working completely
2310          * on its own; hence hw_error is not set on UE detection alone.
2311          */
2312         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2313                 adapter->hw_error = true;
2314                 dev_err(&adapter->pdev->dev,
2315                         "Error detected in the card\n");
2316                 dev_err(&adapter->pdev->dev,
2317                         "ERR: sliport status 0x%x\n", sliport_status);
2318                 dev_err(&adapter->pdev->dev,
2319                         "ERR: sliport error1 0x%x\n", sliport_err1);
2320                 dev_err(&adapter->pdev->dev,
2321                         "ERR: sliport error2 0x%x\n", sliport_err2);
2322         }
2326
2327         if (ue_lo) {
2328                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2329                         if (ue_lo & 1)
2330                                 dev_err(&adapter->pdev->dev,
2331                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2332                 }
2333         }
2334
2335         if (ue_hi) {
2336                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2337                         if (ue_hi & 1)
2338                                 dev_err(&adapter->pdev->dev,
2339                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2340                 }
2341         }
2342
2343 }
2344
2345 static void be_msix_disable(struct be_adapter *adapter)
2346 {
2347         if (msix_enabled(adapter)) {
2348                 pci_disable_msix(adapter->pdev);
2349                 adapter->num_msix_vec = 0;
2350                 adapter->num_msix_roce_vec = 0;
2351         }
2352 }
2353
2354 static int be_msix_enable(struct be_adapter *adapter)
2355 {
2356         int i, status, num_vec;
2357         struct device *dev = &adapter->pdev->dev;
2358
2359         /* If RoCE is supported, program the max number of NIC vectors that
2360          * may be configured via set-channels, along with vectors needed for
2361          * RoCE. Else, just program the number we'll use initially.
2362          */
2363         if (be_roce_supported(adapter))
2364                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2365                                 2 * num_online_cpus());
2366         else
2367                 num_vec = adapter->cfg_num_qs;
2368
2369         for (i = 0; i < num_vec; i++)
2370                 adapter->msix_entries[i].entry = i;
2371
2372         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2373         if (status == 0) {
2374                 goto done;
2375         } else if (status >= MIN_MSIX_VECTORS) {
2376                 num_vec = status;
2377                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2378                                          num_vec);
2379                 if (!status)
2380                         goto done;
2381         }
2382
2383         dev_warn(dev, "MSIx enable failed\n");
2384
2385         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2386         if (!be_physfn(adapter))
2387                 return status;
2388         return 0;
2389 done:
2390         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2391                 adapter->num_msix_roce_vec = num_vec / 2;
2392                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2393                          adapter->num_msix_roce_vec);
2394         }
2395
2396         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2397
2398         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2399                  adapter->num_msix_vec);
2400         return 0;
2401 }
2402
2403 static inline int be_msix_vec_get(struct be_adapter *adapter,
2404                                 struct be_eq_obj *eqo)
2405 {
2406         return adapter->msix_entries[eqo->msix_idx].vector;
2407 }
2408
2409 static int be_msix_register(struct be_adapter *adapter)
2410 {
2411         struct net_device *netdev = adapter->netdev;
2412         struct be_eq_obj *eqo;
2413         int status, i, vec;
2414
2415         for_all_evt_queues(adapter, eqo, i) {
2416                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2417                 vec = be_msix_vec_get(adapter, eqo);
2418                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2419                 if (status)
2420                         goto err_msix;
2421         }
2422
2423         return 0;
2424 err_msix:
2425         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2426                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2427         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2428                 status);
2429         be_msix_disable(adapter);
2430         return status;
2431 }
2432
2433 static int be_irq_register(struct be_adapter *adapter)
2434 {
2435         struct net_device *netdev = adapter->netdev;
2436         int status;
2437
2438         if (msix_enabled(adapter)) {
2439                 status = be_msix_register(adapter);
2440                 if (status == 0)
2441                         goto done;
2442                 /* INTx is not supported for VF */
2443                 if (!be_physfn(adapter))
2444                         return status;
2445         }
2446
2447         /* INTx: only the first EQ is used */
2448         netdev->irq = adapter->pdev->irq;
2449         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2450                              &adapter->eq_obj[0]);
2451         if (status) {
2452                 dev_err(&adapter->pdev->dev,
2453                         "INTx request IRQ failed - err %d\n", status);
2454                 return status;
2455         }
2456 done:
2457         adapter->isr_registered = true;
2458         return 0;
2459 }
2460
2461 static void be_irq_unregister(struct be_adapter *adapter)
2462 {
2463         struct net_device *netdev = adapter->netdev;
2464         struct be_eq_obj *eqo;
2465         int i;
2466
2467         if (!adapter->isr_registered)
2468                 return;
2469
2470         /* INTx */
2471         if (!msix_enabled(adapter)) {
2472                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2473                 goto done;
2474         }
2475
2476         /* MSIx */
2477         for_all_evt_queues(adapter, eqo, i)
2478                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2479
2480 done:
2481         adapter->isr_registered = false;
2482 }
2483
2484 static void be_rx_qs_destroy(struct be_adapter *adapter)
2485 {
2486         struct be_queue_info *q;
2487         struct be_rx_obj *rxo;
2488         int i;
2489
2490         for_all_rx_queues(adapter, rxo, i) {
2491                 q = &rxo->q;
2492                 if (q->created) {
2493                         be_cmd_rxq_destroy(adapter, q);
2494                         be_rx_cq_clean(rxo);
2495                 }
2496                 be_queue_free(adapter, q);
2497         }
2498 }
2499
2500 static int be_close(struct net_device *netdev)
2501 {
2502         struct be_adapter *adapter = netdev_priv(netdev);
2503         struct be_eq_obj *eqo;
2504         int i;
2505
2506         be_roce_dev_close(adapter);
2507
2508         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2509                 for_all_evt_queues(adapter, eqo, i)
2510                         napi_disable(&eqo->napi);
2511                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2512         }
2513
2514         be_async_mcc_disable(adapter);
2515
2516         /* Wait for all pending tx completions to arrive so that
2517          * all tx skbs are freed.
2518          */
2519         netif_tx_disable(netdev);
2520         be_tx_compl_clean(adapter);
2521
2522         be_rx_qs_destroy(adapter);
2523
2524         for_all_evt_queues(adapter, eqo, i) {
2525                 if (msix_enabled(adapter))
2526                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2527                 else
2528                         synchronize_irq(netdev->irq);
2529                 be_eq_clean(eqo);
2530         }
2531
2532         be_irq_unregister(adapter);
2533
2534         return 0;
2535 }
2536
2537 static int be_rx_qs_create(struct be_adapter *adapter)
2538 {
2539         struct be_rx_obj *rxo;
2540         int rc, i, j;
2541         u8 rsstable[128];
2542
2543         for_all_rx_queues(adapter, rxo, i) {
2544                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2545                                     sizeof(struct be_eth_rx_d));
2546                 if (rc)
2547                         return rc;
2548         }
2549
2550         /* The FW would like the default RXQ to be created first */
2551         rxo = default_rxo(adapter);
2552         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2553                                adapter->if_handle, false, &rxo->rss_id);
2554         if (rc)
2555                 return rc;
2556
2557         for_all_rss_queues(adapter, rxo, i) {
2558                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2559                                        rx_frag_size, adapter->if_handle,
2560                                        true, &rxo->rss_id);
2561                 if (rc)
2562                         return rc;
2563         }
2564
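        /* Build the 128-entry RSS indirection table by striping the RSS
         * ring ids across it, then enable TCP/IP (and, on chips newer than
         * BEx, UDP) hashing.
         */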
2565         if (be_multi_rxq(adapter)) {
2566                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2567                         for_all_rss_queues(adapter, rxo, i) {
2568                                 if ((j + i) >= 128)
2569                                         break;
2570                                 rsstable[j + i] = rxo->rss_id;
2571                         }
2572                 }
2573                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2574                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2575
2576                 if (!BEx_chip(adapter))
2577                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2578                                                 RSS_ENABLE_UDP_IPV6;
2579
2580                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2581                                        128);
2582                 if (rc) {
2583                         adapter->rss_flags = 0;
2584                         return rc;
2585                 }
2586         }
2587
2588         /* First time posting */
2589         for_all_rx_queues(adapter, rxo, i)
2590                 be_post_rx_frags(rxo, GFP_KERNEL);
2591         return 0;
2592 }
2593
2594 static int be_open(struct net_device *netdev)
2595 {
2596         struct be_adapter *adapter = netdev_priv(netdev);
2597         struct be_eq_obj *eqo;
2598         struct be_rx_obj *rxo;
2599         struct be_tx_obj *txo;
2600         u8 link_status;
2601         int status, i;
2602
2603         status = be_rx_qs_create(adapter);
2604         if (status)
2605                 goto err;
2606
2607         status = be_irq_register(adapter);
2608         if (status)
2609                 goto err;
2610
2611         for_all_rx_queues(adapter, rxo, i)
2612                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2613
2614         for_all_tx_queues(adapter, txo, i)
2615                 be_cq_notify(adapter, txo->cq.id, true, 0);
2616
2617         be_async_mcc_enable(adapter);
2618
2619         for_all_evt_queues(adapter, eqo, i) {
2620                 napi_enable(&eqo->napi);
2621                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2622         }
2623         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2624
2625         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2626         if (!status)
2627                 be_link_status_update(adapter, link_status);
2628
2629         netif_tx_start_all_queues(netdev);
2630         be_roce_dev_open(adapter);
2631         return 0;
2632 err:
2633         be_close(adapter->netdev);
2634         return -EIO;
2635 }
2636
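/* Program (enable) or clear (disable) the magic-packet Wake-on-LAN filter
 * and set the PCI D3hot/D3cold wake capability to match.
 */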
2637 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2638 {
2639         struct be_dma_mem cmd;
2640         int status = 0;
2641         u8 mac[ETH_ALEN];
2642
2643         memset(mac, 0, ETH_ALEN);
2644
2645         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2646         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2647                                      GFP_KERNEL);
2648         if (cmd.va == NULL)
2649                 return -1;
2650
2651         if (enable) {
2652                 status = pci_write_config_dword(adapter->pdev,
2653                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2654                 if (status) {
2655                         dev_err(&adapter->pdev->dev,
2656                                 "Could not enable Wake-on-lan\n");
2657                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2658                                           cmd.dma);
2659                         return status;
2660                 }
2661                 status = be_cmd_enable_magic_wol(adapter,
2662                                 adapter->netdev->dev_addr, &cmd);
2663                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2664                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2665         } else {
2666                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2667                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2668                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2669         }
2670
2671         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2672         return status;
2673 }
2674
2675 /*
2676  * Generate a seed MAC address from the PF MAC Address using jhash.
2677  * MAC addresses for VFs are assigned incrementally starting from the seed.
2678  * These addresses are programmed in the ASIC by the PF and the VF driver
2679  * queries for the MAC address during its probe.
2680  */
2681 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2682 {
2683         u32 vf;
2684         int status = 0;
2685         u8 mac[ETH_ALEN];
2686         struct be_vf_cfg *vf_cfg;
2687
2688         be_vf_eth_addr_generate(adapter, mac);
2689
2690         for_all_vfs(adapter, vf_cfg, vf) {
2691                 if (BEx_chip(adapter))
2692                         status = be_cmd_pmac_add(adapter, mac,
2693                                                  vf_cfg->if_handle,
2694                                                  &vf_cfg->pmac_id, vf + 1);
2695                 else
2696                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2697                                                 vf + 1);
2698
2699                 if (status)
2700                         dev_err(&adapter->pdev->dev,
2701                         "Mac address assignment failed for VF %d\n", vf);
2702                 else
2703                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2704
2705                 mac[5] += 1;
2706         }
2707         return status;
2708 }
2709
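/* Read back the MAC addresses the FW has already programmed for each VF;
 * used when VFs were left enabled by a previous driver instance.
 */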
2710 static int be_vfs_mac_query(struct be_adapter *adapter)
2711 {
2712         int status, vf;
2713         u8 mac[ETH_ALEN];
2714         struct be_vf_cfg *vf_cfg;
2715         bool active = false;
2716
2717         for_all_vfs(adapter, vf_cfg, vf) {
2718                 be_cmd_get_mac_from_list(adapter, mac, &active,
2719                                          &vf_cfg->pmac_id, 0);
2720
2721                 status = be_cmd_mac_addr_query(adapter, mac, false,
2722                                                vf_cfg->if_handle, 0);
2723                 if (status)
2724                         return status;
2725                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2726         }
2727         return 0;
2728 }
2729
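/* Tear down SR-IOV state: disable SR-IOV and delete each VF's MAC filter
 * and interface, unless the VFs are still assigned to VMs.
 */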
2730 static void be_vf_clear(struct be_adapter *adapter)
2731 {
2732         struct be_vf_cfg *vf_cfg;
2733         u32 vf;
2734
2735         if (pci_vfs_assigned(adapter->pdev)) {
2736                 dev_warn(&adapter->pdev->dev,
2737                          "VFs are assigned to VMs: not disabling VFs\n");
2738                 goto done;
2739         }
2740
2741         pci_disable_sriov(adapter->pdev);
2742
2743         for_all_vfs(adapter, vf_cfg, vf) {
2744                 if (BEx_chip(adapter))
2745                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2746                                         vf_cfg->pmac_id, vf + 1);
2747                 else
2748                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2749                                        vf + 1);
2750
2751                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2752         }
2753 done:
2754         kfree(adapter->vf_cfg);
2755         adapter->num_vfs = 0;
2756 }
2757
2758 static void be_clear_queues(struct be_adapter *adapter)
2759 {
2760         be_mcc_queues_destroy(adapter);
2761         be_rx_cqs_destroy(adapter);
2762         be_tx_queues_destroy(adapter);
2763         be_evt_queues_destroy(adapter);
2764 }
2765
2766 static void be_cancel_worker(struct be_adapter *adapter)
2767 {
2768         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2769                 cancel_delayed_work_sync(&adapter->work);
2770                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2771         }
2772 }
2773
2774 static int be_clear(struct be_adapter *adapter)
2775 {
2776         int i;
2777
2778         be_cancel_worker(adapter);
2779
2780         if (sriov_enabled(adapter))
2781                 be_vf_clear(adapter);
2782
2783         /* delete the primary mac along with the uc-mac list */
2784         for (i = 0; i < (adapter->uc_macs + 1); i++)
2785                 be_cmd_pmac_del(adapter, adapter->if_handle,
2786                                 adapter->pmac_id[i], 0);
2787         adapter->uc_macs = 0;
2788
2789         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2790
2791         be_clear_queues(adapter);
2792
2793         kfree(adapter->pmac_id);
2794         adapter->pmac_id = NULL;
2795
2796         be_msix_disable(adapter);
2797         return 0;
2798 }
2799
2800 static int be_vfs_if_create(struct be_adapter *adapter)
2801 {
2802         struct be_resources res = {0};
2803         struct be_vf_cfg *vf_cfg;
2804         u32 cap_flags, en_flags, vf;
2805         int status;
2806
2807         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2808                     BE_IF_FLAGS_MULTICAST;
2809
2810         for_all_vfs(adapter, vf_cfg, vf) {
2811                 if (!BE3_chip(adapter)) {
2812                         status = be_cmd_get_profile_config(adapter, &res,
2813                                                            vf + 1);
2814                         if (!status)
2815                                 cap_flags = res.if_cap_flags;
2816                 }
2817
2818                 /* If a FW profile exists, then cap_flags are updated */
2819                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2820                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2821                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2822                                           &vf_cfg->if_handle, vf + 1);
2823                 if (status)
2824                         goto err;
2825         }
2826 err:
2827         return status;
2828 }
2829
2830 static int be_vf_setup_init(struct be_adapter *adapter)
2831 {
2832         struct be_vf_cfg *vf_cfg;
2833         int vf;
2834
2835         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2836                                   GFP_KERNEL);
2837         if (!adapter->vf_cfg)
2838                 return -ENOMEM;
2839
2840         for_all_vfs(adapter, vf_cfg, vf) {
2841                 vf_cfg->if_handle = -1;
2842                 vf_cfg->pmac_id = -1;
2843         }
2844         return 0;
2845 }
2846
2847 static int be_vf_setup(struct be_adapter *adapter)
2848 {
2849         struct be_vf_cfg *vf_cfg;
2850         u16 def_vlan, lnk_speed;
2851         int status, old_vfs, vf;
2852         struct device *dev = &adapter->pdev->dev;
2853         u32 privileges;
2854
2855         old_vfs = pci_num_vf(adapter->pdev);
2856         if (old_vfs) {
2857                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2858                 if (old_vfs != num_vfs)
2859                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2860                 adapter->num_vfs = old_vfs;
2861         } else {
2862                 if (num_vfs > be_max_vfs(adapter))
2863                         dev_info(dev, "Device supports %d VFs and not %d\n",
2864                                  be_max_vfs(adapter), num_vfs);
2865                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
2866                 if (!adapter->num_vfs)
2867                         return 0;
2868         }
2869
2870         status = be_vf_setup_init(adapter);
2871         if (status)
2872                 goto err;
2873
2874         if (old_vfs) {
2875                 for_all_vfs(adapter, vf_cfg, vf) {
2876                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2877                         if (status)
2878                                 goto err;
2879                 }
2880         } else {
2881                 status = be_vfs_if_create(adapter);
2882                 if (status)
2883                         goto err;
2884         }
2885
2886         if (old_vfs) {
2887                 status = be_vfs_mac_query(adapter);
2888                 if (status)
2889                         goto err;
2890         } else {
2891                 status = be_vf_eth_addr_config(adapter);
2892                 if (status)
2893                         goto err;
2894         }
2895
2896         for_all_vfs(adapter, vf_cfg, vf) {
2897                 /* Allow VFs to program MAC/VLAN filters */
2898                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2899                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2900                         status = be_cmd_set_fn_privileges(adapter,
2901                                                           privileges |
2902                                                           BE_PRIV_FILTMGMT,
2903                                                           vf + 1);
2904                         if (!status)
2905                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
2906                                          vf);
2907                 }
2908
2909                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2910                  * Allow the full available bandwidth.
2911                  */
2912                 if (BE3_chip(adapter) && !old_vfs)
2913                         be_cmd_set_qos(adapter, 1000, vf + 1);
2914
2915                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2916                                                   NULL, vf + 1);
2917                 if (!status)
2918                         vf_cfg->tx_rate = lnk_speed;
2919
2920                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2921                                                vf + 1, vf_cfg->if_handle, NULL);
2922                 if (status)
2923                         goto err;
2924                 vf_cfg->def_vid = def_vlan;
2925
2926                 be_cmd_enable_vf(adapter, vf + 1);
2927         }
2928
2929         if (!old_vfs) {
2930                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2931                 if (status) {
2932                         dev_err(dev, "SRIOV enable failed\n");
2933                         adapter->num_vfs = 0;
2934                         goto err;
2935                 }
2936         }
2937         return 0;
2938 err:
2939         dev_err(dev, "VF setup failed\n");
2940         be_vf_clear(adapter);
2941         return status;
2942 }
2943
2944 /* On BE2/BE3, FW does not suggest the supported limits */
2945 static void BEx_get_resources(struct be_adapter *adapter,
2946                               struct be_resources *res)
2947 {
2948         struct pci_dev *pdev = adapter->pdev;
2949         bool use_sriov = false;
2950
2951         if (BE3_chip(adapter) && be_physfn(adapter)) {
2952                 int max_vfs;
2953
2954                 max_vfs = pci_sriov_get_totalvfs(pdev);
2955                 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
2956                 use_sriov = res->max_vfs && num_vfs;
2957         }
2958
2959         if (be_physfn(adapter))
2960                 res->max_uc_mac = BE_UC_PMAC_COUNT;
2961         else
2962                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
2963
2964         if (adapter->function_mode & FLEX10_MODE)
2965                 res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
2966         else
2967                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2968         res->max_mcast_mac = BE_MAX_MC;
2969
2970         if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
2971             !be_physfn(adapter))
2972                 res->max_tx_qs = 1;
2973         else
2974                 res->max_tx_qs = BE3_MAX_TX_QS;
2975
2976         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2977             !use_sriov && be_physfn(adapter))
2978                 res->max_rss_qs = (adapter->be3_native) ?
2979                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2980         res->max_rx_qs = res->max_rss_qs + 1;
2981
2982         res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
2983
2984         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
2985         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
2986                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
2987 }
2988
2989 static void be_setup_init(struct be_adapter *adapter)
2990 {
2991         adapter->vlan_prio_bmap = 0xff;
2992         adapter->phy.link_speed = -1;
2993         adapter->if_handle = -1;
2994         adapter->be3_native = false;
2995         adapter->promiscuous = false;
2996         if (be_physfn(adapter))
2997                 adapter->cmd_privileges = MAX_PRIVILEGES;
2998         else
2999                 adapter->cmd_privileges = MIN_PRIVILEGES;
3000 }
3001
3002 static int be_get_resources(struct be_adapter *adapter)
3003 {
3004         struct device *dev = &adapter->pdev->dev;
3005         struct be_resources res = {0};
3006         int status;
3007
3008         if (BEx_chip(adapter)) {
3009                 BEx_get_resources(adapter, &res);
3010                 adapter->res = res;
3011         }
3012
3013         /* For BE3, only check if FW suggests a different max-txqs value */
3014         if (BE3_chip(adapter)) {
3015                 status = be_cmd_get_profile_config(adapter, &res, 0);
3016                 if (!status && res.max_tx_qs)
3017                         adapter->res.max_tx_qs =
3018                                 min(adapter->res.max_tx_qs, res.max_tx_qs);
3019         }
3020
3021         /* For Lancer, SH etc. read per-function resource limits from FW.
3022          * GET_FUNC_CONFIG returns per-function guaranteed limits.
3023          * GET_PROFILE_CONFIG returns PCI-E related limits (PF-pool limits).
3024          */
3025         if (!BEx_chip(adapter)) {
3026                 status = be_cmd_get_func_config(adapter, &res);
3027                 if (status)
3028                         return status;
3029
3030                 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3031                 if (be_roce_supported(adapter))
3032                         res.max_evt_qs /= 2;
3033                 adapter->res = res;
3034
3035                 if (be_physfn(adapter)) {
3036                         status = be_cmd_get_profile_config(adapter, &res, 0);
3037                         if (status)
3038                                 return status;
3039                         adapter->res.max_vfs = res.max_vfs;
3040                 }
3041
3042                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3043                          be_max_txqs(adapter), be_max_rxqs(adapter),
3044                          be_max_rss(adapter), be_max_eqs(adapter),
3045                          be_max_vfs(adapter));
3046                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3047                          be_max_uc(adapter), be_max_mc(adapter),
3048                          be_max_vlans(adapter));
3049         }
3050
3051         return 0;
3052 }
3053
3054 /* Routine to query per function resource limits */
3055 static int be_get_config(struct be_adapter *adapter)
3056 {
3057         int status;
3058
3059         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3060                                      &adapter->function_mode,
3061                                      &adapter->function_caps,
3062                                      &adapter->asic_rev);
3063         if (status)
3064                 return status;
3065
3066         status = be_get_resources(adapter);
3067         if (status)
3068                 return status;
3069
3070         /* primary mac needs 1 pmac entry */
3071         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3072                                    GFP_KERNEL);
3073         if (!adapter->pmac_id)
3074                 return -ENOMEM;
3075
3076         /* Sanitize cfg_num_qs based on HW and platform limits */
3077         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3078
3079         return 0;
3080 }
3081
3082 static int be_mac_setup(struct be_adapter *adapter)
3083 {
3084         u8 mac[ETH_ALEN];
3085         int status;
3086
3087         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3088                 status = be_cmd_get_perm_mac(adapter, mac);
3089                 if (status)
3090                         return status;
3091
3092                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3093                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3094         } else {
3095                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3096                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3097         }
3098
3099         /* On BE3 VFs this cmd may fail due to lack of privilege.
3100          * Ignore the failure as in this case pmac_id is fetched
3101          * in the IFACE_CREATE cmd.
3102          */
3103         be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3104                         &adapter->pmac_id[0], 0);
3105         return 0;
3106 }
3107
3108 static void be_schedule_worker(struct be_adapter *adapter)
3109 {
3110         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3111         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3112 }
3113
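/* Create EQs first, then the TX, RX and MCC queues that post to them, and
 * finally publish the actual queue counts to the stack. The caller must
 * hold rtnl_lock() for the netif_set_real_num_*_queues() calls (see the
 * note in be_setup()).
 */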
3114 static int be_setup_queues(struct be_adapter *adapter)
3115 {
3116         struct net_device *netdev = adapter->netdev;
3117         int status;
3118
3119         status = be_evt_queues_create(adapter);
3120         if (status)
3121                 goto err;
3122
3123         status = be_tx_qs_create(adapter);
3124         if (status)
3125                 goto err;
3126
3127         status = be_rx_cqs_create(adapter);
3128         if (status)
3129                 goto err;
3130
3131         status = be_mcc_queues_create(adapter);
3132         if (status)
3133                 goto err;
3134
3135         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3136         if (status)
3137                 goto err;
3138
3139         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3140         if (status)
3141                 goto err;
3142
3143         return 0;
3144 err:
3145         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3146         return status;
3147 }
3148
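/* Re-create all queues, e.g. after a queue-count change: quiesce the
 * netdev and the worker, destroy the queues, re-program MSI-x (only when
 * no vectors are shared with RoCE) and bring everything back up.
 */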
3149 int be_update_queues(struct be_adapter *adapter)
3150 {
3151         struct net_device *netdev = adapter->netdev;
3152         int status;
3153
3154         if (netif_running(netdev))
3155                 be_close(netdev);
3156
3157         be_cancel_worker(adapter);
3158
3159         /* If any vectors have been shared with RoCE we cannot re-program
3160          * the MSIx table.
3161          */
3162         if (!adapter->num_msix_roce_vec)
3163                 be_msix_disable(adapter);
3164
3165         be_clear_queues(adapter);
3166
3167         if (!msix_enabled(adapter)) {
3168                 status = be_msix_enable(adapter);
3169                 if (status)
3170                         return status;
3171         }
3172
3173         status = be_setup_queues(adapter);
3174         if (status)
3175                 return status;
3176
3177         be_schedule_worker(adapter);
3178
3179         if (netif_running(netdev))
3180                 status = be_open(netdev);
3181
3182         return status;
3183 }
3184
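/* Main bring-up path, also re-run during error recovery: query FW config
 * and resource limits, enable MSI-x, create the primary interface and the
 * queues, program the MAC, and optionally enable SR-IOV before kicking off
 * the worker.
 */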
3185 static int be_setup(struct be_adapter *adapter)
3186 {
3187         struct device *dev = &adapter->pdev->dev;
3188         u32 tx_fc, rx_fc, en_flags;
3189         int status;
3190
3191         be_setup_init(adapter);
3192
3193         if (!lancer_chip(adapter))
3194                 be_cmd_req_native_mode(adapter);
3195
3196         status = be_get_config(adapter);
3197         if (status)
3198                 goto err;
3199
3200         status = be_msix_enable(adapter);
3201         if (status)
3202                 goto err;
3203
3204         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3205                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3206         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3207                 en_flags |= BE_IF_FLAGS_RSS;
3208         en_flags &= be_if_cap_flags(adapter);
3209         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3210                                   &adapter->if_handle, 0);
3211         if (status)
3212                 goto err;
3213
3214         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3215         rtnl_lock();
3216         status = be_setup_queues(adapter);
3217         rtnl_unlock();
3218         if (status)
3219                 goto err;
3220
3221         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3222         /* In UMC mode the FW does not return the right privileges.
3223          * Override with privileges equivalent to a PF's.
3224          */
3225         if (be_is_mc(adapter))
3226                 adapter->cmd_privileges = MAX_PRIVILEGES;
3227
3228         status = be_mac_setup(adapter);
3229         if (status)
3230                 goto err;
3231
3232         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3233
3234         if (adapter->vlans_added)
3235                 be_vid_config(adapter);
3236
3237         be_set_rx_mode(adapter->netdev);
3238
3239         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3240
3241         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3242                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3243                                         adapter->rx_fc);
3244
3245         if (be_physfn(adapter) && num_vfs) {
3246                 if (be_max_vfs(adapter))
3247                         be_vf_setup(adapter);
3248                 else
3249                         dev_warn(dev, "device doesn't support SRIOV\n");
3250         }
3251
3252         status = be_cmd_get_phy_info(adapter);
3253         if (!status && be_pause_supported(adapter))
3254                 adapter->phy.fc_autoneg = 1;
3255
3256         be_schedule_worker(adapter);
3257         return 0;
3258 err:
3259         be_clear(adapter);
3260         return status;
3261 }
3262
3263 #ifdef CONFIG_NET_POLL_CONTROLLER
3264 static void be_netpoll(struct net_device *netdev)
3265 {
3266         struct be_adapter *adapter = netdev_priv(netdev);
3267         struct be_eq_obj *eqo;
3268         int i;
3269
3270         for_all_evt_queues(adapter, eqo, i) {
3271                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3272                 napi_schedule(&eqo->napi);
3273         }
3274
3275         return;
3276 }
3277 #endif
3278
3279 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3280 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3281
3282 static bool be_flash_redboot(struct be_adapter *adapter,
3283                         const u8 *p, u32 img_start, int image_size,
3284                         int hdr_size)
3285 {
3286         u32 crc_offset;
3287         u8 flashed_crc[4];
3288         int status;
3289
3290         crc_offset = hdr_size + img_start + image_size - 4;
3291
3292         p += crc_offset;
3293
3294         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3295                         (image_size - 4));
3296         if (status) {
3297                 dev_err(&adapter->pdev->dev,
3298                         "could not get crc from flash, not flashing redboot\n");
3299                 return false;
3300         }
3301
3302         /* update redboot only if crc does not match */
3303         if (!memcmp(flashed_crc, p, 4))
3304                 return false;
3305         else
3306                 return true;
3307 }
3308
3309 static bool phy_flashing_required(struct be_adapter *adapter)
3310 {
3311         return (adapter->phy.phy_type == TN_8022 &&
3312                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3313 }
3314
3315 static bool is_comp_in_ufi(struct be_adapter *adapter,
3316                            struct flash_section_info *fsec, int type)
3317 {
3318         int i = 0, img_type = 0;
3319         struct flash_section_info_g2 *fsec_g2 = NULL;
3320
3321         if (BE2_chip(adapter))
3322                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3323
3324         for (i = 0; i < MAX_FLASH_COMP; i++) {
3325                 if (fsec_g2)
3326                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3327                 else
3328                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3329
3330                 if (img_type == type)
3331                         return true;
3332         }
3333         return false;
3334
3335 }
3336
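/* Locate the flash-section directory inside the UFI image: skip past the
 * file and image headers, then scan in 32-byte steps for the flash cookie.
 */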
3337 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3338                                          int header_size,
3339                                          const struct firmware *fw)
3340 {
3341         struct flash_section_info *fsec = NULL;
3342         const u8 *p = fw->data;
3343
3344         p += header_size;
3345         while (p < (fw->data + fw->size)) {
3346                 fsec = (struct flash_section_info *)p;
3347                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3348                         return fsec;
3349                 p += 32;
3350         }
3351         return NULL;
3352 }
3353
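/* Write one image to the flash in chunks of up to 32KB. Intermediate
 * chunks use a SAVE op; the final chunk uses a FLASH op, which appears to
 * commit the accumulated image (PHY FW has its own pair of ops).
 */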
3354 static int be_flash(struct be_adapter *adapter, const u8 *img,
3355                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3356 {
3357         u32 total_bytes = 0, flash_op, num_bytes = 0;
3358         int status = 0;
3359         struct be_cmd_write_flashrom *req = flash_cmd->va;
3360
3361         total_bytes = img_size;
3362         while (total_bytes) {
3363                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3364
3365                 total_bytes -= num_bytes;
3366
3367                 if (!total_bytes) {
3368                         if (optype == OPTYPE_PHY_FW)
3369                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3370                         else
3371                                 flash_op = FLASHROM_OPER_FLASH;
3372                 } else {
3373                         if (optype == OPTYPE_PHY_FW)
3374                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3375                         else
3376                                 flash_op = FLASHROM_OPER_SAVE;
3377                 }
3378
3379                 memcpy(req->data_buf, img, num_bytes);
3380                 img += num_bytes;
3381                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3382                                                 flash_op, num_bytes);
3383                 if (status) {
3384                         if (status == ILLEGAL_IOCTL_REQ &&
3385                             optype == OPTYPE_PHY_FW)
3386                                 break;
3387                         dev_err(&adapter->pdev->dev,
3388                                 "cmd to write to flash rom failed.\n");
3389                         return status;
3390                 }
3391         }
3392         return 0;
3393 }
3394
3395 /* For BE2, BE3 and BE3-R */
3396 static int be_flash_BEx(struct be_adapter *adapter,
3397                          const struct firmware *fw,
3398                          struct be_dma_mem *flash_cmd,
3399                          int num_of_images)
3401 {
3402         int status = 0, i, filehdr_size = 0;
3403         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3404         const u8 *p = fw->data;
3405         const struct flash_comp *pflashcomp;
3406         int num_comp, redboot;
3407         struct flash_section_info *fsec = NULL;
3408
3409         struct flash_comp gen3_flash_types[] = {
3410                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3411                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3412                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3413                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3414                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3415                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3416                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3417                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3418                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3419                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3420                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3421                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3422                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3423                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3424                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3425                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3426                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3427                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3428                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3429                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3430         };
3431
3432         struct flash_comp gen2_flash_types[] = {
3433                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3434                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3435                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3436                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3437                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3438                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3439                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3440                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3441                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3442                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3443                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3444                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3445                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3446                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3447                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3448                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3449         };
3450
3451         if (BE3_chip(adapter)) {
3452                 pflashcomp = gen3_flash_types;
3453                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3454                 num_comp = ARRAY_SIZE(gen3_flash_types);
3455         } else {
3456                 pflashcomp = gen2_flash_types;
3457                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3458                 num_comp = ARRAY_SIZE(gen2_flash_types);
3459         }
3460
3461         /* Get flash section info*/
3462         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3463         if (!fsec) {
3464                 dev_err(&adapter->pdev->dev,
3465                         "Invalid Cookie. UFI corrupted?\n");
3466                 return -1;
3467         }
3468         for (i = 0; i < num_comp; i++) {
3469                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3470                         continue;
3471
3472                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3473                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3474                         continue;
3475
3476                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3477                     !phy_flashing_required(adapter))
3478                         continue;
3479
3480                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3481                         redboot = be_flash_redboot(adapter, fw->data,
3482                                 pflashcomp[i].offset, pflashcomp[i].size,
3483                                 filehdr_size + img_hdrs_size);
3484                         if (!redboot)
3485                                 continue;
3486                 }
3487
3488                 p = fw->data;
3489                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3490                 if (p + pflashcomp[i].size > fw->data + fw->size)
3491                         return -1;
3492
3493                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3494                                         pflashcomp[i].size);
3495                 if (status) {
3496                         dev_err(&adapter->pdev->dev,
3497                                 "Flashing section type %d failed.\n",
3498                                 pflashcomp[i].img_type);
3499                         return status;
3500                 }
3501         }
3502         return 0;
3503 }
3504
3505 static int be_flash_skyhawk(struct be_adapter *adapter,
3506                 const struct firmware *fw,
3507                 struct be_dma_mem *flash_cmd, int num_of_images)
3508 {
3509         int status = 0, i, filehdr_size = 0;
3510         int img_offset, img_size, img_optype, redboot;
3511         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3512         const u8 *p = fw->data;
3513         struct flash_section_info *fsec = NULL;
3514
3515         filehdr_size = sizeof(struct flash_file_hdr_g3);
3516         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3517         if (!fsec) {
3518                 dev_err(&adapter->pdev->dev,
3519                         "Invalid Cookie. UFI corrupted?\n");
3520                 return -1;
3521         }
3522
3523         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3524                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3525                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3526
3527                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3528                 case IMAGE_FIRMWARE_iSCSI:
3529                         img_optype = OPTYPE_ISCSI_ACTIVE;
3530                         break;
3531                 case IMAGE_BOOT_CODE:
3532                         img_optype = OPTYPE_REDBOOT;
3533                         break;
3534                 case IMAGE_OPTION_ROM_ISCSI:
3535                         img_optype = OPTYPE_BIOS;
3536                         break;
3537                 case IMAGE_OPTION_ROM_PXE:
3538                         img_optype = OPTYPE_PXE_BIOS;
3539                         break;
3540                 case IMAGE_OPTION_ROM_FCoE:
3541                         img_optype = OPTYPE_FCOE_BIOS;
3542                         break;
3543                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3544                         img_optype = OPTYPE_ISCSI_BACKUP;
3545                         break;
3546                 case IMAGE_NCSI:
3547                         img_optype = OPTYPE_NCSI_FW;
3548                         break;
3549                 default:
3550                         continue;
3551                 }
3552
3553                 if (img_optype == OPTYPE_REDBOOT) {
3554                         redboot = be_flash_redboot(adapter, fw->data,
3555                                         img_offset, img_size,
3556                                         filehdr_size + img_hdrs_size);
3557                         if (!redboot)
3558                                 continue;
3559                 }
3560
3561                 p = fw->data;
3562                 p += filehdr_size + img_offset + img_hdrs_size;
3563                 if (p + img_size > fw->data + fw->size)
3564                         return -1;
3565
3566                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3567                 if (status) {
3568                         dev_err(&adapter->pdev->dev,
3569                                 "Flashing section type %d failed.\n",
3570                                 le32_to_cpu(fsec->fsec_entry[i].type));
3571                         return status;
3572                 }
3573         }
3574         return 0;
3575 }
3576
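/* Lancer FW download: stream the image to the "/prg" object in 32KB
 * chunks via WRITE_OBJECT, then issue a zero-length write to commit it.
 * Depending on change_status the FW is then reset, or a host reboot is
 * needed for the new FW to become active.
 */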
3577 static int lancer_fw_download(struct be_adapter *adapter,
3578                                 const struct firmware *fw)
3579 {
3580 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3581 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3582         struct be_dma_mem flash_cmd;
3583         const u8 *data_ptr = NULL;
3584         u8 *dest_image_ptr = NULL;
3585         size_t image_size = 0;
3586         u32 chunk_size = 0;
3587         u32 data_written = 0;
3588         u32 offset = 0;
3589         int status = 0;
3590         u8 add_status = 0;
3591         u8 change_status;
3592
3593         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3594                 dev_err(&adapter->pdev->dev,
3595                         "FW image not properly aligned. "
3596                         "Length must be 4-byte aligned.\n");
3597                 status = -EINVAL;
3598                 goto lancer_fw_exit;
3599         }
3600
3601         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3602                                 + LANCER_FW_DOWNLOAD_CHUNK;
3603         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3604                                           &flash_cmd.dma, GFP_KERNEL);
3605         if (!flash_cmd.va) {
3606                 status = -ENOMEM;
3607                 goto lancer_fw_exit;
3608         }
3609
3610         dest_image_ptr = flash_cmd.va +
3611                                 sizeof(struct lancer_cmd_req_write_object);
3612         image_size = fw->size;
3613         data_ptr = fw->data;
3614
3615         while (image_size) {
3616                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3617
3618                 /* Copy the image chunk content. */
3619                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3620
3621                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3622                                                  chunk_size, offset,
3623                                                  LANCER_FW_DOWNLOAD_LOCATION,
3624                                                  &data_written, &change_status,
3625                                                  &add_status);
3626                 if (status)
3627                         break;
3628
3629                 offset += data_written;
3630                 data_ptr += data_written;
3631                 image_size -= data_written;
3632         }
3633
3634         if (!status) {
3635                 /* Commit the FW written */
3636                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3637                                                  0, offset,
3638                                                  LANCER_FW_DOWNLOAD_LOCATION,
3639                                                  &data_written, &change_status,
3640                                                  &add_status);
3641         }
3642
3643         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3644                                 flash_cmd.dma);
3645         if (status) {
3646                 dev_err(&adapter->pdev->dev,
3647                         "Firmware load error. "
3648                         "Status code: 0x%x Additional Status: 0x%x\n",
3649                         status, add_status);
3650                 goto lancer_fw_exit;
3651         }
3652
3653         if (change_status == LANCER_FW_RESET_NEEDED) {
3654                 status = lancer_physdev_ctrl(adapter,
3655                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3656                 if (status) {
3657                         dev_err(&adapter->pdev->dev,
3658                                 "Adapter busy for FW reset.\n"
3659                                 "New FW will not be active.\n");
3660                         goto lancer_fw_exit;
3661                 }
3662         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3663                 dev_err(&adapter->pdev->dev,
3664                         "System reboot required for new FW "
3665                         "to be active\n");
3666         }
3667
3668         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3669 lancer_fw_exit:
3670         return status;
3671 }
3672
3673 #define UFI_TYPE2               2
3674 #define UFI_TYPE3               3
3675 #define UFI_TYPE3R              10
3676 #define UFI_TYPE4               4
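/* Determine the UFI image type from the file header and match it against
 * the chip: the leading digit of the build string selects the generation,
 * and asic_type_rev distinguishes BE3-R (UFI_TYPE3R) UFIs.
 */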
3677 static int be_get_ufi_type(struct be_adapter *adapter,
3678                            struct flash_file_hdr_g3 *fhdr)
3679 {
3680         if (fhdr == NULL)
3681                 goto be_get_ufi_exit;
3682
3683         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3684                 return UFI_TYPE4;
3685         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3686                 if (fhdr->asic_type_rev == 0x10)
3687                         return UFI_TYPE3R;
3688                 else
3689                         return UFI_TYPE3;
3690         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3691                 return UFI_TYPE2;
3692
3693 be_get_ufi_exit:
3694         dev_err(&adapter->pdev->dev,
3695                 "UFI and Interface are not compatible for flashing\n");
3696         return -1;
3697 }
3698
3699 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3700 {
3701         struct flash_file_hdr_g3 *fhdr3;
3702         struct image_hdr *img_hdr_ptr = NULL;
3703         struct be_dma_mem flash_cmd;
3704         const u8 *p;
3705         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3706
3707         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3708         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3709                                           &flash_cmd.dma, GFP_KERNEL);
3710         if (!flash_cmd.va) {
3711                 status = -ENOMEM;
3712                 goto be_fw_exit;
3713         }
3714
3715         p = fw->data;
3716         fhdr3 = (struct flash_file_hdr_g3 *)p;
3717
3718         ufi_type = be_get_ufi_type(adapter, fhdr3);
3719
3720         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3721         for (i = 0; i < num_imgs; i++) {
3722                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3723                                 (sizeof(struct flash_file_hdr_g3) +
3724                                  i * sizeof(struct image_hdr)));
3725                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3726                         switch (ufi_type) {
3727                         case UFI_TYPE4:
3728                                 status = be_flash_skyhawk(adapter, fw,
3729                                                         &flash_cmd, num_imgs);
3730                                 break;
3731                         case UFI_TYPE3R:
3732                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3733                                                       num_imgs);
3734                                 break;
3735                         case UFI_TYPE3:
3736                                 /* Do not flash this ufi on BE3-R cards */
3737                                 if (adapter->asic_rev < 0x10)
3738                                         status = be_flash_BEx(adapter, fw,
3739                                                               &flash_cmd,
3740                                                               num_imgs);
3741                                 else {
3742                                         status = -1;
3743                                         dev_err(&adapter->pdev->dev,
3744                                                 "Can't load BE3 UFI on BE3R\n");
3745                                 }
3746                         }
3747                 }
3748         }
3749
3750         if (ufi_type == UFI_TYPE2)
3751                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3752         else if (ufi_type == -1)
3753                 status = -1;
3754
3755         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3756                           flash_cmd.dma);
3757         if (status) {
3758                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3759                 goto be_fw_exit;
3760         }
3761
3762         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3763
3764 be_fw_exit:
3765         return status;
3766 }
3767
3768 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3769 {
3770         const struct firmware *fw;
3771         int status;
3772
3773         if (!netif_running(adapter->netdev)) {
3774                 dev_err(&adapter->pdev->dev,
3775                         "Firmware load not allowed (interface is down)\n");
3776                 return -ENETDOWN;
3777         }
3778
3779         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3780         if (status)
3781                 goto fw_exit;
3782
3783         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3784
3785         if (lancer_chip(adapter))
3786                 status = lancer_fw_download(adapter, fw);
3787         else
3788                 status = be_fw_download(adapter, fw);
3789
3790         if (!status)
3791                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3792                                   adapter->fw_on_flash);
3793
3794 fw_exit:
3795         release_firmware(fw);
3796         return status;
3797 }
3798
3799 static int be_ndo_bridge_setlink(struct net_device *dev,
3800                                     struct nlmsghdr *nlh)
3801 {
3802         struct be_adapter *adapter = netdev_priv(dev);
3803         struct nlattr *attr, *br_spec;
3804         int rem;
3805         int status = 0;
3806         u16 mode = 0;
3807
3808         if (!sriov_enabled(adapter))
3809                 return -EOPNOTSUPP;
3810
3811         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3812         if (!br_spec)
                     return -EINVAL;
3813         nla_for_each_nested(attr, br_spec, rem) {
3814                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3815                         continue;
3816
3817                 mode = nla_get_u16(attr);
3818                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3819                         return -EINVAL;
3820
3821                 status = be_cmd_set_hsw_config(adapter, 0, 0,
3822                                                adapter->if_handle,
3823                                                mode == BRIDGE_MODE_VEPA ?
3824                                                PORT_FWD_TYPE_VEPA :
3825                                                PORT_FWD_TYPE_VEB);
3826                 if (status)
3827                         goto err;
3828
3829                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3830                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3831
3832                 return status;
3833         }
3834 err:
3835         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3836                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3837
3838         return status;
3839 }
3840
3841 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3842                                     struct net_device *dev,
3843                                     u32 filter_mask)
3844 {
3845         struct be_adapter *adapter = netdev_priv(dev);
3846         int status = 0;
3847         u8 hsw_mode;
3848
3849         if (!sriov_enabled(adapter))
3850                 return 0;
3851
3852         /* BE and Lancer chips support VEB mode only */
3853         if (BEx_chip(adapter) || lancer_chip(adapter)) {
3854                 hsw_mode = PORT_FWD_TYPE_VEB;
3855         } else {
3856                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
3857                                                adapter->if_handle, &hsw_mode);
3858                 if (status)
3859                         return 0;
3860         }
3861
3862         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3863                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
3864                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3865 }
3866
3867 static const struct net_device_ops be_netdev_ops = {
3868         .ndo_open               = be_open,
3869         .ndo_stop               = be_close,
3870         .ndo_start_xmit         = be_xmit,
3871         .ndo_set_rx_mode        = be_set_rx_mode,
3872         .ndo_set_mac_address    = be_mac_addr_set,
3873         .ndo_change_mtu         = be_change_mtu,
3874         .ndo_get_stats64        = be_get_stats64,
3875         .ndo_validate_addr      = eth_validate_addr,
3876         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3877         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3878         .ndo_set_vf_mac         = be_set_vf_mac,
3879         .ndo_set_vf_vlan        = be_set_vf_vlan,
3880         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3881         .ndo_get_vf_config      = be_get_vf_config,
3882 #ifdef CONFIG_NET_POLL_CONTROLLER
3883         .ndo_poll_controller    = be_netpoll,
3884 #endif
3885         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
3886         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
3887 };
3888
3889 static void be_netdev_init(struct net_device *netdev)
3890 {
3891         struct be_adapter *adapter = netdev_priv(netdev);
3892
3893         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3894                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3895                 NETIF_F_HW_VLAN_CTAG_TX;
3896         if (be_multi_rxq(adapter))
3897                 netdev->hw_features |= NETIF_F_RXHASH;
3898
3899         netdev->features |= netdev->hw_features |
3900                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3901
3902         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3903                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3904
3905         netdev->priv_flags |= IFF_UNICAST_FLT;
3906
3907         netdev->flags |= IFF_MULTICAST;
3908
3909         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3910
3911         netdev->netdev_ops = &be_netdev_ops;
3912
3913         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3914 }
3915
3916 static void be_unmap_pci_bars(struct be_adapter *adapter)
3917 {
3918         if (adapter->csr)
3919                 pci_iounmap(adapter->pdev, adapter->csr);
3920         if (adapter->db)
3921                 pci_iounmap(adapter->pdev, adapter->db);
3922 }
3923
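/* Doorbell BAR selection: Lancer and VFs expose doorbells on BAR 0,
 * while BE PFs use BAR 4.
 */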
3924 static int db_bar(struct be_adapter *adapter)
3925 {
3926         if (lancer_chip(adapter) || !be_physfn(adapter))
3927                 return 0;
3928         else
3929                 return 4;
3930 }
3931
3932 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3933 {
3934         if (skyhawk_chip(adapter)) {
3935                 adapter->roce_db.size = 4096;
3936                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3937                                                               db_bar(adapter));
3938                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3939                                                                db_bar(adapter));
3940         }
3941         return 0;
3942 }
3943
3944 static int be_map_pci_bars(struct be_adapter *adapter)
3945 {
3946         u8 __iomem *addr;
3947         u32 sli_intf;
3948
3949         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3950         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3951                                 SLI_INTF_IF_TYPE_SHIFT;
3952
3953         if (BEx_chip(adapter) && be_physfn(adapter)) {
3954                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3955                 if (adapter->csr == NULL)
3956                         return -ENOMEM;
3957         }
3958
3959         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3960         if (addr == NULL)
3961                 goto pci_map_err;
3962         adapter->db = addr;
3963
3964         be_roce_map_pci_bars(adapter);
3965         return 0;
3966
3967 pci_map_err:
3968         be_unmap_pci_bars(adapter);
3969         return -ENOMEM;
3970 }
3971
3972 static void be_ctrl_cleanup(struct be_adapter *adapter)
3973 {
3974         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3975
3976         be_unmap_pci_bars(adapter);
3977
3978         if (mem->va)
3979                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3980                                   mem->dma);
3981
3982         mem = &adapter->rx_filter;
3983         if (mem->va)
3984                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3985                                   mem->dma);
3986 }
3987
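/* Map the PCI BARs and allocate the mailbox and rx-filter DMA buffers.
 * The mailbox must be 16-byte aligned, so 16 extra bytes are allocated
 * and the va/dma pointers are rounded up with PTR_ALIGN().
 */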
3988 static int be_ctrl_init(struct be_adapter *adapter)
3989 {
3990         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3991         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3992         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3993         u32 sli_intf;
3994         int status;
3995
3996         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3997         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3998                                  SLI_INTF_FAMILY_SHIFT;
3999         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4000
4001         status = be_map_pci_bars(adapter);
4002         if (status)
4003                 goto done;
4004
4005         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4006         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4007                                                 mbox_mem_alloc->size,
4008                                                 &mbox_mem_alloc->dma,
4009                                                 GFP_KERNEL);
4010         if (!mbox_mem_alloc->va) {
4011                 status = -ENOMEM;
4012                 goto unmap_pci_bars;
4013         }
4014         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4015         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4016         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4017         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4018
4019         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4020         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4021                                             rx_filter->size, &rx_filter->dma,
4022                                             GFP_KERNEL);
4023         if (rx_filter->va == NULL) {
4024                 status = -ENOMEM;
4025                 goto free_mbox;
4026         }
4027
4028         mutex_init(&adapter->mbox_lock);
4029         spin_lock_init(&adapter->mcc_lock);
4030         spin_lock_init(&adapter->mcc_cq_lock);
4031
4032         init_completion(&adapter->flash_compl);
4033         pci_save_state(adapter->pdev);
4034         return 0;
4035
4036 free_mbox:
4037         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4038                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4039
4040 unmap_pci_bars:
4041         be_unmap_pci_bars(adapter);
4042
4043 done:
4044         return status;
4045 }
4046
4047 static void be_stats_cleanup(struct be_adapter *adapter)
4048 {
4049         struct be_dma_mem *cmd = &adapter->stats_cmd;
4050
4051         if (cmd->va)
4052                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4053                                   cmd->va, cmd->dma);
4054 }
4055
4056 static int be_stats_init(struct be_adapter *adapter)
4057 {
4058         struct be_dma_mem *cmd = &adapter->stats_cmd;
4059
4060         if (lancer_chip(adapter))
4061                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4062         else if (BE2_chip(adapter))
4063                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4064         else
4065                 /* BE3 and Skyhawk */
4066                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4067
4068         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4069                                       GFP_KERNEL);
4070         if (cmd->va == NULL)
4071                 return -ENOMEM;
4072         return 0;
4073 }
4074
4075 static void be_remove(struct pci_dev *pdev)
4076 {
4077         struct be_adapter *adapter = pci_get_drvdata(pdev);
4078
4079         if (!adapter)
4080                 return;
4081
4082         be_roce_dev_remove(adapter);
4083         be_intr_set(adapter, false);
4084
4085         cancel_delayed_work_sync(&adapter->func_recovery_work);
4086
4087         unregister_netdev(adapter->netdev);
4088
4089         be_clear(adapter);
4090
4091         /* tell fw we're done with firing cmds */
4092         be_cmd_fw_clean(adapter);
4093
4094         be_stats_cleanup(adapter);
4095
4096         be_ctrl_cleanup(adapter);
4097
4098         pci_disable_pcie_error_reporting(pdev);
4099
4100         pci_set_drvdata(pdev, NULL);
4101         pci_release_regions(pdev);
4102         pci_disable_device(pdev);
4103
4104         free_netdev(adapter->netdev);
4105 }
4106
4107 bool be_is_wol_supported(struct be_adapter *adapter)
4108 {
4109         return (adapter->wol_cap & BE_WOL_CAP) &&
4110                !be_is_wol_excluded(adapter);
4111 }
4112
4113 u32 be_get_fw_log_level(struct be_adapter *adapter)
4114 {
4115         struct be_dma_mem extfat_cmd;
4116         struct be_fat_conf_params *cfgs;
4117         int status;
4118         u32 level = 0;
4119         int j;
4120
4121         if (lancer_chip(adapter))
4122                 return 0;
4123
4124         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4125         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4126         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4127                                              &extfat_cmd.dma);
4128
4129         if (!extfat_cmd.va) {
4130                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4131                         __func__);
4132                 goto err;
4133         }
4134
4135         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4136         if (!status) {
4137                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4138                                                 sizeof(struct be_cmd_resp_hdr));
4139                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4140                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4141                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4142                 }
4143         }
4144         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4145                             extfat_cmd.dma);
4146 err:
4147         return level;
4148 }
4149
4150 static int be_get_initial_config(struct be_adapter *adapter)
4151 {
4152         int status;
4153         u32 level;
4154
4155         status = be_cmd_get_cntl_attributes(adapter);
4156         if (status)
4157                 return status;
4158
4159         status = be_cmd_get_acpi_wol_cap(adapter);
4160         if (status) {
4161                 /* in case of a failure to get wol capabilities
4162                  * check the exclusion list to determine WOL capability */
4163                 if (!be_is_wol_excluded(adapter))
4164                         adapter->wol_cap |= BE_WOL_CAP;
4165         }
4166
4167         if (be_is_wol_supported(adapter))
4168                 adapter->wol = true;
4169
4170         /* Must be a power of 2 or else MODULO will BUG_ON */
4171         adapter->be_get_temp_freq = 64;
4172
4173         level = be_get_fw_log_level(adapter);
4174         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4175
4176         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4177         return 0;
4178 }
4179
4180 static int lancer_recover_func(struct be_adapter *adapter)
4181 {
4182         struct device *dev = &adapter->pdev->dev;
4183         int status;
4184
4185         status = lancer_test_and_set_rdy_state(adapter);
4186         if (status)
4187                 goto err;
4188
4189         if (netif_running(adapter->netdev))
4190                 be_close(adapter->netdev);
4191
4192         be_clear(adapter);
4193
4194         be_clear_all_error(adapter);
4195
4196         status = be_setup(adapter);
4197         if (status)
4198                 goto err;
4199
4200         if (netif_running(adapter->netdev)) {
4201                 status = be_open(adapter->netdev);
4202                 if (status)
4203                         goto err;
4204         }
4205
4206         dev_info(dev, "Error recovery successful\n");
4207         return 0;
4208 err:
4209         if (status == -EAGAIN)
4210                 dev_err(dev, "Waiting for resource provisioning\n");
4211         else
4212                 dev_err(dev, "Error recovery failed\n");
4213
4214         return status;
4215 }
4216
4217 static void be_func_recovery_task(struct work_struct *work)
4218 {
4219         struct be_adapter *adapter =
4220                 container_of(work, struct be_adapter, func_recovery_work.work);
4221         int status = 0;
4222
4223         be_detect_error(adapter);
4224
4225         if (adapter->hw_error && lancer_chip(adapter)) {
4227                 rtnl_lock();
4228                 netif_device_detach(adapter->netdev);
4229                 rtnl_unlock();
4230
4231                 status = lancer_recover_func(adapter);
4232                 if (!status)
4233                         netif_device_attach(adapter->netdev);
4234         }
4235
4236         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4237          * no need to attempt further recovery.
4238          */
4239         if (!status || status == -EAGAIN)
4240                 schedule_delayed_work(&adapter->func_recovery_work,
4241                                       msecs_to_jiffies(1000));
4242 }
4243
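/* Periodic (1 sec) housekeeping: reap MCC completions while interrupts are
 * not yet enabled, refresh stats and die temperature, replenish starved RX
 * rings and update EQ delays via be_eqd_update().
 */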
4244 static void be_worker(struct work_struct *work)
4245 {
4246         struct be_adapter *adapter =
4247                 container_of(work, struct be_adapter, work.work);
4248         struct be_rx_obj *rxo;
4249         struct be_eq_obj *eqo;
4250         int i;
4251
4252         /* when interrupts are not yet enabled, just reap any pending
4253          * mcc completions */
4254         if (!netif_running(adapter->netdev)) {
4255                 local_bh_disable();
4256                 be_process_mcc(adapter);
4257                 local_bh_enable();
4258                 goto reschedule;
4259         }
4260
4261         if (!adapter->stats_cmd_sent) {
4262                 if (lancer_chip(adapter))
4263                         lancer_cmd_get_pport_stats(adapter,
4264                                                 &adapter->stats_cmd);
4265                 else
4266                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4267         }
4268
4269         if (be_physfn(adapter) &&
4270             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4271                 be_cmd_get_die_temperature(adapter);
4272
4273         for_all_rx_queues(adapter, rxo, i) {
4274                 if (rxo->rx_post_starved) {
4275                         rxo->rx_post_starved = false;
4276                         be_post_rx_frags(rxo, GFP_KERNEL);
4277                 }
4278         }
4279
4280         for_all_evt_queues(adapter, eqo, i)
4281                 be_eqd_update(adapter, eqo);
4282
4283 reschedule:
4284         adapter->work_counter++;
4285         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4286 }
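/* be_worker() re-arms itself every 1000 ms, so with be_get_temp_freq set
 * to 64 the die-temperature query above fires roughly once a minute. */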
4287
4288 /* If any VFs are already enabled, don't FLR the PF */
4289 static bool be_reset_required(struct be_adapter *adapter)
4290 {
4291         return pci_num_vf(adapter->pdev) == 0;
4292 }
4293
4294 static char *mc_name(struct be_adapter *adapter)
4295 {
4296         if (adapter->function_mode & FLEX10_MODE)
4297                 return "FLEX10";
4298         else if (adapter->function_mode & VNIC_MODE)
4299                 return "vNIC";
4300         else if (adapter->function_mode & UMC_ENABLED)
4301                 return "UMC";
4302         else
4303                 return "";
4304 }
4305
4306 static inline char *func_name(struct be_adapter *adapter)
4307 {
4308         return be_physfn(adapter) ? "PF" : "VF";
4309 }
4310
4311 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4312 {
4313         int status = 0;
4314         struct be_adapter *adapter;
4315         struct net_device *netdev;
4316         char port_name;
4317
4318         status = pci_enable_device(pdev);
4319         if (status)
4320                 goto do_none;
4321
4322         status = pci_request_regions(pdev, DRV_NAME);
4323         if (status)
4324                 goto disable_dev;
4325         pci_set_master(pdev);
4326
4327         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4328         if (!netdev) {
4329                 status = -ENOMEM;
4330                 goto rel_reg;
4331         }
4332         adapter = netdev_priv(netdev);
4333         adapter->pdev = pdev;
4334         pci_set_drvdata(pdev, adapter);
4335         adapter->netdev = netdev;
4336         SET_NETDEV_DEV(netdev, &pdev->dev);
4337
4338         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4339         if (!status) {
4340                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4341                 if (status < 0) {
4342                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4343                         goto free_netdev;
4344                 }
4345                 netdev->features |= NETIF_F_HIGHDMA;
4346         } else {
4347                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4348                 if (!status)
4349                         status = dma_set_coherent_mask(&pdev->dev,
4350                                                        DMA_BIT_MASK(32));
4351                 if (status) {
4352                         dev_err(&pdev->dev, "Could not set PCI DMA mask\n");
4353                         goto free_netdev;
4354                 }
4355         }
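        /* A more compact equivalent of the 64/32-bit fallback above,
         * assuming a kernel that provides dma_set_mask_and_coherent():
         *
         *	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
         *	if (!status) {
         *		netdev->features |= NETIF_F_HIGHDMA;
         *	} else {
         *		status = dma_set_mask_and_coherent(&pdev->dev,
         *						   DMA_BIT_MASK(32));
         *		if (status) {
         *			dev_err(&pdev->dev, "Could not set PCI DMA mask\n");
         *			goto free_netdev;
         *		}
         *	}
         */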
4356
4357         status = pci_enable_pcie_error_reporting(pdev);
4358         if (status)
4359                 dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
4360
4361         status = be_ctrl_init(adapter);
4362         if (status)
4363                 goto free_netdev;
4364
4365         /* sync up with fw's ready state */
4366         if (be_physfn(adapter)) {
4367                 status = be_fw_wait_ready(adapter);
4368                 if (status)
4369                         goto ctrl_clean;
4370         }
4371
4372         if (be_reset_required(adapter)) {
4373                 status = be_cmd_reset_function(adapter);
4374                 if (status)
4375                         goto ctrl_clean;
4376
4377                 /* Wait for interrupts to quiesce after an FLR */
4378                 msleep(100);
4379         }
4380
4381         /* Allow interrupts for other ULPs running on NIC function */
4382         be_intr_set(adapter, true);
4383
4384         /* tell fw we're ready to fire cmds */
4385         status = be_cmd_fw_init(adapter);
4386         if (status)
4387                 goto ctrl_clean;
4388
4389         status = be_stats_init(adapter);
4390         if (status)
4391                 goto ctrl_clean;
4392
4393         status = be_get_initial_config(adapter);
4394         if (status)
4395                 goto stats_clean;
4396
4397         INIT_DELAYED_WORK(&adapter->work, be_worker);
4398         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4399         adapter->rx_fc = adapter->tx_fc = true;
4400
4401         status = be_setup(adapter);
4402         if (status)
4403                 goto stats_clean;
4404
4405         be_netdev_init(netdev);
4406         status = register_netdev(netdev);
4407         if (status)
4408                 goto unsetup;
4409
4410         be_roce_dev_add(adapter);
4411
4412         schedule_delayed_work(&adapter->func_recovery_work,
4413                               msecs_to_jiffies(1000));
4414
4415         be_cmd_query_port_name(adapter, &port_name);
4416
4417         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4418                  func_name(adapter), mc_name(adapter), port_name);
4419
4420         return 0;
4421
4422 unsetup:
4423         be_clear(adapter);
4424 stats_clean:
4425         be_stats_cleanup(adapter);
4426 ctrl_clean:
4427         be_ctrl_cleanup(adapter);
4428 free_netdev:
4429         free_netdev(netdev);
4430         pci_set_drvdata(pdev, NULL);
4431 rel_reg:
4432         pci_release_regions(pdev);
4433 disable_dev:
4434         pci_disable_device(pdev);
4435 do_none:
4436         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4437         return status;
4438 }
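/* Note on be_probe()'s unwind ladder: each error label undoes exactly the
 * steps that had succeeded before the failure, in reverse order of
 * acquisition, so a failure at any stage releases only what was taken. */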
4439
4440 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4441 {
4442         struct be_adapter *adapter = pci_get_drvdata(pdev);
4443         struct net_device *netdev = adapter->netdev;
4444
4445         if (adapter->wol)
4446                 be_setup_wol(adapter, true);
4447
4448         cancel_delayed_work_sync(&adapter->func_recovery_work);
4449
4450         netif_device_detach(netdev);
4451         if (netif_running(netdev)) {
4452                 rtnl_lock();
4453                 be_close(netdev);
4454                 rtnl_unlock();
4455         }
4456         be_clear(adapter);
4457
4458         pci_save_state(pdev);
4459         pci_disable_device(pdev);
4460         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4461         return 0;
4462 }
4463
4464 static int be_resume(struct pci_dev *pdev)
4465 {
4466         int status = 0;
4467         struct be_adapter *adapter = pci_get_drvdata(pdev);
4468         struct net_device *netdev = adapter->netdev;
4469
4470         netif_device_detach(netdev);
4471
4472         status = pci_enable_device(pdev);
4473         if (status)
4474                 return status;
4475
4476         pci_set_power_state(pdev, PCI_D0);
4477         pci_restore_state(pdev);
4478
4479         status = be_fw_wait_ready(adapter);
4480         if (status)
4481                 return status;
4482
4483         /* tell fw we're ready to fire cmds */
4484         status = be_cmd_fw_init(adapter);
4485         if (status)
4486                 return status;
4487
4488         be_setup(adapter);
4489         if (netif_running(netdev)) {
4490                 rtnl_lock();
4491                 be_open(netdev);
4492                 rtnl_unlock();
4493         }
4494
4495         schedule_delayed_work(&adapter->func_recovery_work,
4496                               msecs_to_jiffies(1000));
4497         netif_device_attach(netdev);
4498
4499         if (adapter->wol)
4500                 be_setup_wol(adapter, false);
4501
4502         return 0;
4503 }
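/* Note the symmetry with be_suspend(): WoL is armed as the first step of
 * suspend and disarmed as the last step of resume, so wake-up stays
 * enabled across the entire low-power window. */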
4504
4505 /*
4506  * be_cmd_reset_function() triggers an FLR, which stops BE from DMAing
4507  * any further data and protects a rebooting host from stray DMA.
4508  */
4508 static void be_shutdown(struct pci_dev *pdev)
4509 {
4510         struct be_adapter *adapter = pci_get_drvdata(pdev);
4511
4512         if (!adapter)
4513                 return;
4514
4515         cancel_delayed_work_sync(&adapter->work);
4516         cancel_delayed_work_sync(&adapter->func_recovery_work);
4517
4518         netif_device_detach(adapter->netdev);
4519
4520         be_cmd_reset_function(adapter);
4521
4522         pci_disable_device(pdev);
4523 }
4524
4525 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4526                                 pci_channel_state_t state)
4527 {
4528         struct be_adapter *adapter = pci_get_drvdata(pdev);
4529         struct net_device *netdev = adapter->netdev;
4530
4531         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4532
4533         if (!adapter->eeh_error) {
4534                 adapter->eeh_error = true;
4535
4536                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4537
4538                 rtnl_lock();
4539                 netif_device_detach(netdev);
4540                 if (netif_running(netdev))
4541                         be_close(netdev);
4542                 rtnl_unlock();
4543
4544                 be_clear(adapter);
4545         }
4546
4547         if (state == pci_channel_io_perm_failure)
4548                 return PCI_ERS_RESULT_DISCONNECT;
4549
4550         pci_disable_device(pdev);
4551
4552         /* The error could cause the FW to trigger a flash debug dump.
4553          * Resetting the card while flash dump is in progress
4554          * can cause it not to recover; wait for it to finish.
4555          * Wait only for first function as it is needed only once per
4556          * adapter.
4557          */
4558         if (pdev->devfn == 0)
4559                 ssleep(30);
4560
4561         return PCI_ERS_RESULT_NEED_RESET;
4562 }
4563
4564 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4565 {
4566         struct be_adapter *adapter = pci_get_drvdata(pdev);
4567         int status;
4568
4569         dev_info(&adapter->pdev->dev, "EEH reset\n");
4570
4571         status = pci_enable_device(pdev);
4572         if (status)
4573                 return PCI_ERS_RESULT_DISCONNECT;
4574
4575         pci_set_master(pdev);
4576         pci_set_power_state(pdev, PCI_D0);
4577         pci_restore_state(pdev);
4578
4579         /* Check if card is ok and fw is ready */
4580         dev_info(&adapter->pdev->dev,
4581                  "Waiting for FW to be ready after EEH reset\n");
4582         status = be_fw_wait_ready(adapter);
4583         if (status)
4584                 return PCI_ERS_RESULT_DISCONNECT;
4585
4586         pci_cleanup_aer_uncorrect_error_status(pdev);
4587         be_clear_all_error(adapter);
4588         return PCI_ERS_RESULT_RECOVERED;
4589 }
4590
4591 static void be_eeh_resume(struct pci_dev *pdev)
4592 {
4593         int status = 0;
4594         struct be_adapter *adapter = pci_get_drvdata(pdev);
4595         struct net_device *netdev = adapter->netdev;
4596
4597         dev_info(&adapter->pdev->dev, "EEH resume\n");
4598
4599         pci_save_state(pdev);
4600
4601         status = be_cmd_reset_function(adapter);
4602         if (status)
4603                 goto err;
4604
4605         /* tell fw we're ready to fire cmds */
4606         status = be_cmd_fw_init(adapter);
4607         if (status)
4608                 goto err;
4609
4610         status = be_setup(adapter);
4611         if (status)
4612                 goto err;
4613
4614         if (netif_running(netdev)) {
4615                 status = be_open(netdev);
4616                 if (status)
4617                         goto err;
4618         }
4619
4620         schedule_delayed_work(&adapter->func_recovery_work,
4621                               msecs_to_jiffies(1000));
4622         netif_device_attach(netdev);
4623         return;
4624 err:
4625         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4626 }
4627
4628 static const struct pci_error_handlers be_eeh_handlers = {
4629         .error_detected = be_eeh_err_detected,
4630         .slot_reset = be_eeh_reset,
4631         .resume = be_eeh_resume,
4632 };
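/* EEH recovery sequence (see Documentation/PCI/pci-error-recovery.txt):
 * the PCI core calls .error_detected first; when that returns
 * PCI_ERS_RESULT_NEED_RESET the slot is reset and .slot_reset runs; once
 * .slot_reset reports PCI_ERS_RESULT_RECOVERED the core calls .resume. */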
4633
4634 static struct pci_driver be_driver = {
4635         .name = DRV_NAME,
4636         .id_table = be_dev_ids,
4637         .probe = be_probe,
4638         .remove = be_remove,
4639         .suspend = be_suspend,
4640         .resume = be_resume,
4641         .shutdown = be_shutdown,
4642         .err_handler = &be_eeh_handlers
4643 };
4644
4645 static int __init be_init_module(void)
4646 {
4647         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4648             rx_frag_size != 2048) {
4649                 printk(KERN_WARNING DRV_NAME
4650                        ": Module param rx_frag_size must be 2048/4096/8192;"
4651                        " using 2048\n");
4652                 rx_frag_size = 2048;
4653         }
4654
4655         return pci_register_driver(&be_driver);
4656 }
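/* A more compact form of the rx_frag_size check, assuming linux/log2.h
 * is available for is_power_of_2() (illustrative only):
 *
 *	if (rx_frag_size < 2048 || rx_frag_size > 8192 ||
 *	    !is_power_of_2(rx_frag_size)) {
 *		pr_warn(DRV_NAME ": rx_frag_size must be 2048/4096/8192; using 2048\n");
 *		rx_frag_size = 2048;
 *	}
 */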
4657 module_init(be_init_module);
4658
4659 static void __exit be_exit_module(void)
4660 {
4661         pci_unregister_driver(&be_driver);
4662 }
4663 module_exit(be_exit_module);