/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds received data.");

static const struct pci_device_id be_dev_ids[] = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

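/* Ring-memory helpers: each queue is backed by one DMA-coherent buffer
 * of len * entry_size bytes, allocated when the queue is created and
 * freed (with the va pointer NULLed) when it is destroyed.
 */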
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                          u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

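/* Host interrupts can be toggled two ways: through the HOSTINTR bit in
 * the MEMBAR control register in PCI config space (below), or through a
 * FW command. be_intr_set() tries the FW command first and falls back
 * to the config-space write if the command fails.
 */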
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

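/* Doorbell helpers: the ring id and the number of entries posted (or
 * events/completions popped) are packed into a single 32-bit value and
 * written to the doorbell BAR. The wmb() makes sure the ring entries
 * themselves are visible in memory before hardware is told about them.
 */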
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                         bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

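/* ndo_set_mac_address handler: program the new MAC via PMAC_ADD, delete
 * the old PMAC entry, and then read back the currently active MAC from
 * FW, so the netdev address is updated only if the change really took
 * effect (a VF without FILTMGMT privilege may be silently refused).
 */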
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Proceed further only if the user-provided MAC is different
         * from the active MAC
         */
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK, only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC is successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
                                       adapter->if_handle, true, 0);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (!ether_addr_equal(addr->sa_data, mac)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only the v0 cmd; BE3 only v1; all later chips use v2 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only the v0 cmd; BE3 only v1; all later chips use v2 */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

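/* The populate_be_vN_stats() routines below copy the version-specific
 * FW stats response into the common be_drv_stats layout, after
 * byte-swapping the response in place with be_dws_le_to_cpu().
 */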
static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered = port_stats->rx_address_filtered +
                                    port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered = pport_stats->rx_address_filtered +
                                    pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

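/* Example of the 16-bit accumulation below: if *acc is 0x0001fff0 and
 * the HW counter has wrapped to 0x0010, val (0x0010) is less than the
 * low word of *acc (0xfff0), so 65536 is added on top of hi(*acc) + val
 * and *acc becomes 0x00020010.
 */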
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                               struct be_rx_obj *rxo, u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* On BEx chips this erx HW counter wraps around after
                 * 65535; the driver accumulates it into a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is a superset of v0 and v1; use it for v0/v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

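/* ndo_get_stats64: the per-queue 64-bit counters are sampled inside a
 * u64_stats fetch/retry loop, so a value torn by a concurrent writer on
 * a 32-bit host is re-read instead of being reported half-updated.
 */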
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                                struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_irq(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_irq(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if (link_status)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                               u32 wrb_cnt, u32 copied, u32 gso_segs,
                               bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                           bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy wrb to make the count even; non-Lancer chips
                 * require an even number of WRBs per packet
                 */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                     struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If the vlan priority provided by the OS is not in the available
         * bmap, fall back to the adapter's recommended priority
         */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
        return (inner_ip_hdr(skb)->version == 4) ?
                inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
        return (ip_hdr(skb)->version == 4) ?
                ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                         struct sk_buff *skb, u32 wrb_cnt, u32 len,
                         bool skip_hw_vlan)
{
        u16 vlan_tag, proto;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb->encapsulation) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        proto = skb_inner_ip_proto(skb);
                } else {
                        proto = skb_ip_proto(skb);
                }
                if (proto == IPPROTO_TCP)
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (proto == IPPROTO_UDP)
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                          bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

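/* Map the skb head and frags into WRBs on the TX ring. If any DMA
 * mapping fails, the ring head is rewound and every WRB mapped so far
 * is unmapped; returning 0 tells the caller to drop the skb.
 */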
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                        struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                        bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

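/* Insert the VLAN tag into the packet data in software. This is used
 * when HW tagging must be skipped (pvid/qnq workarounds): the tag is
 * spliced in with __vlan_put_tag() and vlan_tci is cleared so the HW
 * does not tag the packet a second time.
 */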
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* F/W workaround: setting skip_hw_vlan = 1 informs the F/W
                 * to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
                                                  struct sk_buff *skb,
                                                  bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in pvid-tagging mode
         */
        if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
err:
        return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
         * less may cause a transmit stall on that port. So the work-around is
         * to pad short packets (<= 32 bytes) to a 36-byte length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        return NULL;
                skb->len = 36;
        }

        if (BEx_chip(adapter) || lancer_chip(adapter)) {
                skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
                if (!skb)
                        return NULL;
        }

        return skb;
}

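/* Main transmit entry point (ndo_start_xmit): apply the chip-specific
 * workarounds, build the WRBs, and stop the subqueue *before* ringing
 * the doorbell when the ring is close to full, so the TX completions of
 * this very transmit can later wake the queue.
 */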
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit, which will wake up the
                 * queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
            new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                         "MTU must be between %d and %d bytes\n",
                         BE_MIN_MTU,
                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i = 0;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for_each_set_bit(i, adapter->vids, VLAN_N_VID)
                vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled. */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                        }
                }
        }

        return status;

set_vlan_promisc:
        if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
                return 0;

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else {
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                return status;

        if (test_bit(vid, adapter->vids))
                return status;

        set_bit(vid, adapter->vids);
        adapter->vlans_added++;

        status = be_vid_config(adapter);
        if (status) {
                adapter->vlans_added--;
                clear_bit(vid, adapter->vids);
        }

        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        clear_bit(vid, adapter->vids);
        status = be_vid_config(adapter);
        if (!status)
                adapter->vlans_added--;
        else
                set_bit(vid, adapter->vids);
ret:
        return status;
}

static void be_clear_promisc(struct be_adapter *adapter)
{
        adapter->promiscuous = false;
        adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

        be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

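/* ndo_set_rx_mode: the checks below are ordered. Full promiscuous mode
 * wins outright; otherwise multicast-promisc is used when the mc list
 * exceeds the HW filters, the unicast list is re-programmed into PMAC
 * slots (slot 0 belongs to the primary MAC), and finally the multicast
 * filter is written, falling back to ALLMULTI if that fails.
 */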
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                be_clear_promisc(adapter);
                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev,
                         "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev,
                         "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                        mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                            struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
        vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan)
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
        } else {
                /* Reset Transparent VLAN Tagging. */
                status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
                                               vf + 1, vf_cfg->if_handle, 0);
        }

        if (!status)
                vf_cfg->vlan_tag = vlan;
        else
                dev_info(&adapter->pdev->dev,
                         "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
        if (status)
                dev_err(&adapter->pdev->dev,
                        "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
                                int link_state)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
        if (!status)
                adapter->vf_cfg[vf].plink_tracking = link_state;

        return status;
}

1389 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1390                           ulong now)
1391 {
1392         aic->rx_pkts_prev = rx_pkts;
1393         aic->tx_reqs_prev = tx_pkts;
1394         aic->jiffies = now;
1395 }
1396
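/* Adaptive interrupt coalescing: for each EQ, compute the combined RX+TX
 * packet rate since the last sample and derive a new EQ delay (eqd) from it.
 * Rates below ~30 kpps map to an eqd of 0; the result is clamped to the
 * [min_eqd, max_eqd] window. A change is pushed to the FW in its
 * delay-multiplier units (eqd * 65 / 100) only when eqd differs from the
 * previously programmed value.
 */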
1397 static void be_eqd_update(struct be_adapter *adapter)
1398 {
1399         struct be_set_eqd set_eqd[MAX_EVT_QS];
1400         int eqd, i, num = 0, start;
1401         struct be_aic_obj *aic;
1402         struct be_eq_obj *eqo;
1403         struct be_rx_obj *rxo;
1404         struct be_tx_obj *txo;
1405         u64 rx_pkts, tx_pkts;
1406         ulong now;
1407         u32 pps, delta;
1408
1409         for_all_evt_queues(adapter, eqo, i) {
1410                 aic = &adapter->aic_obj[eqo->idx];
1411                 if (!aic->enable) {
1412                         if (aic->jiffies)
1413                                 aic->jiffies = 0;
1414                         eqd = aic->et_eqd;
1415                         goto modify_eqd;
1416                 }
1417
1418                 rxo = &adapter->rx_obj[eqo->idx];
1419                 do {
1420                         start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
1421                         rx_pkts = rxo->stats.rx_pkts;
1422                 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
1423
1424                 txo = &adapter->tx_obj[eqo->idx];
1425                 do {
1426                         start = u64_stats_fetch_begin_irq(&txo->stats.sync);
1427                         tx_pkts = txo->stats.tx_reqs;
1428                 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
1429
1431                 /* Skip if wrapped around or on the first calculation */
1432                 now = jiffies;
1433                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1434                     rx_pkts < aic->rx_pkts_prev ||
1435                     tx_pkts < aic->tx_reqs_prev) {
1436                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1437                         continue;
1438                 }
1439
1440                 delta = jiffies_to_msecs(now - aic->jiffies);
1441                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1442                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1443                 eqd = (pps / 15000) << 2;
1444
1445                 if (eqd < 8)
1446                         eqd = 0;
1447                 eqd = min_t(u32, eqd, aic->max_eqd);
1448                 eqd = max_t(u32, eqd, aic->min_eqd);
1449
1450                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1451 modify_eqd:
1452                 if (eqd != aic->prev_eqd) {
1453                 set_eqd[num].delay_multiplier = (eqd * 65) / 100;
1454                         set_eqd[num].eq_id = eqo->q.id;
1455                         aic->prev_eqd = eqd;
1456                         num++;
1457                 }
1458         }
1459
1460         if (num)
1461                 be_cmd_modify_eqd(adapter, set_eqd, num);
1462 }
1463
1464 static void be_rx_stats_update(struct be_rx_obj *rxo,
1465                                struct be_rx_compl_info *rxcp)
1466 {
1467         struct be_rx_stats *stats = rx_stats(rxo);
1468
1469         u64_stats_update_begin(&stats->sync);
1470         stats->rx_compl++;
1471         stats->rx_bytes += rxcp->pkt_size;
1472         stats->rx_pkts++;
1473         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1474                 stats->rx_mcast_pkts++;
1475         if (rxcp->err)
1476                 stats->rx_compl_err++;
1477         u64_stats_update_end(&stats->sync);
1478 }
1479
1480 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1481 {
1482         /* L4 checksum is not reliable for non TCP/UDP packets.
1483          * Also ignore ipcksm for ipv6 pkts
1484          */
1485         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1486                 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
1487 }
1488
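/* Returns the page_info at the RXQ tail and advances the tail. The big page
 * backing the fragments is DMA-unmapped only when its last fragment is
 * consumed; for all other fragments only the fragment itself is synced for
 * CPU access.
 */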
1489 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1490 {
1491         struct be_adapter *adapter = rxo->adapter;
1492         struct be_rx_page_info *rx_page_info;
1493         struct be_queue_info *rxq = &rxo->q;
1494         u16 frag_idx = rxq->tail;
1495
1496         rx_page_info = &rxo->page_info_tbl[frag_idx];
1497         BUG_ON(!rx_page_info->page);
1498
1499         if (rx_page_info->last_frag) {
1500                 dma_unmap_page(&adapter->pdev->dev,
1501                                dma_unmap_addr(rx_page_info, bus),
1502                                adapter->big_page_size, DMA_FROM_DEVICE);
1503                 rx_page_info->last_frag = false;
1504         } else {
1505                 dma_sync_single_for_cpu(&adapter->pdev->dev,
1506                                         dma_unmap_addr(rx_page_info, bus),
1507                                         rx_frag_size, DMA_FROM_DEVICE);
1508         }
1509
1510         queue_tail_inc(rxq);
1511         atomic_dec(&rxq->used);
1512         return rx_page_info;
1513 }
1514
1515 /* Throw away the data in the Rx completion */
1516 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1517                                 struct be_rx_compl_info *rxcp)
1518 {
1519         struct be_rx_page_info *page_info;
1520         u16 i, num_rcvd = rxcp->num_rcvd;
1521
1522         for (i = 0; i < num_rcvd; i++) {
1523                 page_info = get_rx_page_info(rxo);
1524                 put_page(page_info->page);
1525                 memset(page_info, 0, sizeof(*page_info));
1526         }
1527 }
1528
1529 /*
1530  * skb_fill_rx_data forms a complete skb for an Ethernet frame
1531  * indicated by rxcp.
1532  */
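/* Tiny packets (up to BE_HDR_LEN bytes) are copied entirely into the skb's
 * linear area and their page is released; for larger packets only the
 * Ethernet header is copied and the rest of the data is attached as page
 * fragments.
 */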
1533 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1534                              struct be_rx_compl_info *rxcp)
1535 {
1536         struct be_rx_page_info *page_info;
1537         u16 i, j;
1538         u16 hdr_len, curr_frag_len, remaining;
1539         u8 *start;
1540
1541         page_info = get_rx_page_info(rxo);
1542         start = page_address(page_info->page) + page_info->page_offset;
1543         prefetch(start);
1544
1545         /* Copy data in the first descriptor of this completion */
1546         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1547
1548         skb->len = curr_frag_len;
1549         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1550                 memcpy(skb->data, start, curr_frag_len);
1551                 /* Complete packet has now been moved to data */
1552                 put_page(page_info->page);
1553                 skb->data_len = 0;
1554                 skb->tail += curr_frag_len;
1555         } else {
1556                 hdr_len = ETH_HLEN;
1557                 memcpy(skb->data, start, hdr_len);
1558                 skb_shinfo(skb)->nr_frags = 1;
1559                 skb_frag_set_page(skb, 0, page_info->page);
1560                 skb_shinfo(skb)->frags[0].page_offset =
1561                                         page_info->page_offset + hdr_len;
1562                 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1563                                   curr_frag_len - hdr_len);
1564                 skb->data_len = curr_frag_len - hdr_len;
1565                 skb->truesize += rx_frag_size;
1566                 skb->tail += hdr_len;
1567         }
1568         page_info->page = NULL;
1569
1570         if (rxcp->pkt_size <= rx_frag_size) {
1571                 BUG_ON(rxcp->num_rcvd != 1);
1572                 return;
1573         }
1574
1575         /* More frags present for this completion */
1576         remaining = rxcp->pkt_size - curr_frag_len;
1577         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1578                 page_info = get_rx_page_info(rxo);
1579                 curr_frag_len = min(remaining, rx_frag_size);
1580
1581                 /* Coalesce all frags from the same physical page in one slot */
1582                 if (page_info->page_offset == 0) {
1583                         /* Fresh page */
1584                         j++;
1585                         skb_frag_set_page(skb, j, page_info->page);
1586                         skb_shinfo(skb)->frags[j].page_offset =
1587                                                         page_info->page_offset;
1588                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1589                         skb_shinfo(skb)->nr_frags++;
1590                 } else {
1591                         put_page(page_info->page);
1592                 }
1593
1594                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1595                 skb->len += curr_frag_len;
1596                 skb->data_len += curr_frag_len;
1597                 skb->truesize += rx_frag_size;
1598                 remaining -= curr_frag_len;
1599                 page_info->page = NULL;
1600         }
1601         BUG_ON(j > MAX_SKB_FRAGS);
1602 }
1603
1604 /* Process the RX completion indicated by rxcp when GRO is disabled */
1605 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1606                                 struct be_rx_compl_info *rxcp)
1607 {
1608         struct be_adapter *adapter = rxo->adapter;
1609         struct net_device *netdev = adapter->netdev;
1610         struct sk_buff *skb;
1611
1612         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1613         if (unlikely(!skb)) {
1614                 rx_stats(rxo)->rx_drops_no_skbs++;
1615                 be_rx_compl_discard(rxo, rxcp);
1616                 return;
1617         }
1618
1619         skb_fill_rx_data(rxo, skb, rxcp);
1620
1621         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1622                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1623         else
1624                 skb_checksum_none_assert(skb);
1625
1626         skb->protocol = eth_type_trans(skb, netdev);
1627         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1628         if (netdev->features & NETIF_F_RXHASH)
1629                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1630
1631         skb->encapsulation = rxcp->tunneled;
1632         skb_mark_napi_id(skb, napi);
1633
1634         if (rxcp->vlanf)
1635                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1636
1637         netif_receive_skb(skb);
1638 }
1639
1640 /* Process the RX completion indicated by rxcp when GRO is enabled */
1641 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1642                                     struct napi_struct *napi,
1643                                     struct be_rx_compl_info *rxcp)
1644 {
1645         struct be_adapter *adapter = rxo->adapter;
1646         struct be_rx_page_info *page_info;
1647         struct sk_buff *skb = NULL;
1648         u16 remaining, curr_frag_len;
1649         u16 i, j;
1650
1651         skb = napi_get_frags(napi);
1652         if (!skb) {
1653                 be_rx_compl_discard(rxo, rxcp);
1654                 return;
1655         }
1656
1657         remaining = rxcp->pkt_size;
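        /* j tracks the skb frag slot in use; it starts at -1 (which wraps
         * for the unsigned type) and is bumped to 0 when the first fragment
         * is attached below.
         */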
1658         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1659                 page_info = get_rx_page_info(rxo);
1660
1661                 curr_frag_len = min(remaining, rx_frag_size);
1662
1663                 /* Coalesce all frags from the same physical page in one slot */
1664                 if (i == 0 || page_info->page_offset == 0) {
1665                         /* First frag or Fresh page */
1666                         j++;
1667                         skb_frag_set_page(skb, j, page_info->page);
1668                         skb_shinfo(skb)->frags[j].page_offset =
1669                                                         page_info->page_offset;
1670                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1671                 } else {
1672                         put_page(page_info->page);
1673                 }
1674                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1675                 skb->truesize += rx_frag_size;
1676                 remaining -= curr_frag_len;
1677                 memset(page_info, 0, sizeof(*page_info));
1678         }
1679         BUG_ON(j > MAX_SKB_FRAGS);
1680
1681         skb_shinfo(skb)->nr_frags = j + 1;
1682         skb->len = rxcp->pkt_size;
1683         skb->data_len = rxcp->pkt_size;
1684         skb->ip_summed = CHECKSUM_UNNECESSARY;
1685         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1686         if (adapter->netdev->features & NETIF_F_RXHASH)
1687                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1688
1689         skb->encapsulation = rxcp->tunneled;
1690         skb_mark_napi_id(skb, napi);
1691
1692         if (rxcp->vlanf)
1693                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1694
1695         napi_gro_frags(napi);
1696 }
1697
1698 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1699                                  struct be_rx_compl_info *rxcp)
1700 {
1701         rxcp->pkt_size =
1702                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1703         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1704         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1705         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1706         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1707         rxcp->ip_csum =
1708                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1709         rxcp->l4_csum =
1710                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1711         rxcp->ipv6 =
1712                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1713         rxcp->num_rcvd =
1714                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1715         rxcp->pkt_type =
1716                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1717         rxcp->rss_hash =
1718                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1719         if (rxcp->vlanf) {
1720                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1721                                           compl);
1722                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
1723                                                vlan_tag, compl);
1724         }
1725         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1726         rxcp->tunneled =
1727                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
1728 }
1729
1730 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1731                                  struct be_rx_compl_info *rxcp)
1732 {
1733         rxcp->pkt_size =
1734                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1735         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1736         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1737         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1738         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1739         rxcp->ip_csum =
1740                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1741         rxcp->l4_csum =
1742                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1743         rxcp->ipv6 =
1744                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1745         rxcp->num_rcvd =
1746                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1747         rxcp->pkt_type =
1748                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1749         rxcp->rss_hash =
1750                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1751         if (rxcp->vlanf) {
1752                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1753                                           compl);
1754                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1755                                                vlan_tag, compl);
1756         }
1757         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1758         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1759                                       ip_frag, compl);
1760 }
1761
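/* Returns the completion at the CQ tail if its valid bit is set, else NULL.
 * The rmb() ensures the rest of the entry is read only after the valid bit;
 * the entry is parsed in v1 or v0 format depending on be3_native, and its
 * valid bit is then cleared so the entry is not re-processed after the CQ
 * wraps around.
 */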
1762 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1763 {
1764         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1765         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1766         struct be_adapter *adapter = rxo->adapter;
1767
1768         /* For checking the valid bit it is OK to use either definition, as the
1769          * valid bit is at the same position in both v0 and v1 Rx compls */
1770         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1771                 return NULL;
1772
1773         rmb();
1774         be_dws_le_to_cpu(compl, sizeof(*compl));
1775
1776         if (adapter->be3_native)
1777                 be_parse_rx_compl_v1(compl, rxcp);
1778         else
1779                 be_parse_rx_compl_v0(compl, rxcp);
1780
1781         if (rxcp->ip_frag)
1782                 rxcp->l4_csum = 0;
1783
1784         if (rxcp->vlanf) {
1785                 /* In QNQ modes, if qnq bit is not set, then the packet was
1786                  * tagged only with the transparent outer vlan-tag and must
1787                  * not be treated as a vlan packet by host
1788                  */
1789                 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
1790                         rxcp->vlanf = 0;
1791
1792                 if (!lancer_chip(adapter))
1793                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1794
1795                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1796                     !test_bit(rxcp->vlan_tag, adapter->vids))
1797                         rxcp->vlanf = 0;
1798         }
1799
1800         /* As the compl has been parsed, reset it; we won't touch it again */
1801         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1802
1803         queue_tail_inc(&rxo->cq);
1804         return rxcp;
1805 }
1806
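/* Orders > 0 are allocated as compound pages, presumably so that page
 * reference counting on the fragments carved out of them stays coherent.
 */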
1807 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1808 {
1809         u32 order = get_order(size);
1810
1811         if (order > 0)
1812                 gfp |= __GFP_COMP;
1813         return  alloc_pages(gfp, order);
1814 }
1815
1816 /*
1817  * Allocate a page, split it to fragments of size rx_frag_size and post as
1818  * receive buffers to BE
1819  */
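/* Each fragment is posted as one RX descriptor. The fragment that ends a
 * page is flagged last_frag and records the page's DMA address, so that the
 * whole page can be unmapped once that fragment is consumed.
 */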
1820 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1821 {
1822         struct be_adapter *adapter = rxo->adapter;
1823         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1824         struct be_queue_info *rxq = &rxo->q;
1825         struct page *pagep = NULL;
1826         struct device *dev = &adapter->pdev->dev;
1827         struct be_eth_rx_d *rxd;
1828         u64 page_dmaaddr = 0, frag_dmaaddr;
1829         u32 posted, page_offset = 0;
1830
1831         page_info = &rxo->page_info_tbl[rxq->head];
1832         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1833                 if (!pagep) {
1834                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1835                         if (unlikely(!pagep)) {
1836                                 rx_stats(rxo)->rx_post_fail++;
1837                                 break;
1838                         }
1839                         page_dmaaddr = dma_map_page(dev, pagep, 0,
1840                                                     adapter->big_page_size,
1841                                                     DMA_FROM_DEVICE);
1842                         if (dma_mapping_error(dev, page_dmaaddr)) {
1843                                 put_page(pagep);
1844                                 pagep = NULL;
1845                                 rx_stats(rxo)->rx_post_fail++;
1846                                 break;
1847                         }
1848                         page_offset = 0;
1849                 } else {
1850                         get_page(pagep);
1851                         page_offset += rx_frag_size;
1852                 }
1853                 page_info->page_offset = page_offset;
1854                 page_info->page = pagep;
1855
1856                 rxd = queue_head_node(rxq);
1857                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1858                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1859                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1860
1861                 /* Any space left in the current big page for another frag? */
1862                 if ((page_offset + rx_frag_size + rx_frag_size) >
1863                                         adapter->big_page_size) {
1864                         pagep = NULL;
1865                         page_info->last_frag = true;
1866                         dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1867                 } else {
1868                         dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
1869                 }
1870
1871                 prev_page_info = page_info;
1872                 queue_head_inc(rxq);
1873                 page_info = &rxo->page_info_tbl[rxq->head];
1874         }
1875
1876         /* Mark the last frag of a page when we break out of the above loop
1877          * with no more slots available in the RXQ
1878          */
1879         if (pagep) {
1880                 prev_page_info->last_frag = true;
1881                 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1882         }
1883
1884         if (posted) {
1885                 atomic_add(posted, &rxq->used);
1886                 if (rxo->rx_post_starved)
1887                         rxo->rx_post_starved = false;
1888                 be_rxq_notify(adapter, rxq->id, posted);
1889         } else if (atomic_read(&rxq->used) == 0) {
1890                 /* Let be_worker replenish when memory is available */
1891                 rxo->rx_post_starved = true;
1892         }
1893 }
1894
1895 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1896 {
1897         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1898
1899         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1900                 return NULL;
1901
1902         rmb();
1903         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1904
1905         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1906
1907         queue_tail_inc(tx_cq);
1908         return txcp;
1909 }
1910
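/* Frees the skb posted at the TXQ tail and unmaps its WRBs, advancing the
 * tail up to last_index. Returns the number of WRBs reclaimed, including
 * the header WRB.
 */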
1911 static u16 be_tx_compl_process(struct be_adapter *adapter,
1912                                struct be_tx_obj *txo, u16 last_index)
1913 {
1914         struct be_queue_info *txq = &txo->q;
1915         struct be_eth_wrb *wrb;
1916         struct sk_buff **sent_skbs = txo->sent_skb_list;
1917         struct sk_buff *sent_skb;
1918         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1919         bool unmap_skb_hdr = true;
1920
1921         sent_skb = sent_skbs[txq->tail];
1922         BUG_ON(!sent_skb);
1923         sent_skbs[txq->tail] = NULL;
1924
1925         /* skip header wrb */
1926         queue_tail_inc(txq);
1927
1928         do {
1929                 cur_index = txq->tail;
1930                 wrb = queue_tail_node(txq);
1931                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1932                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1933                 unmap_skb_hdr = false;
1934
1935                 num_wrbs++;
1936                 queue_tail_inc(txq);
1937         } while (cur_index != last_index);
1938
1939         dev_kfree_skb_any(sent_skb);
1940         return num_wrbs;
1941 }
1942
1943 /* Return the number of events in the event queue */
1944 static inline int events_get(struct be_eq_obj *eqo)
1945 {
1946         struct be_eq_entry *eqe;
1947         int num = 0;
1948
1949         do {
1950                 eqe = queue_tail_node(&eqo->q);
1951                 if (eqe->evt == 0)
1952                         break;
1953
1954                 rmb();
1955                 eqe->evt = 0;
1956                 num++;
1957                 queue_tail_inc(&eqo->q);
1958         } while (true);
1959
1960         return num;
1961 }
1962
1963 /* Leaves the EQ in a disarmed state */
1964 static void be_eq_clean(struct be_eq_obj *eqo)
1965 {
1966         int num = events_get(eqo);
1967
1968         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1969 }
1970
1971 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1972 {
1973         struct be_rx_page_info *page_info;
1974         struct be_queue_info *rxq = &rxo->q;
1975         struct be_queue_info *rx_cq = &rxo->cq;
1976         struct be_rx_compl_info *rxcp;
1977         struct be_adapter *adapter = rxo->adapter;
1978         int flush_wait = 0;
1979
1980         /* Consume pending rx completions.
1981          * Wait for the flush completion (identified by zero num_rcvd)
1982          * to arrive. Notify CQ even when there are no more CQ entries
1983          * for HW to flush partially coalesced CQ entries.
1984          * In Lancer, there is no need to wait for flush compl.
1985          */
1986         for (;;) {
1987                 rxcp = be_rx_compl_get(rxo);
1988                 if (rxcp == NULL) {
1989                         if (lancer_chip(adapter))
1990                                 break;
1991
1992                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1993                                 dev_warn(&adapter->pdev->dev,
1994                                          "did not receive flush compl\n");
1995                                 break;
1996                         }
1997                         be_cq_notify(adapter, rx_cq->id, true, 0);
1998                         mdelay(1);
1999                 } else {
2000                         be_rx_compl_discard(rxo, rxcp);
2001                         be_cq_notify(adapter, rx_cq->id, false, 1);
2002                         if (rxcp->num_rcvd == 0)
2003                                 break;
2004                 }
2005         }
2006
2007         /* After cleanup, leave the CQ in unarmed state */
2008         be_cq_notify(adapter, rx_cq->id, false, 0);
2009
2010         /* Then free posted rx buffers that were not used */
2011         while (atomic_read(&rxq->used) > 0) {
2012                 page_info = get_rx_page_info(rxo);
2013                 put_page(page_info->page);
2014                 memset(page_info, 0, sizeof(*page_info));
2015         }
2016         BUG_ON(atomic_read(&rxq->used));
2017         rxq->tail = rxq->head = 0;
2018 }
2019
2020 static void be_tx_compl_clean(struct be_adapter *adapter)
2021 {
2022         struct be_tx_obj *txo;
2023         struct be_queue_info *txq;
2024         struct be_eth_tx_compl *txcp;
2025         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2026         struct sk_buff *sent_skb;
2027         bool dummy_wrb;
2028         int i, pending_txqs;
2029
2030         /* Stop polling for compls when HW has been silent for 10ms */
2031         do {
2032                 pending_txqs = adapter->num_tx_qs;
2033
2034                 for_all_tx_queues(adapter, txo, i) {
2035                         cmpl = 0;
2036                         num_wrbs = 0;
2037                         txq = &txo->q;
2038                         while ((txcp = be_tx_compl_get(&txo->cq))) {
2039                                 end_idx =
2040                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
2041                                                       wrb_index, txcp);
2042                                 num_wrbs += be_tx_compl_process(adapter, txo,
2043                                                                 end_idx);
2044                                 cmpl++;
2045                         }
2046                         if (cmpl) {
2047                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2048                                 atomic_sub(num_wrbs, &txq->used);
2049                                 timeo = 0;
2050                         }
2051                         if (atomic_read(&txq->used) == 0)
2052                                 pending_txqs--;
2053                 }
2054
2055                 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
2056                         break;
2057
2058                 mdelay(1);
2059         } while (true);
2060
2061         for_all_tx_queues(adapter, txo, i) {
2062                 txq = &txo->q;
2063                 if (atomic_read(&txq->used))
2064                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2065                                 atomic_read(&txq->used));
2066
2067                 /* free posted tx for which compls will never arrive */
2068                 while (atomic_read(&txq->used)) {
2069                         sent_skb = txo->sent_skb_list[txq->tail];
2070                         end_idx = txq->tail;
2071                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2072                                                    &dummy_wrb);
2073                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2074                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2075                         atomic_sub(num_wrbs, &txq->used);
2076                 }
2077         }
2078 }
2079
2080 static void be_evt_queues_destroy(struct be_adapter *adapter)
2081 {
2082         struct be_eq_obj *eqo;
2083         int i;
2084
2085         for_all_evt_queues(adapter, eqo, i) {
2086                 if (eqo->q.created) {
2087                         be_eq_clean(eqo);
2088                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2089                         napi_hash_del(&eqo->napi);
2090                         netif_napi_del(&eqo->napi);
2091                 }
2092                 be_queue_free(adapter, &eqo->q);
2093         }
2094 }
2095
2096 static int be_evt_queues_create(struct be_adapter *adapter)
2097 {
2098         struct be_queue_info *eq;
2099         struct be_eq_obj *eqo;
2100         struct be_aic_obj *aic;
2101         int i, rc;
2102
2103         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2104                                     adapter->cfg_num_qs);
2105
2106         for_all_evt_queues(adapter, eqo, i) {
2107                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2108                                BE_NAPI_WEIGHT);
2109                 napi_hash_add(&eqo->napi);
2110                 aic = &adapter->aic_obj[i];
2111                 eqo->adapter = adapter;
2112                 eqo->tx_budget = BE_TX_BUDGET;
2113                 eqo->idx = i;
2114                 aic->max_eqd = BE_MAX_EQD;
2115                 aic->enable = true;
2116
2117                 eq = &eqo->q;
2118                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2119                                     sizeof(struct be_eq_entry));
2120                 if (rc)
2121                         return rc;
2122
2123                 rc = be_cmd_eq_create(adapter, eqo);
2124                 if (rc)
2125                         return rc;
2126         }
2127         return 0;
2128 }
2129
2130 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2131 {
2132         struct be_queue_info *q;
2133
2134         q = &adapter->mcc_obj.q;
2135         if (q->created)
2136                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2137         be_queue_free(adapter, q);
2138
2139         q = &adapter->mcc_obj.cq;
2140         if (q->created)
2141                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2142         be_queue_free(adapter, q);
2143 }
2144
2145 /* Must be called only after TX qs are created as MCC shares TX EQ */
2146 static int be_mcc_queues_create(struct be_adapter *adapter)
2147 {
2148         struct be_queue_info *q, *cq;
2149
2150         cq = &adapter->mcc_obj.cq;
2151         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2152                            sizeof(struct be_mcc_compl)))
2153                 goto err;
2154
2155         /* Use the default EQ for MCC completions */
2156         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2157                 goto mcc_cq_free;
2158
2159         q = &adapter->mcc_obj.q;
2160         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2161                 goto mcc_cq_destroy;
2162
2163         if (be_cmd_mccq_create(adapter, q, cq))
2164                 goto mcc_q_free;
2165
2166         return 0;
2167
2168 mcc_q_free:
2169         be_queue_free(adapter, q);
2170 mcc_cq_destroy:
2171         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2172 mcc_cq_free:
2173         be_queue_free(adapter, cq);
2174 err:
2175         return -1;
2176 }
2177
2178 static void be_tx_queues_destroy(struct be_adapter *adapter)
2179 {
2180         struct be_queue_info *q;
2181         struct be_tx_obj *txo;
2182         u8 i;
2183
2184         for_all_tx_queues(adapter, txo, i) {
2185                 q = &txo->q;
2186                 if (q->created)
2187                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2188                 be_queue_free(adapter, q);
2189
2190                 q = &txo->cq;
2191                 if (q->created)
2192                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2193                 be_queue_free(adapter, q);
2194         }
2195 }
2196
2197 static int be_tx_qs_create(struct be_adapter *adapter)
2198 {
2199         struct be_queue_info *cq, *eq;
2200         struct be_tx_obj *txo;
2201         int status, i;
2202
2203         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2204
2205         for_all_tx_queues(adapter, txo, i) {
2206                 cq = &txo->cq;
2207                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2208                                         sizeof(struct be_eth_tx_compl));
2209                 if (status)
2210                         return status;
2211
2212                 u64_stats_init(&txo->stats.sync);
2213                 u64_stats_init(&txo->stats.sync_compl);
2214
2215                 /* If num_evt_qs is less than num_tx_qs, then more than
2216                  * one txq shares an eq
2217                  */
2218                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2219                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2220                 if (status)
2221                         return status;
2222
2223                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2224                                         sizeof(struct be_eth_wrb));
2225                 if (status)
2226                         return status;
2227
2228                 status = be_cmd_txq_create(adapter, txo);
2229                 if (status)
2230                         return status;
2231         }
2232
2233         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2234                  adapter->num_tx_qs);
2235         return 0;
2236 }
2237
2238 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2239 {
2240         struct be_queue_info *q;
2241         struct be_rx_obj *rxo;
2242         int i;
2243
2244         for_all_rx_queues(adapter, rxo, i) {
2245                 q = &rxo->cq;
2246                 if (q->created)
2247                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2248                 be_queue_free(adapter, q);
2249         }
2250 }
2251
2252 static int be_rx_cqs_create(struct be_adapter *adapter)
2253 {
2254         struct be_queue_info *eq, *cq;
2255         struct be_rx_obj *rxo;
2256         int rc, i;
2257
2258         /* We can create as many RSS rings as there are EQs. */
2259         adapter->num_rx_qs = adapter->num_evt_qs;
2260
2261         /* We'll use RSS only if at least 2 RSS rings are supported.
2262          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2263          */
2264         if (adapter->num_rx_qs > 1)
2265                 adapter->num_rx_qs++;
2266
2267         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2268         for_all_rx_queues(adapter, rxo, i) {
2269                 rxo->adapter = adapter;
2270                 cq = &rxo->cq;
2271                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2272                                     sizeof(struct be_eth_rx_compl));
2273                 if (rc)
2274                         return rc;
2275
2276                 u64_stats_init(&rxo->stats.sync);
2277                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2278                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2279                 if (rc)
2280                         return rc;
2281         }
2282
2283         dev_info(&adapter->pdev->dev,
2284                  "created %d RSS queue(s) and 1 default RX queue\n",
2285                  adapter->num_rx_qs - 1);
2286         return 0;
2287 }
2288
2289 static irqreturn_t be_intx(int irq, void *dev)
2290 {
2291         struct be_eq_obj *eqo = dev;
2292         struct be_adapter *adapter = eqo->adapter;
2293         int num_evts = 0;
2294
2295         /* IRQ is not expected when NAPI is scheduled as the EQ
2296          * will not be armed.
2297          * But, this can happen on Lancer INTx where it takes
2298          * a while to de-assert INTx or in BE2 where occasionally
2299          * an interrupt may be raised even when EQ is unarmed.
2300          * If NAPI is already scheduled, then counting & notifying
2301          * events will orphan them.
2302          */
2303         if (napi_schedule_prep(&eqo->napi)) {
2304                 num_evts = events_get(eqo);
2305                 __napi_schedule(&eqo->napi);
2306                 if (num_evts)
2307                         eqo->spurious_intr = 0;
2308         }
2309         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2310
2311         /* Return IRQ_HANDLED only for the first spurious intr
2312          * after a valid intr to stop the kernel from branding
2313          * this irq as a bad one!
2314          */
2315         if (num_evts || eqo->spurious_intr++ == 0)
2316                 return IRQ_HANDLED;
2317         else
2318                 return IRQ_NONE;
2319 }
2320
2321 static irqreturn_t be_msix(int irq, void *dev)
2322 {
2323         struct be_eq_obj *eqo = dev;
2324
2325         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2326         napi_schedule(&eqo->napi);
2327         return IRQ_HANDLED;
2328 }
2329
2330 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2331 {
2332         return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2333 }
2334
2335 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2336                          int budget, int polling)
2337 {
2338         struct be_adapter *adapter = rxo->adapter;
2339         struct be_queue_info *rx_cq = &rxo->cq;
2340         struct be_rx_compl_info *rxcp;
2341         u32 work_done;
2342
2343         for (work_done = 0; work_done < budget; work_done++) {
2344                 rxcp = be_rx_compl_get(rxo);
2345                 if (!rxcp)
2346                         break;
2347
2348                 /* Is it a flush compl that has no data? */
2349                 if (unlikely(rxcp->num_rcvd == 0))
2350                         goto loop_continue;
2351
2352                 /* Discard compl with partial DMA Lancer B0 */
2353                 if (unlikely(!rxcp->pkt_size)) {
2354                         be_rx_compl_discard(rxo, rxcp);
2355                         goto loop_continue;
2356                 }
2357
2358                 /* On BE drop pkts that arrive due to imperfect filtering in
2359                  * promiscuous mode on some SKUs
2360                  */
2361                 if (unlikely(rxcp->port != adapter->port_num &&
2362                              !lancer_chip(adapter))) {
2363                         be_rx_compl_discard(rxo, rxcp);
2364                         goto loop_continue;
2365                 }
2366
2367                 /* Don't do gro when we're busy_polling */
2368                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2369                         be_rx_compl_process_gro(rxo, napi, rxcp);
2370                 else
2371                         be_rx_compl_process(rxo, napi, rxcp);
2372
2373 loop_continue:
2374                 be_rx_stats_update(rxo, rxcp);
2375         }
2376
2377         if (work_done) {
2378                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2379
2380                 /* When an rx-obj gets into post_starved state, just
2381                  * let be_worker do the posting.
2382                  */
2383                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2384                     !rxo->rx_post_starved)
2385                         be_post_rx_frags(rxo, GFP_ATOMIC);
2386         }
2387
2388         return work_done;
2389 }
2390
2391 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2392                           int budget, int idx)
2393 {
2394         struct be_eth_tx_compl *txcp;
2395         int num_wrbs = 0, work_done;
2396
2397         for (work_done = 0; work_done < budget; work_done++) {
2398                 txcp = be_tx_compl_get(&txo->cq);
2399                 if (!txcp)
2400                         break;
2401                 num_wrbs += be_tx_compl_process(adapter, txo,
2402                                                 AMAP_GET_BITS(struct
2403                                                               amap_eth_tx_compl,
2404                                                               wrb_index, txcp));
2405         }
2406
2407         if (work_done) {
2408                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2409                 atomic_sub(num_wrbs, &txo->q.used);
2410
2411                 /* As Tx wrbs have been freed up, wake up netdev queue
2412                  * if it was stopped due to lack of tx wrbs.  */
2413                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2414                     atomic_read(&txo->q.used) < txo->q.len / 2) {
2415                         netif_wake_subqueue(adapter->netdev, idx);
2416                 }
2417
2418                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2419                 tx_stats(txo)->tx_compl += work_done;
2420                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2421         }
2422         return (work_done < budget); /* Done */
2423 }
2424
2425 int be_poll(struct napi_struct *napi, int budget)
2426 {
2427         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2428         struct be_adapter *adapter = eqo->adapter;
2429         int max_work = 0, work, i, num_evts;
2430         struct be_rx_obj *rxo;
2431         bool tx_done;
2432
2433         num_evts = events_get(eqo);
2434
2435         /* Process all TXQs serviced by this EQ */
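        /* TXQs are spread round-robin over the EQs (TXQ i uses EQ
         * i % num_evt_qs), so this EQ owns every num_evt_qs-th TXQ starting
         * at its own index.
         */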
2436         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2437                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2438                                         eqo->tx_budget, i);
2439                 if (!tx_done)
2440                         max_work = budget;
2441         }
2442
2443         if (be_lock_napi(eqo)) {
2444                 /* This loop will iterate twice for EQ0 in which
2445                  * completions of the last RXQ (default one) are also processed
2446                  * For other EQs the loop iterates only once
2447                  */
2448                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2449                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2450                         max_work = max(work, max_work);
2451                 }
2452                 be_unlock_napi(eqo);
2453         } else {
2454                 max_work = budget;
2455         }
2456
2457         if (is_mcc_eqo(eqo))
2458                 be_process_mcc(adapter);
2459
2460         if (max_work < budget) {
2461                 napi_complete(napi);
2462                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2463         } else {
2464                 /* As we'll continue in polling mode, count and clear events */
2465                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2466         }
2467         return max_work;
2468 }
2469
2470 #ifdef CONFIG_NET_RX_BUSY_POLL
2471 static int be_busy_poll(struct napi_struct *napi)
2472 {
2473         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2474         struct be_adapter *adapter = eqo->adapter;
2475         struct be_rx_obj *rxo;
2476         int i, work = 0;
2477
2478         if (!be_lock_busy_poll(eqo))
2479                 return LL_FLUSH_BUSY;
2480
2481         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2482                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2483                 if (work)
2484                         break;
2485         }
2486
2487         be_unlock_busy_poll(eqo);
2488         return work;
2489 }
2490 #endif
2491
2492 void be_detect_error(struct be_adapter *adapter)
2493 {
2494         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2495         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2496         u32 i;
2497         bool error_detected = false;
2498         struct device *dev = &adapter->pdev->dev;
2499         struct net_device *netdev = adapter->netdev;
2500
2501         if (be_hw_error(adapter))
2502                 return;
2503
2504         if (lancer_chip(adapter)) {
2505                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2506                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2507                         sliport_err1 = ioread32(adapter->db +
2508                                                 SLIPORT_ERROR1_OFFSET);
2509                         sliport_err2 = ioread32(adapter->db +
2510                                                 SLIPORT_ERROR2_OFFSET);
2511                         adapter->hw_error = true;
2512                         /* Do not log error messages if it's a FW reset */
2513                         if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2514                             sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2515                                 dev_info(dev, "Firmware update in progress\n");
2516                         } else {
2517                                 error_detected = true;
2518                                 dev_err(dev, "Error detected in the card\n");
2519                                 dev_err(dev, "ERR: sliport status 0x%x\n",
2520                                         sliport_status);
2521                                 dev_err(dev, "ERR: sliport error1 0x%x\n",
2522                                         sliport_err1);
2523                                 dev_err(dev, "ERR: sliport error2 0x%x\n",
2524                                         sliport_err2);
2525                         }
2526                 }
2527         } else {
2528                 pci_read_config_dword(adapter->pdev,
2529                                       PCICFG_UE_STATUS_LOW, &ue_lo);
2530                 pci_read_config_dword(adapter->pdev,
2531                                       PCICFG_UE_STATUS_HIGH, &ue_hi);
2532                 pci_read_config_dword(adapter->pdev,
2533                                       PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2534                 pci_read_config_dword(adapter->pdev,
2535                                       PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2536
2537                 ue_lo = (ue_lo & ~ue_lo_mask);
2538                 ue_hi = (ue_hi & ~ue_hi_mask);
2539
2540                 /* On certain platforms BE hardware can indicate spurious UEs.
2541                  * Hence hw_error is not set on UE detection; in the case of
2542                  * a real UE, the HW is allowed to stop working completely.
2543                  */
2544
2545                 if (ue_lo || ue_hi) {
2546                         error_detected = true;
2547                         dev_err(dev,
2548                                 "Unrecoverable Error detected in the adapter\n");
2549                         dev_err(dev, "Please reboot server to recover\n");
2550                         if (skyhawk_chip(adapter))
2551                                 adapter->hw_error = true;
2552                         for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2553                                 if (ue_lo & 1)
2554                                         dev_err(dev, "UE: %s bit set\n",
2555                                                 ue_status_low_desc[i]);
2556                         }
2557                         for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2558                                 if (ue_hi & 1)
2559                                         dev_err(dev, "UE: %s bit set\n",
2560                                                 ue_status_hi_desc[i]);
2561                         }
2562                 }
2563         }
2564         if (error_detected)
2565                 netif_carrier_off(netdev);
2566 }
2567
2568 static void be_msix_disable(struct be_adapter *adapter)
2569 {
2570         if (msix_enabled(adapter)) {
2571                 pci_disable_msix(adapter->pdev);
2572                 adapter->num_msix_vec = 0;
2573                 adapter->num_msix_roce_vec = 0;
2574         }
2575 }
2576
2577 static int be_msix_enable(struct be_adapter *adapter)
2578 {
2579         int i, num_vec;
2580         struct device *dev = &adapter->pdev->dev;
2581
2582         /* If RoCE is supported, program the max number of NIC vectors that
2583          * may be configured via set-channels, along with vectors needed for
2584          * RoCe. Else, just program the number we'll use initially.
2585          */
2586         if (be_roce_supported(adapter))
2587                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2588                                 2 * num_online_cpus());
2589         else
2590                 num_vec = adapter->cfg_num_qs;
2591
2592         for (i = 0; i < num_vec; i++)
2593                 adapter->msix_entries[i].entry = i;
2594
2595         num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2596                                         MIN_MSIX_VECTORS, num_vec);
2597         if (num_vec < 0)
2598                 goto fail;
2599
2600         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2601                 adapter->num_msix_roce_vec = num_vec / 2;
2602                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2603                          adapter->num_msix_roce_vec);
2604         }
2605
2606         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2607
2608         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2609                  adapter->num_msix_vec);
2610         return 0;
2611
2612 fail:
2613         dev_warn(dev, "MSIx enable failed\n");
2614
2615         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2616         if (!be_physfn(adapter))
2617                 return num_vec;
2618         return 0;
2619 }
2620
2621 static inline int be_msix_vec_get(struct be_adapter *adapter,
2622                                   struct be_eq_obj *eqo)
2623 {
2624         return adapter->msix_entries[eqo->msix_idx].vector;
2625 }
2626
2627 static int be_msix_register(struct be_adapter *adapter)
2628 {
2629         struct net_device *netdev = adapter->netdev;
2630         struct be_eq_obj *eqo;
2631         int status, i, vec;
2632
2633         for_all_evt_queues(adapter, eqo, i) {
2634                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2635                 vec = be_msix_vec_get(adapter, eqo);
2636                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2637                 if (status)
2638                         goto err_msix;
2639         }
2640
2641         return 0;
2642 err_msix:
2643         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2644                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2645         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2646                  status);
2647         be_msix_disable(adapter);
2648         return status;
2649 }
2650
2651 static int be_irq_register(struct be_adapter *adapter)
2652 {
2653         struct net_device *netdev = adapter->netdev;
2654         int status;
2655
2656         if (msix_enabled(adapter)) {
2657                 status = be_msix_register(adapter);
2658                 if (status == 0)
2659                         goto done;
2660                 /* INTx is not supported for VF */
2661                 if (!be_physfn(adapter))
2662                         return status;
2663         }
2664
2665         /* INTx: only the first EQ is used */
2666         netdev->irq = adapter->pdev->irq;
2667         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2668                              &adapter->eq_obj[0]);
2669         if (status) {
2670                 dev_err(&adapter->pdev->dev,
2671                         "INTx request IRQ failed - err %d\n", status);
2672                 return status;
2673         }
2674 done:
2675         adapter->isr_registered = true;
2676         return 0;
2677 }
2678
2679 static void be_irq_unregister(struct be_adapter *adapter)
2680 {
2681         struct net_device *netdev = adapter->netdev;
2682         struct be_eq_obj *eqo;
2683         int i;
2684
2685         if (!adapter->isr_registered)
2686                 return;
2687
2688         /* INTx */
2689         if (!msix_enabled(adapter)) {
2690                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2691                 goto done;
2692         }
2693
2694         /* MSIx */
2695         for_all_evt_queues(adapter, eqo, i)
2696                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2697
2698 done:
2699         adapter->isr_registered = false;
2700 }
2701
2702 static void be_rx_qs_destroy(struct be_adapter *adapter)
2703 {
2704         struct be_queue_info *q;
2705         struct be_rx_obj *rxo;
2706         int i;
2707
2708         for_all_rx_queues(adapter, rxo, i) {
2709                 q = &rxo->q;
2710                 if (q->created) {
2711                         be_cmd_rxq_destroy(adapter, q);
2712                         be_rx_cq_clean(rxo);
2713                 }
2714                 be_queue_free(adapter, q);
2715         }
2716 }
2717
2718 static int be_close(struct net_device *netdev)
2719 {
2720         struct be_adapter *adapter = netdev_priv(netdev);
2721         struct be_eq_obj *eqo;
2722         int i;
2723
2724         /* This protection is needed as be_close() may be called even when the
2725          * adapter is in a cleared state (after an EEH permanent failure)
2726          */
2727         if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2728                 return 0;
2729
2730         be_roce_dev_close(adapter);
2731
2732         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2733                 for_all_evt_queues(adapter, eqo, i) {
2734                         napi_disable(&eqo->napi);
2735                         be_disable_busy_poll(eqo);
2736                 }
2737                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2738         }
2739
2740         be_async_mcc_disable(adapter);
2741
2742         /* Wait for all pending tx completions to arrive so that
2743          * all tx skbs are freed.
2744          */
2745         netif_tx_disable(netdev);
2746         be_tx_compl_clean(adapter);
2747
2748         be_rx_qs_destroy(adapter);
2749
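        /* Delete the additionally programmed unicast MACs; pmac_id[0] (the
         * primary MAC) is left in place.
         */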
2750         for (i = 1; i < (adapter->uc_macs + 1); i++)
2751                 be_cmd_pmac_del(adapter, adapter->if_handle,
2752                                 adapter->pmac_id[i], 0);
2753         adapter->uc_macs = 0;
2754
2755         for_all_evt_queues(adapter, eqo, i) {
2756                 if (msix_enabled(adapter))
2757                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2758                 else
2759                         synchronize_irq(netdev->irq);
2760                 be_eq_clean(eqo);
2761         }
2762
2763         be_irq_unregister(adapter);
2764
2765         return 0;
2766 }
2767
2768 static int be_rx_qs_create(struct be_adapter *adapter)
2769 {
2770         struct be_rx_obj *rxo;
2771         int rc, i, j;
2772         u8 rss_hkey[RSS_HASH_KEY_LEN];
2773         struct rss_info *rss = &adapter->rss_info;
2774
2775         for_all_rx_queues(adapter, rxo, i) {
2776                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2777                                     sizeof(struct be_eth_rx_d));
2778                 if (rc)
2779                         return rc;
2780         }
2781
2782         /* The FW would like the default RXQ to be created first */
2783         rxo = default_rxo(adapter);
2784         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2785                                adapter->if_handle, false, &rxo->rss_id);
2786         if (rc)
2787                 return rc;
2788
2789         for_all_rss_queues(adapter, rxo, i) {
2790                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2791                                        rx_frag_size, adapter->if_handle,
2792                                        true, &rxo->rss_id);
2793                 if (rc)
2794                         return rc;
2795         }
2796
2797         if (be_multi_rxq(adapter)) {
2798                 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2799                         j += adapter->num_rx_qs - 1) {
2800                         for_all_rss_queues(adapter, rxo, i) {
2801                                 if ((j + i) >= RSS_INDIR_TABLE_LEN)
2802                                         break;
2803                                 rss->rsstable[j + i] = rxo->rss_id;
2804                                 rss->rss_queue[j + i] = i;
2805                         }
2806                 }
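
                /* For example: with three RSS rings whose rss_ids are A, B
                 * and C, the loops above fill rsstable[] as A,B,C,A,B,C,...
                 * so that flows are spread evenly across all RSS rings.
                 */
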
2807                 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2808                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2809
2810                 if (!BEx_chip(adapter))
2811                         rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2812                                 RSS_ENABLE_UDP_IPV6;
2813         } else {
2814                 /* Disable RSS if only the default RX queue was created */
2815                 rss->rss_flags = RSS_ENABLE_NONE;
2816         }
2817
2818         get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
2819         rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
2820                                RSS_INDIR_TABLE_LEN, rss_hkey);
2821         if (rc) {
2822                 rss->rss_flags = RSS_ENABLE_NONE;
2823                 return rc;
2824         }
2825
2826         memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2827
2828         /* First time posting */
2829         for_all_rx_queues(adapter, rxo, i)
2830                 be_post_rx_frags(rxo, GFP_KERNEL);
2831         return 0;
2832 }
2833
2834 static int be_open(struct net_device *netdev)
2835 {
2836         struct be_adapter *adapter = netdev_priv(netdev);
2837         struct be_eq_obj *eqo;
2838         struct be_rx_obj *rxo;
2839         struct be_tx_obj *txo;
2840         u8 link_status;
2841         int status, i;
2842
2843         status = be_rx_qs_create(adapter);
2844         if (status)
2845                 goto err;
2846
2847         status = be_irq_register(adapter);
2848         if (status)
2849                 goto err;
2850
2851         for_all_rx_queues(adapter, rxo, i)
2852                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2853
2854         for_all_tx_queues(adapter, txo, i)
2855                 be_cq_notify(adapter, txo->cq.id, true, 0);
2856
2857         be_async_mcc_enable(adapter);
2858
2859         for_all_evt_queues(adapter, eqo, i) {
2860                 napi_enable(&eqo->napi);
2861                 be_enable_busy_poll(eqo);
2862                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2863         }
2864         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2865
2866         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2867         if (!status)
2868                 be_link_status_update(adapter, link_status);
2869
2870         netif_tx_start_all_queues(netdev);
2871         be_roce_dev_open(adapter);
2872
2873 #ifdef CONFIG_BE2NET_VXLAN
2874         if (skyhawk_chip(adapter))
2875                 vxlan_get_rx_port(netdev);
2876 #endif
2877
2878         return 0;
2879 err:
2880         be_close(adapter->netdev);
2881         return -EIO;
2882 }
2883
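/* Arm or disarm Wake-on-LAN via the magic-packet filter. Enabling programs
 * the current netdev dev_addr as the magic-packet match address and arms
 * D3hot/D3cold wake; disabling programs a zeroed MAC and disarms both wake
 * states.
 */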
2884 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2885 {
2886         struct be_dma_mem cmd;
2887         int status = 0;
2888         u8 mac[ETH_ALEN];
2889
2890         eth_zero_addr(mac);
2891
2892         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2893         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2894                                      GFP_KERNEL);
2895         if (!cmd.va)
2896                 return -ENOMEM;
2897
2898         if (enable) {
2899                 status = pci_write_config_dword(adapter->pdev,
2900                                                 PCICFG_PM_CONTROL_OFFSET,
2901                                                 PCICFG_PM_CONTROL_MASK);
2902                 if (status) {
2903                         dev_err(&adapter->pdev->dev,
2904                                 "Could not enable Wake-on-LAN\n");
2905                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2906                                           cmd.dma);
2907                         return status;
2908                 }
2909                 status = be_cmd_enable_magic_wol(adapter,
2910                                                  adapter->netdev->dev_addr,
2911                                                  &cmd);
2912                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2913                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2914         } else {
2915                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2916                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2917                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2918         }
2919
2920         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2921         return status;
2922 }
2923
2924 /*
2925  * Generate a seed MAC address from the PF MAC address using jhash.
2926  * MAC addresses for the VFs are assigned incrementally, starting from
2927  * the seed. These addresses are programmed into the ASIC by the PF; the
2928  * VF driver queries for its MAC address during probe.
2929  */
2930 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2931 {
2932         u32 vf;
2933         int status = 0;
2934         u8 mac[ETH_ALEN];
2935         struct be_vf_cfg *vf_cfg;
2936
2937         be_vf_eth_addr_generate(adapter, mac);
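        /* mac[] now holds the jhash-derived seed; VF n is assigned seed + n
         * via the mac[5] increment at the bottom of this loop. For example,
         * a seed ending in :5A yields :5A for VF0, :5B for VF1, and so on
         * (the last octet wraps silently past 0xff).
         */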
2938
2939         for_all_vfs(adapter, vf_cfg, vf) {
2940                 if (BEx_chip(adapter))
2941                         status = be_cmd_pmac_add(adapter, mac,
2942                                                  vf_cfg->if_handle,
2943                                                  &vf_cfg->pmac_id, vf + 1);
2944                 else
2945                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2946                                                 vf + 1);
2947
2948                 if (status)
2949                         dev_err(&adapter->pdev->dev,
2950                                 "MAC address assignment failed for VF %d\n",
2951                                 vf);
2952                 else
2953                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2954
2955                 mac[5] += 1;
2956         }
2957         return status;
2958 }
2959
2960 static int be_vfs_mac_query(struct be_adapter *adapter)
2961 {
2962         int status, vf;
2963         u8 mac[ETH_ALEN];
2964         struct be_vf_cfg *vf_cfg;
2965
2966         for_all_vfs(adapter, vf_cfg, vf) {
2967                 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2968                                                mac, vf_cfg->if_handle,
2969                                                false, vf + 1);
2970                 if (status)
2971                         return status;
2972                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2973         }
2974         return 0;
2975 }
2976
2977 static void be_vf_clear(struct be_adapter *adapter)
2978 {
2979         struct be_vf_cfg *vf_cfg;
2980         u32 vf;
2981
2982         if (pci_vfs_assigned(adapter->pdev)) {
2983                 dev_warn(&adapter->pdev->dev,
2984                          "VFs are assigned to VMs: not disabling VFs\n");
2985                 goto done;
2986         }
2987
2988         pci_disable_sriov(adapter->pdev);
2989
2990         for_all_vfs(adapter, vf_cfg, vf) {
2991                 if (BEx_chip(adapter))
2992                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2993                                         vf_cfg->pmac_id, vf + 1);
2994                 else
2995                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2996                                        vf + 1);
2997
2998                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2999         }
3000 done:
3001         kfree(adapter->vf_cfg);
3002         adapter->num_vfs = 0;
3003 }
3004
3005 static void be_clear_queues(struct be_adapter *adapter)
3006 {
3007         be_mcc_queues_destroy(adapter);
3008         be_rx_cqs_destroy(adapter);
3009         be_tx_queues_destroy(adapter);
3010         be_evt_queues_destroy(adapter);
3011 }
3012
3013 static void be_cancel_worker(struct be_adapter *adapter)
3014 {
3015         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3016                 cancel_delayed_work_sync(&adapter->work);
3017                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3018         }
3019 }
3020
3021 static void be_mac_clear(struct be_adapter *adapter)
3022 {
3023         int i;
3024
3025         if (adapter->pmac_id) {
3026                 for (i = 0; i < (adapter->uc_macs + 1); i++)
3027                         be_cmd_pmac_del(adapter, adapter->if_handle,
3028                                         adapter->pmac_id[i], 0);
3029                 adapter->uc_macs = 0;
3030
3031                 kfree(adapter->pmac_id);
3032                 adapter->pmac_id = NULL;
3033         }
3034 }
3035
3036 #ifdef CONFIG_BE2NET_VXLAN
3037 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3038 {
3039         if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3040                 be_cmd_manage_iface(adapter, adapter->if_handle,
3041                                     OP_CONVERT_TUNNEL_TO_NORMAL);
3042
3043         if (adapter->vxlan_port)
3044                 be_cmd_set_vxlan_port(adapter, 0);
3045
3046         adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3047         adapter->vxlan_port = 0;
3048 }
3049 #endif
3050
3051 static int be_clear(struct be_adapter *adapter)
3052 {
3053         be_cancel_worker(adapter);
3054
3055         if (sriov_enabled(adapter))
3056                 be_vf_clear(adapter);
3057
3058 #ifdef CONFIG_BE2NET_VXLAN
3059         be_disable_vxlan_offloads(adapter);
3060 #endif
3061         /* delete the primary MAC along with the uc-mac list */
3062         be_mac_clear(adapter);
3063
3064         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3065
3066         be_clear_queues(adapter);
3067
3068         be_msix_disable(adapter);
3069         adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3070         return 0;
3071 }
3072
3073 static int be_vfs_if_create(struct be_adapter *adapter)
3074 {
3075         struct be_resources res = {0};
3076         struct be_vf_cfg *vf_cfg;
3077         u32 cap_flags, en_flags, vf;
3078         int status = 0;
3079
3080         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3081                     BE_IF_FLAGS_MULTICAST;
3082
3083         for_all_vfs(adapter, vf_cfg, vf) {
3084                 if (!BE3_chip(adapter)) {
3085                         status = be_cmd_get_profile_config(adapter, &res,
3086                                                            vf + 1);
3087                         if (!status)
3088                                 cap_flags = res.if_cap_flags;
3089                 }
3090
3091                 /* If a FW profile exists, then cap_flags are updated */
3092                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3093                                         BE_IF_FLAGS_BROADCAST |
3094                                         BE_IF_FLAGS_MULTICAST);
3095                 status = be_cmd_if_create(adapter, cap_flags,
3096                                           en_flags, &vf_cfg->if_handle,
3097                                           vf + 1);
3098                 if (status)
3099                         goto err;
3100         }
3101 err:
3102         return status;
3103 }
3104
3105 static int be_vf_setup_init(struct be_adapter *adapter)
3106 {
3107         struct be_vf_cfg *vf_cfg;
3108         int vf;
3109
3110         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3111                                   GFP_KERNEL);
3112         if (!adapter->vf_cfg)
3113                 return -ENOMEM;
3114
3115         for_all_vfs(adapter, vf_cfg, vf) {
3116                 vf_cfg->if_handle = -1;
3117                 vf_cfg->pmac_id = -1;
3118         }
3119         return 0;
3120 }
3121
3122 static int be_vf_setup(struct be_adapter *adapter)
3123 {
3124         struct device *dev = &adapter->pdev->dev;
3125         struct be_vf_cfg *vf_cfg;
3126         int status, old_vfs, vf;
3127         u32 privileges;
3128         u16 lnk_speed;
3129
3130         old_vfs = pci_num_vf(adapter->pdev);
3131         if (old_vfs) {
3132                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3133                 if (old_vfs != num_vfs)
3134                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3135                 adapter->num_vfs = old_vfs;
3136         } else {
3137                 if (num_vfs > be_max_vfs(adapter))
3138                         dev_info(dev, "Device supports only %d VFs, not %d\n",
3139                                  be_max_vfs(adapter), num_vfs);
3140                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3141                 if (!adapter->num_vfs)
3142                         return 0;
3143         }
3144
3145         status = be_vf_setup_init(adapter);
3146         if (status)
3147                 goto err;
3148
3149         if (old_vfs) {
3150                 for_all_vfs(adapter, vf_cfg, vf) {
3151                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3152                         if (status)
3153                                 goto err;
3154                 }
3155         } else {
3156                 status = be_vfs_if_create(adapter);
3157                 if (status)
3158                         goto err;
3159         }
3160
3161         if (old_vfs) {
3162                 status = be_vfs_mac_query(adapter);
3163                 if (status)
3164                         goto err;
3165         } else {
3166                 status = be_vf_eth_addr_config(adapter);
3167                 if (status)
3168                         goto err;
3169         }
3170
3171         for_all_vfs(adapter, vf_cfg, vf) {
3172                 /* Allow VFs to program MAC/VLAN filters */
3173                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3174                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3175                         status = be_cmd_set_fn_privileges(adapter,
3176                                                           privileges |
3177                                                           BE_PRIV_FILTMGMT,
3178                                                           vf + 1);
3179                         if (!status)
3180                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3181                                          vf);
3182                 }
3183
3184                 /* BE3 FW, by default, caps the VF TX-rate to 100 Mbps.
3185                  * Allow the full available bandwidth.
3186                  */
3187                 if (BE3_chip(adapter) && !old_vfs)
3188                         be_cmd_config_qos(adapter, 1000, vf + 1);
3189
3190                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3191                                                   NULL, vf + 1);
3192                 if (!status)
3193                         vf_cfg->tx_rate = lnk_speed;
3194
3195                 if (!old_vfs) {
3196                         be_cmd_enable_vf(adapter, vf + 1);
3197                         be_cmd_set_logical_link_config(adapter,
3198                                                        IFLA_VF_LINK_STATE_AUTO,
3199                                                        vf + 1);
3200                 }
3201         }
3202
3203         if (!old_vfs) {
3204                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3205                 if (status) {
3206                         dev_err(dev, "SRIOV enable failed\n");
3207                         adapter->num_vfs = 0;
3208                         goto err;
3209                 }
3210         }
3211         return 0;
3212 err:
3213         dev_err(dev, "VF setup failed\n");
3214         be_vf_clear(adapter);
3215         return status;
3216 }
3217
3218 /* Converting function_mode bits on BE3 to SH mc_type enums */
3219
3220 static u8 be_convert_mc_type(u32 function_mode)
3221 {
3222         if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3223                 return vNIC1;
3224         else if (function_mode & FLEX10_MODE)
3225                 return FLEX10;
3226         else if (function_mode & VNIC_MODE)
3227                 return vNIC2;
3228         else if (function_mode & UMC_ENABLED)
3229                 return UMC;
3230         else
3231                 return MC_NONE;
3232 }
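
/* The mapping above, in table form:
 *   VNIC_MODE + FLEX10_MODE -> vNIC1
 *   FLEX10_MODE             -> FLEX10
 *   VNIC_MODE               -> vNIC2
 *   UMC_ENABLED             -> UMC
 *   otherwise               -> MC_NONE
 */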
3233
3234 /* On BE2/BE3 FW does not suggest the supported limits */
3235 static void BEx_get_resources(struct be_adapter *adapter,
3236                               struct be_resources *res)
3237 {
3238         struct pci_dev *pdev = adapter->pdev;
3239         bool use_sriov = false;
3240         int max_vfs = 0;
3241
3242         if (be_physfn(adapter) && BE3_chip(adapter)) {
3243                 be_cmd_get_profile_config(adapter, res, 0);
3244                 /* Some old versions of BE3 FW don't report max_vfs value */
3245                 if (res->max_vfs == 0) {
3246                         max_vfs = pci_sriov_get_totalvfs(pdev);
3247                         res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3248                 }
3249                 use_sriov = res->max_vfs && sriov_want(adapter);
3250         }
3251
3252         if (be_physfn(adapter))
3253                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3254         else
3255                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3256
3257         adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3258
3259         if (be_is_mc(adapter)) {
3260                 /* Assume that there are 4 channels per port when
3261                  * multi-channel is enabled
3262                  */
3263                 if (be_is_qnq_mode(adapter))
3264                         res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
3265                 else
3266                         /* In a non-qnq multichannel mode, the pvid
3267                          * takes up one vlan entry
3268                          */
3269                         res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3270         } else {
3271                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3272         }
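
        /* Worked example, assuming BE_NUM_VLANS_SUPPORTED is 64: a QnQ
         * multi-channel function gets 64 / 8 = 8 VLAN filters, a non-QnQ
         * multi-channel function gets 64 / 4 - 1 = 15 (the pvid consumes
         * one entry), and a single-channel function gets all 64.
         */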
3273
3274         res->max_mcast_mac = BE_MAX_MC;
3275
3276         /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3277          * 2) Create multiple TX rings on a BE3-R multi-channel interface
3278          *    *only* if it is RSS-capable.
3279          */
3280         if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3281             !be_physfn(adapter) || (be_is_mc(adapter) &&
3282             !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
3283                 res->max_tx_qs = 1;
3284         else
3285                 res->max_tx_qs = BE3_MAX_TX_QS;
3286
3287         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3288             !use_sriov && be_physfn(adapter))
3289                 res->max_rss_qs = (adapter->be3_native) ?
3290                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3291         res->max_rx_qs = res->max_rss_qs + 1;
3292
3293         if (be_physfn(adapter))
3294                 res->max_evt_qs = (res->max_vfs > 0) ?
3295                                         BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3296         else
3297                 res->max_evt_qs = 1;
3298
3299         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3300         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3301                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3302 }
3303
3304 static void be_setup_init(struct be_adapter *adapter)
3305 {
3306         adapter->vlan_prio_bmap = 0xff;
3307         adapter->phy.link_speed = -1;
3308         adapter->if_handle = -1;
3309         adapter->be3_native = false;
3310         adapter->promiscuous = false;
3311         if (be_physfn(adapter))
3312                 adapter->cmd_privileges = MAX_PRIVILEGES;
3313         else
3314                 adapter->cmd_privileges = MIN_PRIVILEGES;
3315 }
3316
3317 static int be_get_resources(struct be_adapter *adapter)
3318 {
3319         struct device *dev = &adapter->pdev->dev;
3320         struct be_resources res = {0};
3321         int status;
3322
3323         if (BEx_chip(adapter)) {
3324                 BEx_get_resources(adapter, &res);
3325                 adapter->res = res;
3326         }
3327
3328         /* For Lancer, SH etc. read the per-function resource limits from FW.
3329          * GET_FUNC_CONFIG returns per-function guaranteed limits.
3330          * GET_PROFILE_CONFIG returns the PCI-E related (PF-pool) limits.
3331          */
3332         if (!BEx_chip(adapter)) {
3333                 status = be_cmd_get_func_config(adapter, &res);
3334                 if (status)
3335                         return status;
3336
3337                 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3338                 if (be_roce_supported(adapter))
3339                         res.max_evt_qs /= 2;
3340                 adapter->res = res;
3341
3342                 if (be_physfn(adapter)) {
3343                         status = be_cmd_get_profile_config(adapter, &res, 0);
3344                         if (status)
3345                                 return status;
3346                         adapter->res.max_vfs = res.max_vfs;
3347                 }
3348
3349                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3350                          be_max_txqs(adapter), be_max_rxqs(adapter),
3351                          be_max_rss(adapter), be_max_eqs(adapter),
3352                          be_max_vfs(adapter));
3353                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3354                          be_max_uc(adapter), be_max_mc(adapter),
3355                          be_max_vlans(adapter));
3356         }
3357
3358         return 0;
3359 }
3360
3361 /* Routine to query per function resource limits */
3362 static int be_get_config(struct be_adapter *adapter)
3363 {
3364         u16 profile_id;
3365         int status;
3366
3367         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3368                                      &adapter->function_mode,
3369                                      &adapter->function_caps,
3370                                      &adapter->asic_rev);
3371         if (status)
3372                 return status;
3373
3374         if (be_physfn(adapter)) {
3375                 status = be_cmd_get_active_profile(adapter, &profile_id);
3376                 if (!status)
3377                         dev_info(&adapter->pdev->dev,
3378                                  "Using profile 0x%x\n", profile_id);
3379         }
3380
3381         status = be_get_resources(adapter);
3382         if (status)
3383                 return status;
3384
3385         adapter->pmac_id = kcalloc(be_max_uc(adapter),
3386                                    sizeof(*adapter->pmac_id), GFP_KERNEL);
3387         if (!adapter->pmac_id)
3388                 return -ENOMEM;
3389
3390         /* Sanitize cfg_num_qs based on HW and platform limits */
3391         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3392
3393         return 0;
3394 }
3395
3396 static int be_mac_setup(struct be_adapter *adapter)
3397 {
3398         u8 mac[ETH_ALEN];
3399         int status;
3400
3401         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3402                 status = be_cmd_get_perm_mac(adapter, mac);
3403                 if (status)
3404                         return status;
3405
3406                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3407                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3408         } else {
3409                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3410                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3411         }
3412
3413         /* For BE3-R VFs, the PF programs the initial MAC address */
3414         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3415                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3416                                 &adapter->pmac_id[0], 0);
3417         return 0;
3418 }
3419
3420 static void be_schedule_worker(struct be_adapter *adapter)
3421 {
3422         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3423         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3424 }
3425
3426 static int be_setup_queues(struct be_adapter *adapter)
3427 {
3428         struct net_device *netdev = adapter->netdev;
3429         int status;
3430
3431         status = be_evt_queues_create(adapter);
3432         if (status)
3433                 goto err;
3434
3435         status = be_tx_qs_create(adapter);
3436         if (status)
3437                 goto err;
3438
3439         status = be_rx_cqs_create(adapter);
3440         if (status)
3441                 goto err;
3442
3443         status = be_mcc_queues_create(adapter);
3444         if (status)
3445                 goto err;
3446
3447         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3448         if (status)
3449                 goto err;
3450
3451         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3452         if (status)
3453                 goto err;
3454
3455         return 0;
3456 err:
3457         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3458         return status;
3459 }
3460
3461 int be_update_queues(struct be_adapter *adapter)
3462 {
3463         struct net_device *netdev = adapter->netdev;
3464         int status;
3465
3466         if (netif_running(netdev))
3467                 be_close(netdev);
3468
3469         be_cancel_worker(adapter);
3470
3471         /* If any vectors have been shared with RoCE we cannot re-program
3472          * the MSIx table.
3473          */
3474         if (!adapter->num_msix_roce_vec)
3475                 be_msix_disable(adapter);
3476
3477         be_clear_queues(adapter);
3478
3479         if (!msix_enabled(adapter)) {
3480                 status = be_msix_enable(adapter);
3481                 if (status)
3482                         return status;
3483         }
3484
3485         status = be_setup_queues(adapter);
3486         if (status)
3487                 return status;
3488
3489         be_schedule_worker(adapter);
3490
3491         if (netif_running(netdev))
3492                 status = be_open(netdev);
3493
3494         return status;
3495 }
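
/* Typical usage sketch (hypothetical caller, e.g. an ethtool set-channels
 * handler):
 *
 *	adapter->cfg_num_qs = new_channel_cnt;
 *	status = be_update_queues(adapter);
 *
 * This closes the interface if it is running, rebuilds the EQ/TX/RX/MCC
 * queues under the new counts, and reopens the interface.
 */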
3496
3497 static int be_setup(struct be_adapter *adapter)
3498 {
3499         struct device *dev = &adapter->pdev->dev;
3500         u32 tx_fc, rx_fc, en_flags;
3501         int status;
3502
3503         be_setup_init(adapter);
3504
3505         if (!lancer_chip(adapter))
3506                 be_cmd_req_native_mode(adapter);
3507
3508         status = be_get_config(adapter);
3509         if (status)
3510                 goto err;
3511
3512         status = be_msix_enable(adapter);
3513         if (status)
3514                 goto err;
3515
3516         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3517                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3518         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3519                 en_flags |= BE_IF_FLAGS_RSS;
3520         en_flags = en_flags & be_if_cap_flags(adapter);
3521         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3522                                   &adapter->if_handle, 0);
3523         if (status)
3524                 goto err;
3525
3526         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3527         rtnl_lock();
3528         status = be_setup_queues(adapter);
3529         rtnl_unlock();
3530         if (status)
3531                 goto err;
3532
3533         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3534
3535         status = be_mac_setup(adapter);
3536         if (status)
3537                 goto err;
3538
3539         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3540
3541         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3542                 dev_err(dev, "Firmware on card is old (%s), IRQs may not work.\n",
3543                         adapter->fw_ver);
3544                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3545         }
3546
3547         if (adapter->vlans_added)
3548                 be_vid_config(adapter);
3549
3550         be_set_rx_mode(adapter->netdev);
3551
3552         be_cmd_get_acpi_wol_cap(adapter);
3553
3554         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3555
3556         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3557                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3558                                         adapter->rx_fc);
3559
3560         if (be_physfn(adapter))
3561                 be_cmd_set_logical_link_config(adapter,
3562                                                IFLA_VF_LINK_STATE_AUTO, 0);
3563
3564         if (sriov_want(adapter)) {
3565                 if (be_max_vfs(adapter))
3566                         be_vf_setup(adapter);
3567                 else
3568                         dev_warn(dev, "device doesn't support SRIOV\n");
3569         }
3570
3571         status = be_cmd_get_phy_info(adapter);
3572         if (!status && be_pause_supported(adapter))
3573                 adapter->phy.fc_autoneg = 1;
3574
3575         be_schedule_worker(adapter);
3576         adapter->flags |= BE_FLAGS_SETUP_DONE;
3577         return 0;
3578 err:
3579         be_clear(adapter);
3580         return status;
3581 }
3582
3583 #ifdef CONFIG_NET_POLL_CONTROLLER
3584 static void be_netpoll(struct net_device *netdev)
3585 {
3586         struct be_adapter *adapter = netdev_priv(netdev);
3587         struct be_eq_obj *eqo;
3588         int i;
3589
3590         for_all_evt_queues(adapter, eqo, i) {
3591                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3592                 napi_schedule(&eqo->napi);
3593         }
3596 }
3597 #endif
3598
3599 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3600 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
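
/* flash_cookie holds the 32-byte section-directory marker split across two
 * 16-byte halves; get_fsec_info() below scans the UFI image in 32-byte
 * steps looking for an exact match.
 */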
3601
3602 static bool be_flash_redboot(struct be_adapter *adapter,
3603                              const u8 *p, u32 img_start, int image_size,
3604                              int hdr_size)
3605 {
3606         u32 crc_offset;
3607         u8 flashed_crc[4];
3608         int status;
3609
3610         crc_offset = hdr_size + img_start + image_size - 4;
3611
3612         p += crc_offset;
3613
3614         status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
3615         if (status) {
3616                 dev_err(&adapter->pdev->dev,
3617                         "could not get crc from flash, not flashing redboot\n");
3618                 return false;
3619         }
3620
3621         /* update redboot only if crc does not match */
3622         if (!memcmp(flashed_crc, p, 4))
3623                 return false;
3624         else
3625                 return true;
3626 }
3627
3628 static bool phy_flashing_required(struct be_adapter *adapter)
3629 {
3630         return (adapter->phy.phy_type == TN_8022 &&
3631                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3632 }
3633
3634 static bool is_comp_in_ufi(struct be_adapter *adapter,
3635                            struct flash_section_info *fsec, int type)
3636 {
3637         int i = 0, img_type = 0;
3638         struct flash_section_info_g2 *fsec_g2 = NULL;
3639
3640         if (BE2_chip(adapter))
3641                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3642
3643         for (i = 0; i < MAX_FLASH_COMP; i++) {
3644                 if (fsec_g2)
3645                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3646                 else
3647                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3648
3649                 if (img_type == type)
3650                         return true;
3651         }
3652         return false;
3654 }
3655
3656 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3657                                                 int header_size,
3658                                                 const struct firmware *fw)
3659 {
3660         struct flash_section_info *fsec = NULL;
3661         const u8 *p = fw->data;
3662
3663         p += header_size;
3664         while (p < (fw->data + fw->size)) {
3665                 fsec = (struct flash_section_info *)p;
3666                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3667                         return fsec;
3668                 p += 32;
3669         }
3670         return NULL;
3671 }
3672
3673 static int be_flash(struct be_adapter *adapter, const u8 *img,
3674                     struct be_dma_mem *flash_cmd, int optype, int img_size)
3675 {
3676         u32 total_bytes = 0, flash_op, num_bytes = 0;
3677         int status = 0;
3678         struct be_cmd_write_flashrom *req = flash_cmd->va;
3679
3680         total_bytes = img_size;
3681         while (total_bytes) {
3682                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3683
3684                 total_bytes -= num_bytes;
3685
3686                 if (!total_bytes) {
3687                         if (optype == OPTYPE_PHY_FW)
3688                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3689                         else
3690                                 flash_op = FLASHROM_OPER_FLASH;
3691                 } else {
3692                         if (optype == OPTYPE_PHY_FW)
3693                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3694                         else
3695                                 flash_op = FLASHROM_OPER_SAVE;
3696                 }
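
                /* Intermediate chunks are only staged (*_SAVE ops); the
                 * final chunk triggers the actual burn (*_FLASH ops).
                 */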
3697
3698                 memcpy(req->data_buf, img, num_bytes);
3699                 img += num_bytes;
3700                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3701                                                flash_op, num_bytes);
3702                 if (status) {
3703                         if (status == ILLEGAL_IOCTL_REQ &&
3704                             optype == OPTYPE_PHY_FW)
3705                                 break;
3706                         dev_err(&adapter->pdev->dev,
3707                                 "cmd to write to flash rom failed.\n");
3708                         return status;
3709                 }
3710         }
3711         return 0;
3712 }
3713
3714 /* For BE2, BE3 and BE3-R */
3715 static int be_flash_BEx(struct be_adapter *adapter,
3716                         const struct firmware *fw,
3717                         struct be_dma_mem *flash_cmd, int num_of_images)
3718 {
3719         int status = 0, i, filehdr_size = 0;
3720         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3721         const u8 *p = fw->data;
3722         const struct flash_comp *pflashcomp;
3723         int num_comp, redboot;
3724         struct flash_section_info *fsec = NULL;
3725
3726         struct flash_comp gen3_flash_types[] = {
3727                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3728                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3729                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3730                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3731                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3732                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3733                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3734                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3735                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3736                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3737                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3738                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3739                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3740                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3741                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3742                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3743                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3744                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3745                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3746                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3747         };
3748
3749         struct flash_comp gen2_flash_types[] = {
3750                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3751                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3752                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3753                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3754                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3755                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3756                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3757                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3758                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3759                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3760                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3761                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3762                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3763                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3764                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3765                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3766         };
3767
3768         if (BE3_chip(adapter)) {
3769                 pflashcomp = gen3_flash_types;
3770                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3771                 num_comp = ARRAY_SIZE(gen3_flash_types);
3772         } else {
3773                 pflashcomp = gen2_flash_types;
3774                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3775                 num_comp = ARRAY_SIZE(gen2_flash_types);
3776         }
3777
3778         /* Get flash section info */
3779         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3780         if (!fsec) {
3781                 dev_err(&adapter->pdev->dev,
3782                         "Invalid cookie. UFI corrupted?\n");
3783                 return -1;
3784         }
3785         for (i = 0; i < num_comp; i++) {
3786                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3787                         continue;
3788
3789                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3790                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3791                         continue;
3792
3793                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3794                     !phy_flashing_required(adapter))
3795                         continue;
3796
3797                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3798                         redboot = be_flash_redboot(adapter, fw->data,
3799                                                    pflashcomp[i].offset,
3800                                                    pflashcomp[i].size,
3801                                                    filehdr_size +
3802                                                    img_hdrs_size);
3803                         if (!redboot)
3804                                 continue;
3805                 }
3806
3807                 p = fw->data;
3808                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3809                 if (p + pflashcomp[i].size > fw->data + fw->size)
3810                         return -1;
3811
3812                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3813                                   pflashcomp[i].size);
3814                 if (status) {
3815                         dev_err(&adapter->pdev->dev,
3816                                 "Flashing section type %d failed.\n",
3817                                 pflashcomp[i].img_type);
3818                         return status;
3819                 }
3820         }
3821         return 0;
3822 }
3823
3824 static int be_flash_skyhawk(struct be_adapter *adapter,
3825                             const struct firmware *fw,
3826                             struct be_dma_mem *flash_cmd, int num_of_images)
3827 {
3828         int status = 0, i, filehdr_size = 0;
3829         int img_offset, img_size, img_optype, redboot;
3830         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3831         const u8 *p = fw->data;
3832         struct flash_section_info *fsec = NULL;
3833
3834         filehdr_size = sizeof(struct flash_file_hdr_g3);
3835         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3836         if (!fsec) {
3837                 dev_err(&adapter->pdev->dev,
3838                         "Invalid cookie. UFI corrupted?\n");
3839                 return -1;
3840         }
3841
3842         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3843                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3844                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3845
3846                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3847                 case IMAGE_FIRMWARE_iSCSI:
3848                         img_optype = OPTYPE_ISCSI_ACTIVE;
3849                         break;
3850                 case IMAGE_BOOT_CODE:
3851                         img_optype = OPTYPE_REDBOOT;
3852                         break;
3853                 case IMAGE_OPTION_ROM_ISCSI:
3854                         img_optype = OPTYPE_BIOS;
3855                         break;
3856                 case IMAGE_OPTION_ROM_PXE:
3857                         img_optype = OPTYPE_PXE_BIOS;
3858                         break;
3859                 case IMAGE_OPTION_ROM_FCoE:
3860                         img_optype = OPTYPE_FCOE_BIOS;
3861                         break;
3862                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3863                         img_optype = OPTYPE_ISCSI_BACKUP;
3864                         break;
3865                 case IMAGE_NCSI:
3866                         img_optype = OPTYPE_NCSI_FW;
3867                         break;
3868                 default:
3869                         continue;
3870                 }
3871
3872                 if (img_optype == OPTYPE_REDBOOT) {
3873                         redboot = be_flash_redboot(adapter, fw->data,
3874                                                    img_offset, img_size,
3875                                                    filehdr_size +
3876                                                    img_hdrs_size);
3877                         if (!redboot)
3878                                 continue;
3879                 }
3880
3881                 p = fw->data;
3882                 p += filehdr_size + img_offset + img_hdrs_size;
3883                 if (p + img_size > fw->data + fw->size)
3884                         return -1;
3885
3886                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3887                 if (status) {
3888                         dev_err(&adapter->pdev->dev,
3889                                 "Flashing section type %d failed.\n",
3890                                 le32_to_cpu(fsec->fsec_entry[i].type));
3891                         return status;
3892                 }
3893         }
3894         return 0;
3895 }
3896
3897 static int lancer_fw_download(struct be_adapter *adapter,
3898                               const struct firmware *fw)
3899 {
3900 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3901 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3902         struct be_dma_mem flash_cmd;
3903         const u8 *data_ptr = NULL;
3904         u8 *dest_image_ptr = NULL;
3905         size_t image_size = 0;
3906         u32 chunk_size = 0;
3907         u32 data_written = 0;
3908         u32 offset = 0;
3909         int status = 0;
3910         u8 add_status = 0;
3911         u8 change_status;
3912
3913         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3914                 dev_err(&adapter->pdev->dev,
3915                         "FW Image not properly aligned. "
3916                         "Length must be 4-byte aligned.\n");
3917                 status = -EINVAL;
3918                 goto lancer_fw_exit;
3919         }
3920
3921         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3922                                 + LANCER_FW_DOWNLOAD_CHUNK;
3923         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3924                                           &flash_cmd.dma, GFP_KERNEL);
3925         if (!flash_cmd.va) {
3926                 status = -ENOMEM;
3927                 goto lancer_fw_exit;
3928         }
3929
3930         dest_image_ptr = flash_cmd.va +
3931                                 sizeof(struct lancer_cmd_req_write_object);
3932         image_size = fw->size;
3933         data_ptr = fw->data;
3934
3935         while (image_size) {
3936                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3937
3938                 /* Copy the image chunk content. */
3939                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3940
3941                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3942                                                  chunk_size, offset,
3943                                                  LANCER_FW_DOWNLOAD_LOCATION,
3944                                                  &data_written, &change_status,
3945                                                  &add_status);
3946                 if (status)
3947                         break;
3948
3949                 offset += data_written;
3950                 data_ptr += data_written;
3951                 image_size -= data_written;
3952         }
3953
3954         if (!status) {
3955                 /* Commit the FW written */
3956                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3957                                                  0, offset,
3958                                                  LANCER_FW_DOWNLOAD_LOCATION,
3959                                                  &data_written, &change_status,
3960                                                  &add_status);
3961         }
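
        /* The zero-length write at the final offset above commits the
         * previously staged chunks to flash.
         */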
3962
3963         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3964                           flash_cmd.dma);
3965         if (status) {
3966                 dev_err(&adapter->pdev->dev,
3967                         "Firmware load error. "
3968                         "Status code: 0x%x Additional Status: 0x%x\n",
3969                         status, add_status);
3970                 goto lancer_fw_exit;
3971         }
3972
3973         if (change_status == LANCER_FW_RESET_NEEDED) {
3974                 dev_info(&adapter->pdev->dev,
3975                          "Resetting adapter to activate new FW\n");
3976                 status = lancer_physdev_ctrl(adapter,
3977                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3978                 if (status) {
3979                         dev_err(&adapter->pdev->dev,
3980                                 "Adapter busy for FW reset.\n"
3981                                 "New FW will not be active.\n");
3982                         goto lancer_fw_exit;
3983                 }
3984         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3985                 dev_err(&adapter->pdev->dev,
3986                         "System reboot required for new FW to be active\n");
3987         }
3988
3989         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3990 lancer_fw_exit:
3991         return status;
3992 }
3993
3994 #define UFI_TYPE2               2
3995 #define UFI_TYPE3               3
3996 #define UFI_TYPE3R              10
3997 #define UFI_TYPE4               4
3998 static int be_get_ufi_type(struct be_adapter *adapter,
3999                            struct flash_file_hdr_g3 *fhdr)
4000 {
4001         if (!fhdr)
4002                 goto be_get_ufi_exit;
4003
4004         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4005                 return UFI_TYPE4;
4006         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4007                 if (fhdr->asic_type_rev == 0x10)
4008                         return UFI_TYPE3R;
4009                 else
4010                         return UFI_TYPE3;
4011         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
4012                 return UFI_TYPE2;
4013
4014 be_get_ufi_exit:
4015         dev_err(&adapter->pdev->dev,
4016                 "UFI and interface are not compatible for flashing\n");
4017         return -1;
4018 }
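
/* In short: build[0] '4' on Skyhawk maps to UFI_TYPE4; '3' on BE3 maps to
 * UFI_TYPE3R (asic_type_rev 0x10, i.e. BE3-R images) or UFI_TYPE3; and '2'
 * on BE2 maps to UFI_TYPE2. Any other combination is rejected.
 */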
4019
4020 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
4021 {
4022         struct flash_file_hdr_g3 *fhdr3;
4023         struct image_hdr *img_hdr_ptr = NULL;
4024         struct be_dma_mem flash_cmd;
4025         const u8 *p;
4026         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
4027
4028         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4029         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4030                                           &flash_cmd.dma, GFP_KERNEL);
4031         if (!flash_cmd.va) {
4032                 status = -ENOMEM;
4033                 goto be_fw_exit;
4034         }
4035
4036         p = fw->data;
4037         fhdr3 = (struct flash_file_hdr_g3 *)p;
4038
4039         ufi_type = be_get_ufi_type(adapter, fhdr3);
4040
4041         num_imgs = le32_to_cpu(fhdr3->num_imgs);
4042         for (i = 0; i < num_imgs; i++) {
4043                 img_hdr_ptr = (struct image_hdr *)(fw->data +
4044                                 (sizeof(struct flash_file_hdr_g3) +
4045                                  i * sizeof(struct image_hdr)));
4046                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
4047                         switch (ufi_type) {
4048                         case UFI_TYPE4:
4049                                 status = be_flash_skyhawk(adapter, fw,
4050                                                           &flash_cmd, num_imgs);
4051                                 break;
4052                         case UFI_TYPE3R:
4053                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
4054                                                       num_imgs);
4055                                 break;
4056                         case UFI_TYPE3:
4057                                 /* Do not flash this ufi on BE3-R cards */
4058                                 if (adapter->asic_rev < 0x10) {
4059                                         status = be_flash_BEx(adapter, fw,
4060                                                               &flash_cmd,
4061                                                               num_imgs);
4062                                 } else {
4063                                         status = -1;
4064                                         dev_err(&adapter->pdev->dev,
4065                                                 "Can't load BE3 UFI on BE3R\n");
4066                                 }
4067                         }
4068                 }
4069         }
4070
4071         if (ufi_type == UFI_TYPE2)
4072                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
4073         else if (ufi_type == -1)
4074                 status = -1;
4075
4076         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4077                           flash_cmd.dma);
4078         if (status) {
4079                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
4080                 goto be_fw_exit;
4081         }
4082
4083         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
4084
4085 be_fw_exit:
4086         return status;
4087 }
4088
4089 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4090 {
4091         const struct firmware *fw;
4092         int status;
4093
4094         if (!netif_running(adapter->netdev)) {
4095                 dev_err(&adapter->pdev->dev,
4096                         "Firmware load not allowed (interface is down)\n");
4097                 return -ENETDOWN;
4098         }
4099
4100         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4101         if (status)
4102                 goto fw_exit;
4103
4104         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4105
4106         if (lancer_chip(adapter))
4107                 status = lancer_fw_download(adapter, fw);
4108         else
4109                 status = be_fw_download(adapter, fw);
4110
4111         if (!status)
4112                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4113                                   adapter->fw_on_flash);
4114
4115 fw_exit:
4116         release_firmware(fw);
4117         return status;
4118 }
4119
4120 static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
4121 {
4122         struct be_adapter *adapter = netdev_priv(dev);
4123         struct nlattr *attr, *br_spec;
4124         int rem;
4125         int status = 0;
4126         u16 mode = 0;
4127
4128         if (!sriov_enabled(adapter))
4129                 return -EOPNOTSUPP;
4130
4131         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4132         if (!br_spec)
4132                 return -EINVAL;
4132
4133         nla_for_each_nested(attr, br_spec, rem) {
4134                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4135                         continue;
4136
4137                 mode = nla_get_u16(attr);
4138                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4139                         return -EINVAL;
4140
4141                 status = be_cmd_set_hsw_config(adapter, 0, 0,
4142                                                adapter->if_handle,
4143                                                mode == BRIDGE_MODE_VEPA ?
4144                                                PORT_FWD_TYPE_VEPA :
4145                                                PORT_FWD_TYPE_VEB);
4146                 if (status)
4147                         goto err;
4148
4149                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4150                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4151
4152                 return status;
4153         }
4154 err:
4155         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4156                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4157
4158         return status;
4159 }
4160
4161 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4162                                  struct net_device *dev, u32 filter_mask)
4163 {
4164         struct be_adapter *adapter = netdev_priv(dev);
4165         int status = 0;
4166         u8 hsw_mode;
4167
4168         if (!sriov_enabled(adapter))
4169                 return 0;
4170
4171         /* BE and Lancer chips support VEB mode only */
4172         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4173                 hsw_mode = PORT_FWD_TYPE_VEB;
4174         } else {
4175                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4176                                                adapter->if_handle, &hsw_mode);
4177                 if (status)
4178                         return 0;
4179         }
4180
4181         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4182                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4183                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4184 }
4185
4186 #ifdef CONFIG_BE2NET_VXLAN
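/* VxLAN offload hooks: Skyhawk can offload checksum/TSO for VxLAN
 * encapsulated traffic, but only for a single UDP destination port at a
 * time. Adding a port converts the interface to tunnel mode; the matching
 * delete hook below tears the offload down again.
 */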
4187 static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4188                               __be16 port)
4189 {
4190         struct be_adapter *adapter = netdev_priv(netdev);
4191         struct device *dev = &adapter->pdev->dev;
4192         int status;
4193
4194         if (lancer_chip(adapter) || BEx_chip(adapter))
4195                 return;
4196
4197         if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4198                 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4199                          be16_to_cpu(port));
4200                 dev_info(dev,
4201                          "Only one UDP port supported for VxLAN offloads\n");
4202                 return;
4203         }
4204
4205         status = be_cmd_manage_iface(adapter, adapter->if_handle,
4206                                      OP_CONVERT_NORMAL_TO_TUNNEL);
4207         if (status) {
4208                 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4209                 goto err;
4210         }
4211
4212         status = be_cmd_set_vxlan_port(adapter, port);
4213         if (status) {
4214                 dev_warn(dev, "Failed to add VxLAN port\n");
4215                 goto err;
4216         }
4217         adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4218         adapter->vxlan_port = port;
4219
4220         dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4221                  be16_to_cpu(port));
4222         return;
err:
        be_disable_vxlan_offloads(adapter);
}
4227
4228 static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4229                               __be16 port)
4230 {
4231         struct be_adapter *adapter = netdev_priv(netdev);
4232
4233         if (lancer_chip(adapter) || BEx_chip(adapter))
4234                 return;
4235
4236         if (adapter->vxlan_port != port)
4237                 return;
4238
4239         be_disable_vxlan_offloads(adapter);
4240
4241         dev_info(&adapter->pdev->dev,
4242                  "Disabled VxLAN offloads for UDP port %d\n",
4243                  be16_to_cpu(port));
4244 }
4245 #endif
4246
4247 static const struct net_device_ops be_netdev_ops = {
4248         .ndo_open               = be_open,
4249         .ndo_stop               = be_close,
4250         .ndo_start_xmit         = be_xmit,
4251         .ndo_set_rx_mode        = be_set_rx_mode,
4252         .ndo_set_mac_address    = be_mac_addr_set,
4253         .ndo_change_mtu         = be_change_mtu,
4254         .ndo_get_stats64        = be_get_stats64,
4255         .ndo_validate_addr      = eth_validate_addr,
4256         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
4257         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
4258         .ndo_set_vf_mac         = be_set_vf_mac,
4259         .ndo_set_vf_vlan        = be_set_vf_vlan,
4260         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
4261         .ndo_get_vf_config      = be_get_vf_config,
4262         .ndo_set_vf_link_state  = be_set_vf_link_state,
4263 #ifdef CONFIG_NET_POLL_CONTROLLER
4264         .ndo_poll_controller    = be_netpoll,
4265 #endif
4266         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
4267         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
4268 #ifdef CONFIG_NET_RX_BUSY_POLL
4269         .ndo_busy_poll          = be_busy_poll,
4270 #endif
4271 #ifdef CONFIG_BE2NET_VXLAN
4272         .ndo_add_vxlan_port     = be_add_vxlan_port,
4273         .ndo_del_vxlan_port     = be_del_vxlan_port,
4274 #endif
4275 };
4276
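/* Set up netdev feature flags. Skyhawk additionally advertises
 * encapsulated-packet offloads (checksum/TSO inside UDP tunnels) to go
 * with its VxLAN support.
 */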
4277 static void be_netdev_init(struct net_device *netdev)
4278 {
4279         struct be_adapter *adapter = netdev_priv(netdev);
4280
4281         if (skyhawk_chip(adapter)) {
4282                 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4283                                            NETIF_F_TSO | NETIF_F_TSO6 |
4284                                            NETIF_F_GSO_UDP_TUNNEL;
4285                 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4286         }
4287         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4288                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4289                 NETIF_F_HW_VLAN_CTAG_TX;
4290         if (be_multi_rxq(adapter))
4291                 netdev->hw_features |= NETIF_F_RXHASH;
4292
4293         netdev->features |= netdev->hw_features |
4294                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4295
4296         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4297                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4298
4299         netdev->priv_flags |= IFF_UNICAST_FLT;
4300
4301         netdev->flags |= IFF_MULTICAST;
4302
4303         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4304
4305         netdev->netdev_ops = &be_netdev_ops;
4306
4307         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4308 }
4309
4310 static void be_unmap_pci_bars(struct be_adapter *adapter)
4311 {
4312         if (adapter->csr)
4313                 pci_iounmap(adapter->pdev, adapter->csr);
4314         if (adapter->db)
4315                 pci_iounmap(adapter->pdev, adapter->db);
4316 }
4317
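/* The BAR layout differs per chip: Lancer and VFs expose the doorbell
 * region in BAR 0, while BEx/Skyhawk PFs expose it in BAR 4.
 */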
4318 static int db_bar(struct be_adapter *adapter)
4319 {
4320         if (lancer_chip(adapter) || !be_physfn(adapter))
4321                 return 0;
4322         else
4323                 return 4;
4324 }
4325
4326 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4327 {
4328         if (skyhawk_chip(adapter)) {
4329                 adapter->roce_db.size = 4096;
4330                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4331                                                               db_bar(adapter));
4332                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4333                                                                db_bar(adapter));
4334         }
4335         return 0;
4336 }
4337
4338 static int be_map_pci_bars(struct be_adapter *adapter)
4339 {
4340         u8 __iomem *addr;
4341
4342         if (BEx_chip(adapter) && be_physfn(adapter)) {
4343                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
                if (!adapter->csr)
4345                         return -ENOMEM;
4346         }
4347
4348         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
        if (!addr)
4350                 goto pci_map_err;
4351         adapter->db = addr;
4352
4353         be_roce_map_pci_bars(adapter);
4354         return 0;
4355
4356 pci_map_err:
4357         be_unmap_pci_bars(adapter);
4358         return -ENOMEM;
4359 }
4360
4361 static void be_ctrl_cleanup(struct be_adapter *adapter)
4362 {
4363         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4364
4365         be_unmap_pci_bars(adapter);
4366
4367         if (mem->va)
4368                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4369                                   mem->dma);
4370
4371         mem = &adapter->rx_filter;
4372         if (mem->va)
4373                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4374                                   mem->dma);
4375 }
4376
4377 static int be_ctrl_init(struct be_adapter *adapter)
4378 {
4379         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4380         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4381         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4382         u32 sli_intf;
4383         int status;
4384
4385         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4386         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4387                                  SLI_INTF_FAMILY_SHIFT;
4388         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4389
4390         status = be_map_pci_bars(adapter);
4391         if (status)
4392                 goto done;
4393
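        /* The mailbox must be 16-byte aligned; over-allocate by 16 bytes
         * and align the virtual and DMA addresses by hand.
         */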
4394         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4395         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4396                                                 mbox_mem_alloc->size,
4397                                                 &mbox_mem_alloc->dma,
4398                                                 GFP_KERNEL);
4399         if (!mbox_mem_alloc->va) {
4400                 status = -ENOMEM;
4401                 goto unmap_pci_bars;
4402         }
4403         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4404         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4405         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4406         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4407
4408         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4409         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4410                                             rx_filter->size, &rx_filter->dma,
4411                                             GFP_KERNEL);
        if (!rx_filter->va) {
4413                 status = -ENOMEM;
4414                 goto free_mbox;
4415         }
4416
4417         mutex_init(&adapter->mbox_lock);
4418         spin_lock_init(&adapter->mcc_lock);
4419         spin_lock_init(&adapter->mcc_cq_lock);
4420
4421         init_completion(&adapter->et_cmd_compl);
4422         pci_save_state(adapter->pdev);
4423         return 0;
4424
4425 free_mbox:
4426         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4427                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4428
4429 unmap_pci_bars:
4430         be_unmap_pci_bars(adapter);
4431
4432 done:
4433         return status;
4434 }
4435
4436 static void be_stats_cleanup(struct be_adapter *adapter)
4437 {
4438         struct be_dma_mem *cmd = &adapter->stats_cmd;
4439
4440         if (cmd->va)
4441                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4442                                   cmd->va, cmd->dma);
4443 }
4444
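/* Size the stats DMA buffer for the command layout this ASIC generation
 * uses (Lancer pport stats, v0 for BE2, v1 for BE3, v2 otherwise).
 */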
4445 static int be_stats_init(struct be_adapter *adapter)
4446 {
4447         struct be_dma_mem *cmd = &adapter->stats_cmd;
4448
4449         if (lancer_chip(adapter))
4450                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4451         else if (BE2_chip(adapter))
4452                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4453         else if (BE3_chip(adapter))
4454                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4455         else
4456                 /* ALL non-BE ASICs */
4457                 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4458
4459         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4460                                       GFP_KERNEL);
        if (!cmd->va)
                return -ENOMEM;
4463         return 0;
4464 }
4465
4466 static void be_remove(struct pci_dev *pdev)
4467 {
4468         struct be_adapter *adapter = pci_get_drvdata(pdev);
4469
4470         if (!adapter)
4471                 return;
4472
4473         be_roce_dev_remove(adapter);
4474         be_intr_set(adapter, false);
4475
4476         cancel_delayed_work_sync(&adapter->func_recovery_work);
4477
4478         unregister_netdev(adapter->netdev);
4479
4480         be_clear(adapter);
4481
4482         /* tell fw we're done with firing cmds */
4483         be_cmd_fw_clean(adapter);
4484
4485         be_stats_cleanup(adapter);
4486
4487         be_ctrl_cleanup(adapter);
4488
4489         pci_disable_pcie_error_reporting(pdev);
4490
4491         pci_release_regions(pdev);
4492         pci_disable_device(pdev);
4493
4494         free_netdev(adapter->netdev);
4495 }
4496
4497 static int be_get_initial_config(struct be_adapter *adapter)
4498 {
4499         int status, level;
4500
4501         status = be_cmd_get_cntl_attributes(adapter);
4502         if (status)
4503                 return status;
4504
4505         /* Must be a power of 2 or else MODULO will BUG_ON */
4506         adapter->be_get_temp_freq = 64;
4507
4508         if (BEx_chip(adapter)) {
4509                 level = be_cmd_get_fw_log_level(adapter);
4510                 adapter->msg_enable =
4511                         level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4512         }
4513
4514         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4515         return 0;
4516 }
4517
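/* Recover a Lancer function after a FW error: wait for the FW to become
 * ready again, then rebuild the adapter from scratch (close, clear,
 * setup, re-open).
 */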
4518 static int lancer_recover_func(struct be_adapter *adapter)
4519 {
4520         struct device *dev = &adapter->pdev->dev;
4521         int status;
4522
4523         status = lancer_test_and_set_rdy_state(adapter);
4524         if (status)
4525                 goto err;
4526
4527         if (netif_running(adapter->netdev))
4528                 be_close(adapter->netdev);
4529
4530         be_clear(adapter);
4531
4532         be_clear_all_error(adapter);
4533
4534         status = be_setup(adapter);
4535         if (status)
4536                 goto err;
4537
4538         if (netif_running(adapter->netdev)) {
4539                 status = be_open(adapter->netdev);
4540                 if (status)
4541                         goto err;
4542         }
4543
        dev_info(dev, "Adapter recovery successful\n");
4545         return 0;
4546 err:
4547         if (status == -EAGAIN)
4548                 dev_err(dev, "Waiting for resource provisioning\n");
4549         else
4550                 dev_err(dev, "Adapter recovery failed\n");
4551
4552         return status;
4553 }
4554
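/* Runs every second (scheduled from probe/resume, and rescheduled below)
 * to poll for HW errors; on Lancer it also drives the actual recovery.
 */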
4555 static void be_func_recovery_task(struct work_struct *work)
4556 {
4557         struct be_adapter *adapter =
                container_of(work, struct be_adapter, func_recovery_work.work);
4559         int status = 0;
4560
4561         be_detect_error(adapter);
4562
        if (adapter->hw_error && lancer_chip(adapter)) {
                rtnl_lock();
4566                 netif_device_detach(adapter->netdev);
4567                 rtnl_unlock();
4568
4569                 status = lancer_recover_func(adapter);
4570                 if (!status)
4571                         netif_device_attach(adapter->netdev);
4572         }
4573
4574         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4575          * no need to attempt further recovery.
4576          */
4577         if (!status || status == -EAGAIN)
4578                 schedule_delayed_work(&adapter->func_recovery_work,
4579                                       msecs_to_jiffies(1000));
4580 }
4581
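/* Periodic (1 second) housekeeping: reap MCC completions while interrupts
 * are off, refresh HW stats and die temperature, replenish any starved
 * RX queues, and adapt interrupt coalescing.
 */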
4582 static void be_worker(struct work_struct *work)
4583 {
4584         struct be_adapter *adapter =
4585                 container_of(work, struct be_adapter, work.work);
4586         struct be_rx_obj *rxo;
4587         int i;
4588
        /* When interrupts are not yet enabled, just reap any pending
         * MCC completions.
         */
4591         if (!netif_running(adapter->netdev)) {
4592                 local_bh_disable();
4593                 be_process_mcc(adapter);
4594                 local_bh_enable();
4595                 goto reschedule;
4596         }
4597
4598         if (!adapter->stats_cmd_sent) {
4599                 if (lancer_chip(adapter))
                        lancer_cmd_get_pport_stats(adapter,
                                                   &adapter->stats_cmd);
4602                 else
4603                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4604         }
4605
4606         if (be_physfn(adapter) &&
4607             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4608                 be_cmd_get_die_temperature(adapter);
4609
4610         for_all_rx_queues(adapter, rxo, i) {
4611                 /* Replenish RX-queues starved due to memory
4612                  * allocation failures.
4613                  */
4614                 if (rxo->rx_post_starved)
4615                         be_post_rx_frags(rxo, GFP_KERNEL);
4616         }
4617
4618         be_eqd_update(adapter);
4619
4620 reschedule:
4621         adapter->work_counter++;
4622         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4623 }
4624
/* If any VFs are already enabled, don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
        return pci_num_vf(adapter->pdev) == 0;
}
4630
4631 static char *mc_name(struct be_adapter *adapter)
4632 {
4633         char *str = ""; /* default */
4634
4635         switch (adapter->mc_type) {
4636         case UMC:
4637                 str = "UMC";
4638                 break;
4639         case FLEX10:
4640                 str = "FLEX10";
4641                 break;
4642         case vNIC1:
4643                 str = "vNIC-1";
4644                 break;
4645         case nPAR:
4646                 str = "nPAR";
4647                 break;
4648         case UFP:
4649                 str = "UFP";
4650                 break;
4651         case vNIC2:
4652                 str = "vNIC-2";
4653                 break;
        default:
                break;
4656         }
4657
4658         return str;
4659 }
4660
4661 static inline char *func_name(struct be_adapter *adapter)
4662 {
4663         return be_physfn(adapter) ? "PF" : "VF";
4664 }
4665
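/* PCI probe: bring the function up from scratch. Enable the device, map
 * BARs and set the DMA mask, sync with the FW, optionally FLR the
 * function, allocate control/stats structures, then create and register
 * the netdev and kick off the periodic worker tasks.
 */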
4666 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4667 {
4668         int status = 0;
4669         struct be_adapter *adapter;
4670         struct net_device *netdev;
4671         char port_name;
4672
4673         status = pci_enable_device(pdev);
4674         if (status)
4675                 goto do_none;
4676
4677         status = pci_request_regions(pdev, DRV_NAME);
4678         if (status)
4679                 goto disable_dev;
4680         pci_set_master(pdev);
4681
4682         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
        if (!netdev) {
4684                 status = -ENOMEM;
4685                 goto rel_reg;
4686         }
4687         adapter = netdev_priv(netdev);
4688         adapter->pdev = pdev;
4689         pci_set_drvdata(pdev, adapter);
4690         adapter->netdev = netdev;
4691         SET_NETDEV_DEV(netdev, &pdev->dev);
4692
4693         status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4694         if (!status) {
4695                 netdev->features |= NETIF_F_HIGHDMA;
4696         } else {
                status = dma_set_mask_and_coherent(&pdev->dev,
                                                   DMA_BIT_MASK(32));
4698                 if (status) {
4699                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4700                         goto free_netdev;
4701                 }
4702         }
4703
4704         if (be_physfn(adapter)) {
4705                 status = pci_enable_pcie_error_reporting(pdev);
4706                 if (!status)
4707                         dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4708         }
4709
4710         status = be_ctrl_init(adapter);
4711         if (status)
4712                 goto free_netdev;
4713
4714         /* sync up with fw's ready state */
4715         if (be_physfn(adapter)) {
4716                 status = be_fw_wait_ready(adapter);
4717                 if (status)
4718                         goto ctrl_clean;
4719         }
4720
4721         if (be_reset_required(adapter)) {
4722                 status = be_cmd_reset_function(adapter);
4723                 if (status)
4724                         goto ctrl_clean;
4725
4726                 /* Wait for interrupts to quiesce after an FLR */
4727                 msleep(100);
4728         }
4729
4730         /* Allow interrupts for other ULPs running on NIC function */
4731         be_intr_set(adapter, true);
4732
4733         /* tell fw we're ready to fire cmds */
4734         status = be_cmd_fw_init(adapter);
4735         if (status)
4736                 goto ctrl_clean;
4737
4738         status = be_stats_init(adapter);
4739         if (status)
4740                 goto ctrl_clean;
4741
4742         status = be_get_initial_config(adapter);
4743         if (status)
4744                 goto stats_clean;
4745
4746         INIT_DELAYED_WORK(&adapter->work, be_worker);
4747         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4748         adapter->rx_fc = adapter->tx_fc = true;
4749
4750         status = be_setup(adapter);
4751         if (status)
4752                 goto stats_clean;
4753
4754         be_netdev_init(netdev);
4755         status = register_netdev(netdev);
4756         if (status != 0)
4757                 goto unsetup;
4758
4759         be_roce_dev_add(adapter);
4760
4761         schedule_delayed_work(&adapter->func_recovery_work,
4762                               msecs_to_jiffies(1000));
4763
4764         be_cmd_query_port_name(adapter, &port_name);
4765
4766         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4767                  func_name(adapter), mc_name(adapter), port_name);
4768
4769         return 0;
4770
4771 unsetup:
4772         be_clear(adapter);
4773 stats_clean:
4774         be_stats_cleanup(adapter);
4775 ctrl_clean:
4776         be_ctrl_cleanup(adapter);
4777 free_netdev:
4778         free_netdev(netdev);
4779 rel_reg:
4780         pci_release_regions(pdev);
4781 disable_dev:
4782         pci_disable_device(pdev);
4783 do_none:
4784         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4785         return status;
4786 }
4787
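/* Legacy PM suspend: arm wake-on-LAN if configured, quiesce the interface
 * and tear the adapter down before powering off the device; be_resume()
 * rebuilds everything on the way back up.
 */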
4788 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4789 {
4790         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
4792
4793         if (adapter->wol_en)
4794                 be_setup_wol(adapter, true);
4795
4796         be_intr_set(adapter, false);
4797         cancel_delayed_work_sync(&adapter->func_recovery_work);
4798
4799         netif_device_detach(netdev);
4800         if (netif_running(netdev)) {
4801                 rtnl_lock();
4802                 be_close(netdev);
4803                 rtnl_unlock();
4804         }
4805         be_clear(adapter);
4806
4807         pci_save_state(pdev);
4808         pci_disable_device(pdev);
4809         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4810         return 0;
4811 }
4812
4813 static int be_resume(struct pci_dev *pdev)
4814 {
4815         int status = 0;
4816         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
4818
4819         netif_device_detach(netdev);
4820
4821         status = pci_enable_device(pdev);
4822         if (status)
4823                 return status;
4824
4825         pci_set_power_state(pdev, PCI_D0);
4826         pci_restore_state(pdev);
4827
4828         status = be_fw_wait_ready(adapter);
4829         if (status)
4830                 return status;
4831
4832         be_intr_set(adapter, true);
4833         /* tell fw we're ready to fire cmds */
4834         status = be_cmd_fw_init(adapter);
4835         if (status)
4836                 return status;
4837
        status = be_setup(adapter);
        if (status)
                return status;
4839         if (netif_running(netdev)) {
4840                 rtnl_lock();
4841                 be_open(netdev);
4842                 rtnl_unlock();
4843         }
4844
4845         schedule_delayed_work(&adapter->func_recovery_work,
4846                               msecs_to_jiffies(1000));
4847         netif_device_attach(netdev);
4848
4849         if (adapter->wol_en)
4850                 be_setup_wol(adapter, false);
4851
4852         return 0;
4853 }
4854
4855 /*
4856  * An FLR will stop BE from DMAing any data.
4857  */
4858 static void be_shutdown(struct pci_dev *pdev)
4859 {
4860         struct be_adapter *adapter = pci_get_drvdata(pdev);
4861
4862         if (!adapter)
4863                 return;
4864
4865         cancel_delayed_work_sync(&adapter->work);
4866         cancel_delayed_work_sync(&adapter->func_recovery_work);
4867
4868         netif_device_detach(adapter->netdev);
4869
4870         be_cmd_reset_function(adapter);
4871
4872         pci_disable_device(pdev);
4873 }
4874
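/* PCI EEH (AER) callbacks: on a channel error the device is detached and
 * cleaned up, the slot is reset, and be_eeh_resume() re-initializes the
 * function once the FW is ready again.
 */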
4875 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4876                                             pci_channel_state_t state)
4877 {
4878         struct be_adapter *adapter = pci_get_drvdata(pdev);
4879         struct net_device *netdev =  adapter->netdev;
4880
4881         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4882
4883         if (!adapter->eeh_error) {
4884                 adapter->eeh_error = true;
4885
4886                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4887
4888                 rtnl_lock();
4889                 netif_device_detach(netdev);
4890                 if (netif_running(netdev))
4891                         be_close(netdev);
4892                 rtnl_unlock();
4893
4894                 be_clear(adapter);
4895         }
4896
4897         if (state == pci_channel_io_perm_failure)
4898                 return PCI_ERS_RESULT_DISCONNECT;
4899
4900         pci_disable_device(pdev);
4901
4902         /* The error could cause the FW to trigger a flash debug dump.
4903          * Resetting the card while flash dump is in progress
4904          * can cause it not to recover; wait for it to finish.
4905          * Wait only for first function as it is needed only once per
4906          * adapter.
4907          */
4908         if (pdev->devfn == 0)
4909                 ssleep(30);
4910
4911         return PCI_ERS_RESULT_NEED_RESET;
4912 }
4913
4914 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4915 {
4916         struct be_adapter *adapter = pci_get_drvdata(pdev);
4917         int status;
4918
4919         dev_info(&adapter->pdev->dev, "EEH reset\n");
4920
4921         status = pci_enable_device(pdev);
4922         if (status)
4923                 return PCI_ERS_RESULT_DISCONNECT;
4924
4925         pci_set_master(pdev);
4926         pci_set_power_state(pdev, PCI_D0);
4927         pci_restore_state(pdev);
4928
4929         /* Check if card is ok and fw is ready */
4930         dev_info(&adapter->pdev->dev,
4931                  "Waiting for FW to be ready after EEH reset\n");
4932         status = be_fw_wait_ready(adapter);
4933         if (status)
4934                 return PCI_ERS_RESULT_DISCONNECT;
4935
4936         pci_cleanup_aer_uncorrect_error_status(pdev);
4937         be_clear_all_error(adapter);
4938         return PCI_ERS_RESULT_RECOVERED;
4939 }
4940
4941 static void be_eeh_resume(struct pci_dev *pdev)
4942 {
4943         int status = 0;
4944         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
4946
4947         dev_info(&adapter->pdev->dev, "EEH resume\n");
4948
4949         pci_save_state(pdev);
4950
4951         status = be_cmd_reset_function(adapter);
4952         if (status)
4953                 goto err;
4954
4955         /* tell fw we're ready to fire cmds */
4956         status = be_cmd_fw_init(adapter);
4957         if (status)
4958                 goto err;
4959
4960         status = be_setup(adapter);
4961         if (status)
4962                 goto err;
4963
4964         if (netif_running(netdev)) {
4965                 status = be_open(netdev);
4966                 if (status)
4967                         goto err;
4968         }
4969
4970         schedule_delayed_work(&adapter->func_recovery_work,
4971                               msecs_to_jiffies(1000));
4972         netif_device_attach(netdev);
4973         return;
4974 err:
4975         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4976 }
4977
4978 static const struct pci_error_handlers be_eeh_handlers = {
4979         .error_detected = be_eeh_err_detected,
4980         .slot_reset = be_eeh_reset,
4981         .resume = be_eeh_resume,
4982 };
4983
4984 static struct pci_driver be_driver = {
4985         .name = DRV_NAME,
4986         .id_table = be_dev_ids,
4987         .probe = be_probe,
4988         .remove = be_remove,
4989         .suspend = be_suspend,
4990         .resume = be_resume,
4991         .shutdown = be_shutdown,
4992         .err_handler = &be_eeh_handlers
4993 };
4994
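/* A hypothetical invocation, for illustration only, using this driver's
 * num_vfs and rx_frag_size module parameters:
 *
 *   modprobe be2net num_vfs=4 rx_frag_size=4096
 *
 * rx_frag_size is validated below and falls back to 2048 if it is not
 * one of 2048/4096/8192.
 */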
4995 static int __init be_init_module(void)
4996 {
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                pr_warn(DRV_NAME " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
                rx_frag_size = 2048;
        }
5004
5005         return pci_register_driver(&be_driver);
5006 }
5007 module_init(be_init_module);
5008
5009 static void __exit be_exit_module(void)
5010 {
5011         pci_unregister_driver(&be_driver);
5012 }
5013 module_exit(be_exit_module);