/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

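/* The be_queue_{alloc,free} helpers below manage the DMA-coherent memory
 * (q->dma_mem) that backs an adapter ring of 'len' entries.
 */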
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                          u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

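/* Enable or disable host interrupts via the HOSTINTR bit of the PCICFG
 * MEMBAR interrupt-control register; the register is written only when
 * the state actually changes.
 */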
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

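/* The be_{rxq,txq,eq,cq}_notify helpers below ring the corresponding
 * doorbell register: they post produced RX buffers or TX WRBs to the HW
 * and, for event/completion queues, ack popped entries and optionally
 * re-arm the queue.
 */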
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                         bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

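/* ndo_set_mac_address handler: programs the new MAC with PMAC_ADD and
 * then reads back the active MAC from the FW, since a VF that lacks the
 * FILTMGMT privilege (or was not pre-provisioned by the PF) cannot
 * actually change its MAC.
 */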
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Proceed further only if the user-provided MAC is different
         * from the active MAC
         */
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC is successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
                                       adapter->if_handle, true, 0);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (!ether_addr_equal(addr->sa_data, mac)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered = port_stats->rx_address_filtered +
                                    port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered = pport_stats->rx_address_filtered +
                                    pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

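/* Accumulate a 16-bit HW counter into a 32-bit SW counter, compensating
 * for the HW counter wrapping past 65535 between two reads.
 */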
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   ((x) & 0xFFFF)
#define hi(x)                   ((x) & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                               struct be_rx_obj *rxo, u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* This erx HW counter wraps around after 65535; the
                 * driver accumulates it into a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

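/* Parse the chip-specific stats-command response into the uniform
 * driver stats (adapter->drv_stats) and the per-RXQ ERX drop counters.
 */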
void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        /* for BE3 */
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

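/* ndo_get_stats64 handler: aggregates the per-queue SW counters (read
 * under u64_stats sync) with the FW-reported error counters.
 */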
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                                struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_irq(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_irq(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if (link_status)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                               u32 wrb_cnt, u32 copied, u32 gso_segs,
                               bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                           bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}


static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                     struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                           adapter->recommended_prio;

        return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
        return (inner_ip_hdr(skb)->version == 4) ?
                inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
        return (ip_hdr(skb)->version == 4) ?
                ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

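/* Fill the header WRB that precedes the data WRBs of a TX request:
 * LSO/checksum-offload flags, VLAN tag, WRB count and total length.
 */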
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                         struct sk_buff *skb, u32 wrb_cnt, u32 len,
                         bool skip_hw_vlan)
{
        u16 vlan_tag, proto;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                              hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb->encapsulation) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        proto = skb_inner_ip_proto(skb);
                } else {
                        proto = skb_ip_proto(skb);
                }
                if (proto == IPPROTO_TCP)
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (proto == IPPROTO_UDP)
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                          bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

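/* DMA-map the skb head and frags and fill the data WRBs in the TX queue.
 * Returns the number of bytes mapped, or 0 on a DMA-mapping error after
 * unwinding the WRBs already filled.
 */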
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                        struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                        bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

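/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
 * packet data itself, for the cases where HW VLAN tagging must be
 * skipped.
 */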
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* f/w workaround: setting skip_hw_vlan = 1 informs the F/W
                 * to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
                                                  struct sk_buff *skb,
                                                  bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in pvid-tagging mode
         */
        if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert the VLAN in such pkts.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        /* HW may lock up when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
                     (adapter->pvid || adapter->qnq_vid) &&
                     !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
err:
        return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
         * less may cause a transmit stall on that port. So the work-around is
         * to pad short packets (<= 32 bytes) to a 36-byte length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        return NULL;
                skb->len = 36;
        }

        if (BEx_chip(adapter) || lancer_chip(adapter)) {
                skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
                if (!skb)
                        return NULL;
        }

        return skb;
}

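/* ndo_start_xmit handler: applies the HW workarounds, fills the WRBs,
 * stops the subqueue when another max-fragment skb may no longer fit,
 * and finally rings the TX doorbell.
 */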
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
            new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                         "MTU must be between %d and %d bytes\n",
                         BE_MIN_MTU,
                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled. */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                        }
                }
        }

        return status;

set_vlan_promisc:
        if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
                return 0;

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else {
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                return status;

        if (adapter->vlan_tag[vid])
                return status;

        adapter->vlan_tag[vid] = 1;
        adapter->vlans_added++;

        status = be_vid_config(adapter);
        if (status) {
                adapter->vlans_added--;
                adapter->vlan_tag[vid] = 0;
        }

        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        status = be_vid_config(adapter);
        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_clear_promisc(struct be_adapter *adapter)
{
        adapter->promiscuous = false;
        adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

        be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

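/* ndo_set_rx_mode handler: programs promiscuous/allmulti modes and
 * synchronizes the HW unicast and multicast filters with the netdev
 * address lists, falling back to the promisc modes when the HW filters
 * are exhausted.
 */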
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                be_clear_promisc(adapter);
                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev,
                         "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev,
                         "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                        mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                            struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
        vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                          int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan)
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
        } else {
                /* Reset Transparent VLAN Tagging. */
                status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
                                               vf + 1, vf_cfg->if_handle, 0);
        }

        if (!status)
                vf_cfg->vlan_tag = vlan;
        else
                dev_info(&adapter->pdev->dev,
                         "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                             int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
        if (status)
                dev_err(&adapter->pdev->dev,
                        "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

1377 static int be_set_vf_link_state(struct net_device *netdev, int vf,
1378                                 int link_state)
1379 {
1380         struct be_adapter *adapter = netdev_priv(netdev);
1381         int status;
1382
1383         if (!sriov_enabled(adapter))
1384                 return -EPERM;
1385
1386         if (vf >= adapter->num_vfs)
1387                 return -EINVAL;
1388
1389         status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
1390         if (!status)
1391                 adapter->vf_cfg[vf].plink_tracking = link_state;
1392
1393         return status;
1394 }
1395
1396 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1397                           ulong now)
1398 {
1399         aic->rx_pkts_prev = rx_pkts;
1400         aic->tx_reqs_prev = tx_pkts;
1401         aic->jiffies = now;
1402 }
1403
1404 static void be_eqd_update(struct be_adapter *adapter)
1405 {
1406         struct be_set_eqd set_eqd[MAX_EVT_QS];
1407         int eqd, i, num = 0, start;
1408         struct be_aic_obj *aic;
1409         struct be_eq_obj *eqo;
1410         struct be_rx_obj *rxo;
1411         struct be_tx_obj *txo;
1412         u64 rx_pkts, tx_pkts;
1413         ulong now;
1414         u32 pps, delta;
1415
1416         for_all_evt_queues(adapter, eqo, i) {
1417                 aic = &adapter->aic_obj[eqo->idx];
1418                 if (!aic->enable) {
1419                         if (aic->jiffies)
1420                                 aic->jiffies = 0;
1421                         eqd = aic->et_eqd;
1422                         goto modify_eqd;
1423                 }
1424
1425                 rxo = &adapter->rx_obj[eqo->idx];
1426                 do {
1427                         start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
1428                         rx_pkts = rxo->stats.rx_pkts;
1429                 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
1430
1431                 txo = &adapter->tx_obj[eqo->idx];
1432                 do {
1433                         start = u64_stats_fetch_begin_irq(&txo->stats.sync);
1434                         tx_pkts = txo->stats.tx_reqs;
1435                 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
1436
1437
1438                 /* Skip if we wrapped around or this is the first calculation */
1439                 now = jiffies;
1440                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1441                     rx_pkts < aic->rx_pkts_prev ||
1442                     tx_pkts < aic->tx_reqs_prev) {
1443                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1444                         continue;
1445                 }
1446
1447                 delta = jiffies_to_msecs(now - aic->jiffies);
1448                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1449                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1450                 eqd = (pps / 15000) << 2;
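                /* Worked example with illustrative numbers: with a combined
                 * 300000 pkts over a 1000ms window, pps = 300000 and
                 * eqd = (300000 / 15000) << 2 = 80, which is then clamped
                 * to the [min_eqd, max_eqd] range below.
                 */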
1451
1452                 if (eqd < 8)
1453                         eqd = 0;
1454                 eqd = min_t(u32, eqd, aic->max_eqd);
1455                 eqd = max_t(u32, eqd, aic->min_eqd);
1456
1457                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1458 modify_eqd:
1459                 if (eqd != aic->prev_eqd) {
1460                         set_eqd[num].delay_multiplier = (eqd * 65)/100;
1461                         set_eqd[num].eq_id = eqo->q.id;
1462                         aic->prev_eqd = eqd;
1463                         num++;
1464                 }
1465         }
1466
1467         if (num)
1468                 be_cmd_modify_eqd(adapter, set_eqd, num);
1469 }
1470
1471 static void be_rx_stats_update(struct be_rx_obj *rxo,
1472                 struct be_rx_compl_info *rxcp)
1473 {
1474         struct be_rx_stats *stats = rx_stats(rxo);
1475
1476         u64_stats_update_begin(&stats->sync);
1477         stats->rx_compl++;
1478         stats->rx_bytes += rxcp->pkt_size;
1479         stats->rx_pkts++;
1480         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1481                 stats->rx_mcast_pkts++;
1482         if (rxcp->err)
1483                 stats->rx_compl_err++;
1484         u64_stats_update_end(&stats->sync);
1485 }
1486
1487 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1488 {
1489         /* L4 checksum is not reliable for non TCP/UDP packets.
1490          * Also ignore ipcksm for ipv6 pkts
1491          */
1492         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1493                 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
1494 }
1495
1496 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1497 {
1498         struct be_adapter *adapter = rxo->adapter;
1499         struct be_rx_page_info *rx_page_info;
1500         struct be_queue_info *rxq = &rxo->q;
1501         u16 frag_idx = rxq->tail;
1502
1503         rx_page_info = &rxo->page_info_tbl[frag_idx];
1504         BUG_ON(!rx_page_info->page);
1505
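        /* A page backing multiple frags is DMA-unmapped only when its
         * last frag is consumed; for earlier frags a CPU sync of just
         * that one frag is enough.
         */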
1506         if (rx_page_info->last_frag) {
1507                 dma_unmap_page(&adapter->pdev->dev,
1508                                dma_unmap_addr(rx_page_info, bus),
1509                                adapter->big_page_size, DMA_FROM_DEVICE);
1510                 rx_page_info->last_frag = false;
1511         } else {
1512                 dma_sync_single_for_cpu(&adapter->pdev->dev,
1513                                         dma_unmap_addr(rx_page_info, bus),
1514                                         rx_frag_size, DMA_FROM_DEVICE);
1515         }
1516
1517         queue_tail_inc(rxq);
1518         atomic_dec(&rxq->used);
1519         return rx_page_info;
1520 }
1521
1522 /* Throw away the data in the Rx completion */
1523 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1524                                 struct be_rx_compl_info *rxcp)
1525 {
1526         struct be_rx_page_info *page_info;
1527         u16 i, num_rcvd = rxcp->num_rcvd;
1528
1529         for (i = 0; i < num_rcvd; i++) {
1530                 page_info = get_rx_page_info(rxo);
1531                 put_page(page_info->page);
1532                 memset(page_info, 0, sizeof(*page_info));
1533         }
1534 }
1535
1536 /*
1537  * skb_fill_rx_data forms a complete skb for an ether frame
1538  * indicated by rxcp.
1539  */
1540 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1541                              struct be_rx_compl_info *rxcp)
1542 {
1543         struct be_rx_page_info *page_info;
1544         u16 i, j;
1545         u16 hdr_len, curr_frag_len, remaining;
1546         u8 *start;
1547
1548         page_info = get_rx_page_info(rxo);
1549         start = page_address(page_info->page) + page_info->page_offset;
1550         prefetch(start);
1551
1552         /* Copy data in the first descriptor of this completion */
1553         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1554
1555         skb->len = curr_frag_len;
1556         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1557                 memcpy(skb->data, start, curr_frag_len);
1558                 /* Complete packet has now been moved to data */
1559                 put_page(page_info->page);
1560                 skb->data_len = 0;
1561                 skb->tail += curr_frag_len;
1562         } else {
1563                 hdr_len = ETH_HLEN;
1564                 memcpy(skb->data, start, hdr_len);
1565                 skb_shinfo(skb)->nr_frags = 1;
1566                 skb_frag_set_page(skb, 0, page_info->page);
1567                 skb_shinfo(skb)->frags[0].page_offset =
1568                                         page_info->page_offset + hdr_len;
1569                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1570                 skb->data_len = curr_frag_len - hdr_len;
1571                 skb->truesize += rx_frag_size;
1572                 skb->tail += hdr_len;
1573         }
1574         page_info->page = NULL;
1575
1576         if (rxcp->pkt_size <= rx_frag_size) {
1577                 BUG_ON(rxcp->num_rcvd != 1);
1578                 return;
1579         }
1580
1581         /* More frags present for this completion */
1582         remaining = rxcp->pkt_size - curr_frag_len;
1583         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1584                 page_info = get_rx_page_info(rxo);
1585                 curr_frag_len = min(remaining, rx_frag_size);
1586
1587                 /* Coalesce all frags from the same physical page in one slot */
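                /* E.g. with 4K pages and the default 2K rx_frag_size
                 * (illustrative numbers), the second frag carved from a
                 * page lands in the same skb frag slot as the first.
                 */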
1588                 if (page_info->page_offset == 0) {
1589                         /* Fresh page */
1590                         j++;
1591                         skb_frag_set_page(skb, j, page_info->page);
1592                         skb_shinfo(skb)->frags[j].page_offset =
1593                                                         page_info->page_offset;
1594                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1595                         skb_shinfo(skb)->nr_frags++;
1596                 } else {
1597                         put_page(page_info->page);
1598                 }
1599
1600                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1601                 skb->len += curr_frag_len;
1602                 skb->data_len += curr_frag_len;
1603                 skb->truesize += rx_frag_size;
1604                 remaining -= curr_frag_len;
1605                 page_info->page = NULL;
1606         }
1607         BUG_ON(j > MAX_SKB_FRAGS);
1608 }
1609
1610 /* Process the RX completion indicated by rxcp when GRO is disabled */
1611 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1612                                 struct be_rx_compl_info *rxcp)
1613 {
1614         struct be_adapter *adapter = rxo->adapter;
1615         struct net_device *netdev = adapter->netdev;
1616         struct sk_buff *skb;
1617
1618         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1619         if (unlikely(!skb)) {
1620                 rx_stats(rxo)->rx_drops_no_skbs++;
1621                 be_rx_compl_discard(rxo, rxcp);
1622                 return;
1623         }
1624
1625         skb_fill_rx_data(rxo, skb, rxcp);
1626
1627         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1628                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1629         else
1630                 skb_checksum_none_assert(skb);
1631
1632         skb->protocol = eth_type_trans(skb, netdev);
1633         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1634         if (netdev->features & NETIF_F_RXHASH)
1635                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1636
1637         skb->encapsulation = rxcp->tunneled;
1638         skb_mark_napi_id(skb, napi);
1639
1640         if (rxcp->vlanf)
1641                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1642
1643         netif_receive_skb(skb);
1644 }
1645
1646 /* Process the RX completion indicated by rxcp when GRO is enabled */
1647 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1648                                     struct napi_struct *napi,
1649                                     struct be_rx_compl_info *rxcp)
1650 {
1651         struct be_adapter *adapter = rxo->adapter;
1652         struct be_rx_page_info *page_info;
1653         struct sk_buff *skb = NULL;
1654         u16 remaining, curr_frag_len;
1655         u16 i, j;
1656
1657         skb = napi_get_frags(napi);
1658         if (!skb) {
1659                 be_rx_compl_discard(rxo, rxcp);
1660                 return;
1661         }
1662
1663         remaining = rxcp->pkt_size;
1664         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1665                 page_info = get_rx_page_info(rxo);
1666
1667                 curr_frag_len = min(remaining, rx_frag_size);
1668
1669                 /* Coalesce all frags from the same physical page in one slot */
1670                 if (i == 0 || page_info->page_offset == 0) {
1671                         /* First frag or Fresh page */
1672                         j++;
1673                         skb_frag_set_page(skb, j, page_info->page);
1674                         skb_shinfo(skb)->frags[j].page_offset =
1675                                                         page_info->page_offset;
1676                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1677                 } else {
1678                         put_page(page_info->page);
1679                 }
1680                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1681                 skb->truesize += rx_frag_size;
1682                 remaining -= curr_frag_len;
1683                 memset(page_info, 0, sizeof(*page_info));
1684         }
1685         BUG_ON(j > MAX_SKB_FRAGS);
1686
1687         skb_shinfo(skb)->nr_frags = j + 1;
1688         skb->len = rxcp->pkt_size;
1689         skb->data_len = rxcp->pkt_size;
1690         skb->ip_summed = CHECKSUM_UNNECESSARY;
1691         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1692         if (adapter->netdev->features & NETIF_F_RXHASH)
1693                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1694
1695         skb->encapsulation = rxcp->tunneled;
1696         skb_mark_napi_id(skb, napi);
1697
1698         if (rxcp->vlanf)
1699                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1700
1701         napi_gro_frags(napi);
1702 }
1703
1704 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1705                                  struct be_rx_compl_info *rxcp)
1706 {
1707         rxcp->pkt_size =
1708                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1709         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1710         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1711         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1712         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1713         rxcp->ip_csum =
1714                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1715         rxcp->l4_csum =
1716                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1717         rxcp->ipv6 =
1718                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1719         rxcp->num_rcvd =
1720                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1721         rxcp->pkt_type =
1722                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1723         rxcp->rss_hash =
1724                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1725         if (rxcp->vlanf) {
1726                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1727                                           compl);
1728                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1729                                                compl);
1730         }
1731         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1732         rxcp->tunneled =
1733                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
1734 }
1735
1736 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1737                                  struct be_rx_compl_info *rxcp)
1738 {
1739         rxcp->pkt_size =
1740                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1741         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1742         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1743         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1744         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1745         rxcp->ip_csum =
1746                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1747         rxcp->l4_csum =
1748                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1749         rxcp->ipv6 =
1750                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1751         rxcp->num_rcvd =
1752                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1753         rxcp->pkt_type =
1754                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1755         rxcp->rss_hash =
1756                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1757         if (rxcp->vlanf) {
1758                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1759                                           compl);
1760                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1761                                                compl);
1762         }
1763         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1764         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1765                                       ip_frag, compl);
1766 }
1767
1768 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1769 {
1770         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1771         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1772         struct be_adapter *adapter = rxo->adapter;
1773
1774         /* For checking the valid bit it is OK to use either definition as the
1775          * valid bit is at the same position in both v0 and v1 Rx compl */
1776         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1777                 return NULL;
1778
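        /* Ensure the rest of the compl is read only after the valid bit */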
1779         rmb();
1780         be_dws_le_to_cpu(compl, sizeof(*compl));
1781
1782         if (adapter->be3_native)
1783                 be_parse_rx_compl_v1(compl, rxcp);
1784         else
1785                 be_parse_rx_compl_v0(compl, rxcp);
1786
1787         if (rxcp->ip_frag)
1788                 rxcp->l4_csum = 0;
1789
1790         if (rxcp->vlanf) {
1791                 /* In QNQ modes, if qnq bit is not set, then the packet was
1792                  * tagged only with the transparent outer vlan-tag and must
1793                  * not be treated as a vlan packet by host
1794                  */
1795                 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
1796                         rxcp->vlanf = 0;
1797
1798                 if (!lancer_chip(adapter))
1799                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1800
1801                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1802                     !adapter->vlan_tag[rxcp->vlan_tag])
1803                         rxcp->vlanf = 0;
1804         }
1805
1806         /* As the compl has been parsed, reset it; we won't touch it again */
1807         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1808
1809         queue_tail_inc(&rxo->cq);
1810         return rxcp;
1811 }
1812
1813 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1814 {
1815         u32 order = get_order(size);
1816
1817         if (order > 0)
1818                 gfp |= __GFP_COMP;
1819         return  alloc_pages(gfp, order);
1820 }
1821
1822 /*
1823  * Allocate a page, split it to fragments of size rx_frag_size and post as
1824  * receive buffers to BE
1825  */
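/* E.g. with PAGE_SIZE == 4K and rx_frag_size == 2048 (illustrative
 * numbers; big_page_size is computed in be_rx_cqs_create()), each big
 * page is one 4K page yielding two RX fragments, and the fragments of
 * one page share a single DMA mapping.
 */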
1826 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1827 {
1828         struct be_adapter *adapter = rxo->adapter;
1829         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1830         struct be_queue_info *rxq = &rxo->q;
1831         struct page *pagep = NULL;
1832         struct device *dev = &adapter->pdev->dev;
1833         struct be_eth_rx_d *rxd;
1834         u64 page_dmaaddr = 0, frag_dmaaddr;
1835         u32 posted, page_offset = 0;
1836
1837         page_info = &rxo->page_info_tbl[rxq->head];
1838         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1839                 if (!pagep) {
1840                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1841                         if (unlikely(!pagep)) {
1842                                 rx_stats(rxo)->rx_post_fail++;
1843                                 break;
1844                         }
1845                         page_dmaaddr = dma_map_page(dev, pagep, 0,
1846                                                     adapter->big_page_size,
1847                                                     DMA_FROM_DEVICE);
1848                         if (dma_mapping_error(dev, page_dmaaddr)) {
1849                                 put_page(pagep);
1850                                 pagep = NULL;
1851                                 rx_stats(rxo)->rx_post_fail++;
1852                                 break;
1853                         }
1854                         page_offset = 0;
1855                 } else {
1856                         get_page(pagep);
1857                         page_offset += rx_frag_size;
1858                 }
1859                 page_info->page_offset = page_offset;
1860                 page_info->page = pagep;
1861
1862                 rxd = queue_head_node(rxq);
1863                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1864                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1865                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1866
1867                 /* Any space left in the current big page for another frag? */
1868                 if ((page_offset + rx_frag_size + rx_frag_size) >
1869                                         adapter->big_page_size) {
1870                         pagep = NULL;
1871                         page_info->last_frag = true;
1872                         dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1873                 } else {
1874                         dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
1875                 }
1876
1877                 prev_page_info = page_info;
1878                 queue_head_inc(rxq);
1879                 page_info = &rxo->page_info_tbl[rxq->head];
1880         }
1881
1882         /* Mark the last frag of a page when we break out of the above loop
1883          * with no more slots available in the RXQ
1884          */
1885         if (pagep) {
1886                 prev_page_info->last_frag = true;
1887                 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1888         }
1889
1890         if (posted) {
1891                 atomic_add(posted, &rxq->used);
1892                 if (rxo->rx_post_starved)
1893                         rxo->rx_post_starved = false;
1894                 be_rxq_notify(adapter, rxq->id, posted);
1895         } else if (atomic_read(&rxq->used) == 0) {
1896                 /* Let be_worker replenish when memory is available */
1897                 rxo->rx_post_starved = true;
1898         }
1899 }
1900
1901 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1902 {
1903         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1904
1905         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1906                 return NULL;
1907
1908         rmb();
1909         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1910
1911         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1912
1913         queue_tail_inc(tx_cq);
1914         return txcp;
1915 }
1916
1917 static u16 be_tx_compl_process(struct be_adapter *adapter,
1918                 struct be_tx_obj *txo, u16 last_index)
1919 {
1920         struct be_queue_info *txq = &txo->q;
1921         struct be_eth_wrb *wrb;
1922         struct sk_buff **sent_skbs = txo->sent_skb_list;
1923         struct sk_buff *sent_skb;
1924         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1925         bool unmap_skb_hdr = true;
1926
1927         sent_skb = sent_skbs[txq->tail];
1928         BUG_ON(!sent_skb);
1929         sent_skbs[txq->tail] = NULL;
1930
1931         /* skip header wrb */
1932         queue_tail_inc(txq);
1933
1934         do {
1935                 cur_index = txq->tail;
1936                 wrb = queue_tail_node(txq);
1937                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1938                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1939                 unmap_skb_hdr = false;
1940
1941                 num_wrbs++;
1942                 queue_tail_inc(txq);
1943         } while (cur_index != last_index);
1944
1945         dev_kfree_skb_any(sent_skb);
1946         return num_wrbs;
1947 }
1948
1949 /* Return the number of events in the event queue */
1950 static inline int events_get(struct be_eq_obj *eqo)
1951 {
1952         struct be_eq_entry *eqe;
1953         int num = 0;
1954
1955         do {
1956                 eqe = queue_tail_node(&eqo->q);
1957                 if (eqe->evt == 0)
1958                         break;
1959
1960                 rmb();
1961                 eqe->evt = 0;
1962                 num++;
1963                 queue_tail_inc(&eqo->q);
1964         } while (true);
1965
1966         return num;
1967 }
1968
1969 /* Leaves the EQ in disarmed state */
1970 static void be_eq_clean(struct be_eq_obj *eqo)
1971 {
1972         int num = events_get(eqo);
1973
1974         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1975 }
1976
1977 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1978 {
1979         struct be_rx_page_info *page_info;
1980         struct be_queue_info *rxq = &rxo->q;
1981         struct be_queue_info *rx_cq = &rxo->cq;
1982         struct be_rx_compl_info *rxcp;
1983         struct be_adapter *adapter = rxo->adapter;
1984         int flush_wait = 0;
1985
1986         /* Consume pending rx completions.
1987          * Wait for the flush completion (identified by zero num_rcvd)
1988          * to arrive. Notify CQ even when there are no more CQ entries
1989          * for HW to flush partially coalesced CQ entries.
1990          * In Lancer, there is no need to wait for flush compl.
1991          */
1992         for (;;) {
1993                 rxcp = be_rx_compl_get(rxo);
1994                 if (rxcp == NULL) {
1995                         if (lancer_chip(adapter))
1996                                 break;
1997
1998                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1999                                 dev_warn(&adapter->pdev->dev,
2000                                          "did not receive flush compl\n");
2001                                 break;
2002                         }
2003                         be_cq_notify(adapter, rx_cq->id, true, 0);
2004                         mdelay(1);
2005                 } else {
2006                         be_rx_compl_discard(rxo, rxcp);
2007                         be_cq_notify(adapter, rx_cq->id, false, 1);
2008                         if (rxcp->num_rcvd == 0)
2009                                 break;
2010                 }
2011         }
2012
2013         /* After cleanup, leave the CQ in unarmed state */
2014         be_cq_notify(adapter, rx_cq->id, false, 0);
2015
2016         /* Then free posted rx buffers that were not used */
2017         while (atomic_read(&rxq->used) > 0) {
2018                 page_info = get_rx_page_info(rxo);
2019                 put_page(page_info->page);
2020                 memset(page_info, 0, sizeof(*page_info));
2021         }
2022         BUG_ON(atomic_read(&rxq->used));
2023         rxq->tail = rxq->head = 0;
2024 }
2025
2026 static void be_tx_compl_clean(struct be_adapter *adapter)
2027 {
2028         struct be_tx_obj *txo;
2029         struct be_queue_info *txq;
2030         struct be_eth_tx_compl *txcp;
2031         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2032         struct sk_buff *sent_skb;
2033         bool dummy_wrb;
2034         int i, pending_txqs;
2035
2036         /* Stop polling for compls when HW has been silent for 10ms */
2037         do {
2038                 pending_txqs = adapter->num_tx_qs;
2039
2040                 for_all_tx_queues(adapter, txo, i) {
2041                         cmpl = 0;
2042                         num_wrbs = 0;
2043                         txq = &txo->q;
2044                         while ((txcp = be_tx_compl_get(&txo->cq))) {
2045                                 end_idx =
2046                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
2047                                                       wrb_index, txcp);
2048                                 num_wrbs += be_tx_compl_process(adapter, txo,
2049                                                                 end_idx);
2050                                 cmpl++;
2051                         }
2052                         if (cmpl) {
2053                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2054                                 atomic_sub(num_wrbs, &txq->used);
2055                                 timeo = 0;
2056                         }
2057                         if (atomic_read(&txq->used) == 0)
2058                                 pending_txqs--;
2059                 }
2060
2061                 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
2062                         break;
2063
2064                 mdelay(1);
2065         } while (true);
2066
2067         for_all_tx_queues(adapter, txo, i) {
2068                 txq = &txo->q;
2069                 if (atomic_read(&txq->used))
2070                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2071                                 atomic_read(&txq->used));
2072
2073                 /* free posted tx for which compls will never arrive */
2074                 while (atomic_read(&txq->used)) {
2075                         sent_skb = txo->sent_skb_list[txq->tail];
2076                         end_idx = txq->tail;
2077                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2078                                                    &dummy_wrb);
2079                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2080                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2081                         atomic_sub(num_wrbs, &txq->used);
2082                 }
2083         }
2084 }
2085
2086 static void be_evt_queues_destroy(struct be_adapter *adapter)
2087 {
2088         struct be_eq_obj *eqo;
2089         int i;
2090
2091         for_all_evt_queues(adapter, eqo, i) {
2092                 if (eqo->q.created) {
2093                         be_eq_clean(eqo);
2094                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2095                         napi_hash_del(&eqo->napi);
2096                         netif_napi_del(&eqo->napi);
2097                 }
2098                 be_queue_free(adapter, &eqo->q);
2099         }
2100 }
2101
2102 static int be_evt_queues_create(struct be_adapter *adapter)
2103 {
2104         struct be_queue_info *eq;
2105         struct be_eq_obj *eqo;
2106         struct be_aic_obj *aic;
2107         int i, rc;
2108
2109         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2110                                     adapter->cfg_num_qs);
2111
2112         for_all_evt_queues(adapter, eqo, i) {
2113                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2114                                BE_NAPI_WEIGHT);
2115                 napi_hash_add(&eqo->napi);
2116                 aic = &adapter->aic_obj[i];
2117                 eqo->adapter = adapter;
2118                 eqo->tx_budget = BE_TX_BUDGET;
2119                 eqo->idx = i;
2120                 aic->max_eqd = BE_MAX_EQD;
2121                 aic->enable = true;
2122
2123                 eq = &eqo->q;
2124                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2125                                         sizeof(struct be_eq_entry));
2126                 if (rc)
2127                         return rc;
2128
2129                 rc = be_cmd_eq_create(adapter, eqo);
2130                 if (rc)
2131                         return rc;
2132         }
2133         return 0;
2134 }
2135
2136 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2137 {
2138         struct be_queue_info *q;
2139
2140         q = &adapter->mcc_obj.q;
2141         if (q->created)
2142                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2143         be_queue_free(adapter, q);
2144
2145         q = &adapter->mcc_obj.cq;
2146         if (q->created)
2147                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2148         be_queue_free(adapter, q);
2149 }
2150
2151 /* Must be called only after TX qs are created as MCC shares TX EQ */
2152 static int be_mcc_queues_create(struct be_adapter *adapter)
2153 {
2154         struct be_queue_info *q, *cq;
2155
2156         cq = &adapter->mcc_obj.cq;
2157         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2158                         sizeof(struct be_mcc_compl)))
2159                 goto err;
2160
2161         /* Use the default EQ for MCC completions */
2162         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2163                 goto mcc_cq_free;
2164
2165         q = &adapter->mcc_obj.q;
2166         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2167                 goto mcc_cq_destroy;
2168
2169         if (be_cmd_mccq_create(adapter, q, cq))
2170                 goto mcc_q_free;
2171
2172         return 0;
2173
2174 mcc_q_free:
2175         be_queue_free(adapter, q);
2176 mcc_cq_destroy:
2177         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2178 mcc_cq_free:
2179         be_queue_free(adapter, cq);
2180 err:
2181         return -1;
2182 }
2183
2184 static void be_tx_queues_destroy(struct be_adapter *adapter)
2185 {
2186         struct be_queue_info *q;
2187         struct be_tx_obj *txo;
2188         u8 i;
2189
2190         for_all_tx_queues(adapter, txo, i) {
2191                 q = &txo->q;
2192                 if (q->created)
2193                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2194                 be_queue_free(adapter, q);
2195
2196                 q = &txo->cq;
2197                 if (q->created)
2198                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2199                 be_queue_free(adapter, q);
2200         }
2201 }
2202
2203 static int be_tx_qs_create(struct be_adapter *adapter)
2204 {
2205         struct be_queue_info *cq, *eq;
2206         struct be_tx_obj *txo;
2207         int status, i;
2208
2209         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2210
2211         for_all_tx_queues(adapter, txo, i) {
2212                 cq = &txo->cq;
2213                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2214                                         sizeof(struct be_eth_tx_compl));
2215                 if (status)
2216                         return status;
2217
2218                 u64_stats_init(&txo->stats.sync);
2219                 u64_stats_init(&txo->stats.sync_compl);
2220
2221                 /* If num_evt_qs is less than num_tx_qs, more than one
2222                  * TXQ shares an EQ
2223                  */
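                /* E.g. num_tx_qs == 4 with num_evt_qs == 2 (illustrative)
                 * maps txq0/txq2 onto eq0 and txq1/txq3 onto eq1.
                 */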
2224                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2225                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2226                 if (status)
2227                         return status;
2228
2229                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2230                                         sizeof(struct be_eth_wrb));
2231                 if (status)
2232                         return status;
2233
2234                 status = be_cmd_txq_create(adapter, txo);
2235                 if (status)
2236                         return status;
2237         }
2238
2239         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2240                  adapter->num_tx_qs);
2241         return 0;
2242 }
2243
2244 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2245 {
2246         struct be_queue_info *q;
2247         struct be_rx_obj *rxo;
2248         int i;
2249
2250         for_all_rx_queues(adapter, rxo, i) {
2251                 q = &rxo->cq;
2252                 if (q->created)
2253                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2254                 be_queue_free(adapter, q);
2255         }
2256 }
2257
2258 static int be_rx_cqs_create(struct be_adapter *adapter)
2259 {
2260         struct be_queue_info *eq, *cq;
2261         struct be_rx_obj *rxo;
2262         int rc, i;
2263
2264         /* We can create as many RSS rings as there are EQs. */
2265         adapter->num_rx_qs = adapter->num_evt_qs;
2266
2267         /* We'll use RSS only if at least 2 RSS rings are supported.
2268          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2269          */
2270         if (adapter->num_rx_qs > 1)
2271                 adapter->num_rx_qs++;
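        /* E.g. 4 EQs (illustrative) yield 4 RSS rings plus the default
         * RXQ, i.e. num_rx_qs == 5.
         */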
2272
2273         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2274         for_all_rx_queues(adapter, rxo, i) {
2275                 rxo->adapter = adapter;
2276                 cq = &rxo->cq;
2277                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2278                                 sizeof(struct be_eth_rx_compl));
2279                 if (rc)
2280                         return rc;
2281
2282                 u64_stats_init(&rxo->stats.sync);
2283                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2284                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2285                 if (rc)
2286                         return rc;
2287         }
2288
2289         dev_info(&adapter->pdev->dev,
2290                  "created %d RSS queue(s) and 1 default RX queue\n",
2291                  adapter->num_rx_qs - 1);
2292         return 0;
2293 }
2294
2295 static irqreturn_t be_intx(int irq, void *dev)
2296 {
2297         struct be_eq_obj *eqo = dev;
2298         struct be_adapter *adapter = eqo->adapter;
2299         int num_evts = 0;
2300
2301         /* IRQ is not expected when NAPI is scheduled as the EQ
2302          * will not be armed.
2303          * But, this can happen on Lancer INTx where it takes
2304          * a while to de-assert INTx or in BE2 where occasionally
2305          * an interrupt may be raised even when EQ is unarmed.
2306          * If NAPI is already scheduled, then counting & notifying
2307          * events will orphan them.
2308          */
2309         if (napi_schedule_prep(&eqo->napi)) {
2310                 num_evts = events_get(eqo);
2311                 __napi_schedule(&eqo->napi);
2312                 if (num_evts)
2313                         eqo->spurious_intr = 0;
2314         }
2315         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2316
2317         /* Return IRQ_HANDLED only for the first spurious intr
2318          * after a valid intr to stop the kernel from branding
2319          * this irq as a bad one!
2320          */
2321         if (num_evts || eqo->spurious_intr++ == 0)
2322                 return IRQ_HANDLED;
2323         else
2324                 return IRQ_NONE;
2325 }
2326
2327 static irqreturn_t be_msix(int irq, void *dev)
2328 {
2329         struct be_eq_obj *eqo = dev;
2330
2331         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2332         napi_schedule(&eqo->napi);
2333         return IRQ_HANDLED;
2334 }
2335
2336 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2337 {
2338         return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2339 }
2340
2341 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2342                         int budget, int polling)
2343 {
2344         struct be_adapter *adapter = rxo->adapter;
2345         struct be_queue_info *rx_cq = &rxo->cq;
2346         struct be_rx_compl_info *rxcp;
2347         u32 work_done;
2348
2349         for (work_done = 0; work_done < budget; work_done++) {
2350                 rxcp = be_rx_compl_get(rxo);
2351                 if (!rxcp)
2352                         break;
2353
2354                 /* Is it a flush compl that has no data? */
2355                 if (unlikely(rxcp->num_rcvd == 0))
2356                         goto loop_continue;
2357
2358                 /* Discard compl with partial DMA Lancer B0 */
2359                 if (unlikely(!rxcp->pkt_size)) {
2360                         be_rx_compl_discard(rxo, rxcp);
2361                         goto loop_continue;
2362                 }
2363
2364                 /* On BE drop pkts that arrive due to imperfect filtering in
2365          * promiscuous mode on some SKUs
2366                  */
2367                 if (unlikely(rxcp->port != adapter->port_num &&
2368                                 !lancer_chip(adapter))) {
2369                         be_rx_compl_discard(rxo, rxcp);
2370                         goto loop_continue;
2371                 }
2372
2373                 /* Don't do gro when we're busy_polling */
2374                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2375                         be_rx_compl_process_gro(rxo, napi, rxcp);
2376                 else
2377                         be_rx_compl_process(rxo, napi, rxcp);
2378
2379 loop_continue:
2380                 be_rx_stats_update(rxo, rxcp);
2381         }
2382
2383         if (work_done) {
2384                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2385
2386                 /* When an rx-obj gets into post_starved state, just
2387                  * let be_worker do the posting.
2388                  */
2389                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2390                     !rxo->rx_post_starved)
2391                         be_post_rx_frags(rxo, GFP_ATOMIC);
2392         }
2393
2394         return work_done;
2395 }
2396
2397 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2398                           int budget, int idx)
2399 {
2400         struct be_eth_tx_compl *txcp;
2401         int num_wrbs = 0, work_done;
2402
2403         for (work_done = 0; work_done < budget; work_done++) {
2404                 txcp = be_tx_compl_get(&txo->cq);
2405                 if (!txcp)
2406                         break;
2407                 num_wrbs += be_tx_compl_process(adapter, txo,
2408                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2409                                         wrb_index, txcp));
2410         }
2411
2412         if (work_done) {
2413                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2414                 atomic_sub(num_wrbs, &txo->q.used);
2415
2416                 /* As Tx wrbs have been freed up, wake up netdev queue
2417                  * if it was stopped due to lack of tx wrbs.  */
2418                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2419                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2420                         netif_wake_subqueue(adapter->netdev, idx);
2421                 }
2422
2423                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2424                 tx_stats(txo)->tx_compl += work_done;
2425                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2426         }
2427         return (work_done < budget); /* Done */
2428 }
2429
2430 int be_poll(struct napi_struct *napi, int budget)
2431 {
2432         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2433         struct be_adapter *adapter = eqo->adapter;
2434         int max_work = 0, work, i, num_evts;
2435         struct be_rx_obj *rxo;
2436         bool tx_done;
2437
2438         num_evts = events_get(eqo);
2439
2440         /* Process all TXQs serviced by this EQ */
2441         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2442                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2443                                         eqo->tx_budget, i);
2444                 if (!tx_done)
2445                         max_work = budget;
2446         }
2447
2448         if (be_lock_napi(eqo)) {
2449                 /* This loop will iterate twice for EQ0 in which
2450                  * completions of the last RXQ (default one) are also processed
2451                  * For other EQs the loop iterates only once
2452                  */
2453                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2454                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2455                         max_work = max(work, max_work);
2456                 }
2457                 be_unlock_napi(eqo);
2458         } else {
2459                 max_work = budget;
2460         }
2461
2462         if (is_mcc_eqo(eqo))
2463                 be_process_mcc(adapter);
2464
2465         if (max_work < budget) {
2466                 napi_complete(napi);
2467                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2468         } else {
2469                 /* As we'll continue in polling mode, count and clear events */
2470                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2471         }
2472         return max_work;
2473 }
2474
2475 #ifdef CONFIG_NET_RX_BUSY_POLL
2476 static int be_busy_poll(struct napi_struct *napi)
2477 {
2478         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2479         struct be_adapter *adapter = eqo->adapter;
2480         struct be_rx_obj *rxo;
2481         int i, work = 0;
2482
2483         if (!be_lock_busy_poll(eqo))
2484                 return LL_FLUSH_BUSY;
2485
2486         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2487                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2488                 if (work)
2489                         break;
2490         }
2491
2492         be_unlock_busy_poll(eqo);
2493         return work;
2494 }
2495 #endif
2496
2497 void be_detect_error(struct be_adapter *adapter)
2498 {
2499         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2500         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2501         u32 i;
2502         bool error_detected = false;
2503         struct device *dev = &adapter->pdev->dev;
2504         struct net_device *netdev = adapter->netdev;
2505
2506         if (be_hw_error(adapter))
2507                 return;
2508
2509         if (lancer_chip(adapter)) {
2510                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2511                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2512                         sliport_err1 = ioread32(adapter->db +
2513                                         SLIPORT_ERROR1_OFFSET);
2514                         sliport_err2 = ioread32(adapter->db +
2515                                         SLIPORT_ERROR2_OFFSET);
2516                         adapter->hw_error = true;
2517                         /* Do not log error messages if it's a FW reset */
2518                         if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2519                             sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2520                                 dev_info(dev, "Firmware update in progress\n");
2521                         } else {
2522                                 error_detected = true;
2523                                 dev_err(dev, "Error detected in the card\n");
2524                                 dev_err(dev, "ERR: sliport status 0x%x\n",
2525                                         sliport_status);
2526                                 dev_err(dev, "ERR: sliport error1 0x%x\n",
2527                                         sliport_err1);
2528                                 dev_err(dev, "ERR: sliport error2 0x%x\n",
2529                                         sliport_err2);
2530                         }
2531                 }
2532         } else {
2533                 pci_read_config_dword(adapter->pdev,
2534                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2535                 pci_read_config_dword(adapter->pdev,
2536                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2537                 pci_read_config_dword(adapter->pdev,
2538                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2539                 pci_read_config_dword(adapter->pdev,
2540                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2541
2542                 ue_lo = (ue_lo & ~ue_lo_mask);
2543                 ue_hi = (ue_hi & ~ue_hi_mask);
2544
2545                 /* On certain platforms BE hardware can indicate spurious UEs.
2546                  * In case of a real UE the HW stops working on its own, so
2547                  * hw_error is deliberately not set on UE detection.
2548                  */
2549
2550                 if (ue_lo || ue_hi) {
2551                         error_detected = true;
2552                         dev_err(dev,
2553                                 "Unrecoverable Error detected in the adapter");
2554                         dev_err(dev, "Please reboot server to recover");
2555                         if (skyhawk_chip(adapter))
2556                                 adapter->hw_error = true;
2557                         for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2558                                 if (ue_lo & 1)
2559                                         dev_err(dev, "UE: %s bit set\n",
2560                                                 ue_status_low_desc[i]);
2561                         }
2562                         for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2563                                 if (ue_hi & 1)
2564                                         dev_err(dev, "UE: %s bit set\n",
2565                                                 ue_status_hi_desc[i]);
2566                         }
2567                 }
2568         }
2569         if (error_detected)
2570                 netif_carrier_off(netdev);
2571 }
2572
2573 static void be_msix_disable(struct be_adapter *adapter)
2574 {
2575         if (msix_enabled(adapter)) {
2576                 pci_disable_msix(adapter->pdev);
2577                 adapter->num_msix_vec = 0;
2578                 adapter->num_msix_roce_vec = 0;
2579         }
2580 }
2581
2582 static int be_msix_enable(struct be_adapter *adapter)
2583 {
2584         int i, num_vec;
2585         struct device *dev = &adapter->pdev->dev;
2586
2587         /* If RoCE is supported, program the max number of NIC vectors that
2588          * may be configured via set-channels, along with vectors needed for
2589          * RoCE. Otherwise, just program the number we'll use initially.
2590          */
2591         if (be_roce_supported(adapter))
2592                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2593                                 2 * num_online_cpus());
2594         else
2595                 num_vec = adapter->cfg_num_qs;
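        /* E.g. on an 8-CPU host with be_max_eqs() == 16 (illustrative),
         * a RoCE-capable function asks for min(32, 16) == 16 vectors;
         * on success half of them are later reserved for RoCE.
         */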
2596
2597         for (i = 0; i < num_vec; i++)
2598                 adapter->msix_entries[i].entry = i;
2599
2600         num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2601                                         MIN_MSIX_VECTORS, num_vec);
2602         if (num_vec < 0)
2603                 goto fail;
2604
2605         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2606                 adapter->num_msix_roce_vec = num_vec / 2;
2607                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2608                          adapter->num_msix_roce_vec);
2609         }
2610
2611         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2612
2613         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2614                  adapter->num_msix_vec);
2615         return 0;
2616
2617 fail:
2618         dev_warn(dev, "MSIx enable failed\n");
2619
2620         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2621         if (!be_physfn(adapter))
2622                 return num_vec;
2623         return 0;
2624 }
2625
2626 static inline int be_msix_vec_get(struct be_adapter *adapter,
2627                                 struct be_eq_obj *eqo)
2628 {
2629         return adapter->msix_entries[eqo->msix_idx].vector;
2630 }
2631
2632 static int be_msix_register(struct be_adapter *adapter)
2633 {
2634         struct net_device *netdev = adapter->netdev;
2635         struct be_eq_obj *eqo;
2636         int status, i, vec;
2637
2638         for_all_evt_queues(adapter, eqo, i) {
2639                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2640                 vec = be_msix_vec_get(adapter, eqo);
2641                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2642                 if (status)
2643                         goto err_msix;
2644         }
2645
2646         return 0;
2647 err_msix:
2648         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2649                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2650         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2651                 status);
2652         be_msix_disable(adapter);
2653         return status;
2654 }
2655
2656 static int be_irq_register(struct be_adapter *adapter)
2657 {
2658         struct net_device *netdev = adapter->netdev;
2659         int status;
2660
2661         if (msix_enabled(adapter)) {
2662                 status = be_msix_register(adapter);
2663                 if (status == 0)
2664                         goto done;
2665                 /* INTx is not supported for VF */
2666                 if (!be_physfn(adapter))
2667                         return status;
2668         }
2669
2670         /* INTx: only the first EQ is used */
2671         netdev->irq = adapter->pdev->irq;
2672         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2673                              &adapter->eq_obj[0]);
2674         if (status) {
2675                 dev_err(&adapter->pdev->dev,
2676                         "INTx request IRQ failed - err %d\n", status);
2677                 return status;
2678         }
2679 done:
2680         adapter->isr_registered = true;
2681         return 0;
2682 }
2683
2684 static void be_irq_unregister(struct be_adapter *adapter)
2685 {
2686         struct net_device *netdev = adapter->netdev;
2687         struct be_eq_obj *eqo;
2688         int i;
2689
2690         if (!adapter->isr_registered)
2691                 return;
2692
2693         /* INTx */
2694         if (!msix_enabled(adapter)) {
2695                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2696                 goto done;
2697         }
2698
2699         /* MSIx */
2700         for_all_evt_queues(adapter, eqo, i)
2701                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2702
2703 done:
2704         adapter->isr_registered = false;
2705 }
2706
2707 static void be_rx_qs_destroy(struct be_adapter *adapter)
2708 {
2709         struct be_queue_info *q;
2710         struct be_rx_obj *rxo;
2711         int i;
2712
2713         for_all_rx_queues(adapter, rxo, i) {
2714                 q = &rxo->q;
2715                 if (q->created) {
2716                         be_cmd_rxq_destroy(adapter, q);
2717                         be_rx_cq_clean(rxo);
2718                 }
2719                 be_queue_free(adapter, q);
2720         }
2721 }
2722
2723 static int be_close(struct net_device *netdev)
2724 {
2725         struct be_adapter *adapter = netdev_priv(netdev);
2726         struct be_eq_obj *eqo;
2727         int i;
2728
2729         /* This protection is needed as be_close() may be called even when the
2730          * adapter is in cleared state (after eeh perm failure)
2731          */
2732         if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2733                 return 0;
2734
2735         be_roce_dev_close(adapter);
2736
2737         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2738                 for_all_evt_queues(adapter, eqo, i) {
2739                         napi_disable(&eqo->napi);
2740                         be_disable_busy_poll(eqo);
2741                 }
2742                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2743         }
2744
2745         be_async_mcc_disable(adapter);
2746
2747         /* Wait for all pending tx completions to arrive so that
2748          * all tx skbs are freed.
2749          */
2750         netif_tx_disable(netdev);
2751         be_tx_compl_clean(adapter);
2752
2753         be_rx_qs_destroy(adapter);
2754
2755         for (i = 1; i < (adapter->uc_macs + 1); i++)
2756                 be_cmd_pmac_del(adapter, adapter->if_handle,
2757                                 adapter->pmac_id[i], 0);
2758         adapter->uc_macs = 0;
2759
2760         for_all_evt_queues(adapter, eqo, i) {
2761                 if (msix_enabled(adapter))
2762                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2763                 else
2764                         synchronize_irq(netdev->irq);
2765                 be_eq_clean(eqo);
2766         }
2767
2768         be_irq_unregister(adapter);
2769
2770         return 0;
2771 }
2772
2773 static int be_rx_qs_create(struct be_adapter *adapter)
2774 {
2775         struct be_rx_obj *rxo;
2776         int rc, i, j;
2777         u8 rsstable[128];
2778
2779         for_all_rx_queues(adapter, rxo, i) {
2780                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2781                                     sizeof(struct be_eth_rx_d));
2782                 if (rc)
2783                         return rc;
2784         }
2785
2786         /* The FW would like the default RXQ to be created first */
2787         rxo = default_rxo(adapter);
2788         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2789                                adapter->if_handle, false, &rxo->rss_id);
2790         if (rc)
2791                 return rc;
2792
2793         for_all_rss_queues(adapter, rxo, i) {
2794                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2795                                        rx_frag_size, adapter->if_handle,
2796                                        true, &rxo->rss_id);
2797                 if (rc)
2798                         return rc;
2799         }
2800
2801         if (be_multi_rxq(adapter)) {
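                     /* Populate the 128-entry RSS indirection table by
                      * cycling through the RSS queue ids round-robin: with
                      * three RSS queues whose ids are A, B, C the table
                      * reads A B C A B C ... across all 128 slots.
                      */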
2802                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2803                         for_all_rss_queues(adapter, rxo, i) {
2804                                 if ((j + i) >= 128)
2805                                         break;
2806                                 rsstable[j + i] = rxo->rss_id;
2807                         }
2808                 }
2809                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2810                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2811
2812                 if (!BEx_chip(adapter))
2813                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2814                                                 RSS_ENABLE_UDP_IPV6;
2815         } else {
2816                 /* Disable RSS if only the default RXQ is created */
2817                 adapter->rss_flags = RSS_ENABLE_NONE;
2818         }
2819
2820         rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2821                                128);
2822         if (rc) {
2823                 adapter->rss_flags = RSS_ENABLE_NONE;
2824                 return rc;
2825         }
2826
2827         /* First time posting */
2828         for_all_rx_queues(adapter, rxo, i)
2829                 be_post_rx_frags(rxo, GFP_KERNEL);
2830         return 0;
2831 }
2832
2833 static int be_open(struct net_device *netdev)
2834 {
2835         struct be_adapter *adapter = netdev_priv(netdev);
2836         struct be_eq_obj *eqo;
2837         struct be_rx_obj *rxo;
2838         struct be_tx_obj *txo;
2839         u8 link_status;
2840         int status, i;
2841
2842         status = be_rx_qs_create(adapter);
2843         if (status)
2844                 goto err;
2845
2846         status = be_irq_register(adapter);
2847         if (status)
2848                 goto err;
2849
2850         for_all_rx_queues(adapter, rxo, i)
2851                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2852
2853         for_all_tx_queues(adapter, txo, i)
2854                 be_cq_notify(adapter, txo->cq.id, true, 0);
2855
2856         be_async_mcc_enable(adapter);
2857
2858         for_all_evt_queues(adapter, eqo, i) {
2859                 napi_enable(&eqo->napi);
2860                 be_enable_busy_poll(eqo);
2861                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2862         }
2863         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2864
2865         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2866         if (!status)
2867                 be_link_status_update(adapter, link_status);
2868
2869         netif_tx_start_all_queues(netdev);
2870         be_roce_dev_open(adapter);
2871
2872 #ifdef CONFIG_BE2NET_VXLAN
2873         if (skyhawk_chip(adapter))
2874                 vxlan_get_rx_port(netdev);
2875 #endif
2876
2877         return 0;
2878 err:
2879         be_close(adapter->netdev);
2880         return -EIO;
2881 }
2882
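     /* Configure Wake-on-LAN magic-packet support: on enable, the netdev
      * MAC is programmed as the magic-pattern match and PME is armed for
      * D3hot/D3cold; on disable, a zeroed MAC is programmed and PME is
      * disarmed.
      */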
2883 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2884 {
2885         struct be_dma_mem cmd;
2886         int status = 0;
2887         u8 mac[ETH_ALEN];
2888
2889         memset(mac, 0, ETH_ALEN);
2890
2891         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2892         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2893                                      GFP_KERNEL);
2894         if (!cmd.va)
2895                 return -ENOMEM;
2896
2897         if (enable) {
2898                 status = pci_write_config_dword(adapter->pdev,
2899                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2900                 if (status) {
2901                         dev_err(&adapter->pdev->dev,
2902                                 "Could not enable Wake-on-LAN\n");
2903                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2904                                           cmd.dma);
2905                         return status;
2906                 }
2907                 status = be_cmd_enable_magic_wol(adapter,
2908                                 adapter->netdev->dev_addr, &cmd);
2909                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2910                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2911         } else {
2912                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2913                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2914                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2915         }
2916
2917         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2918         return status;
2919 }
2920
2921 /*
2922  * Generate a seed MAC address from the PF MAC address using jhash.
2923  * MAC addresses for the VFs are assigned incrementally from the seed.
2924  * These addresses are programmed in the ASIC by the PF and the VF driver
2925  * queries for the MAC address during its probe.
2926  */
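     /* For reference, a sketch of how be_vf_eth_addr_generate() (defined
      * elsewhere in this file) can derive such a seed; illustrative, not
      * necessarily the verbatim helper:
      *
      *      u32 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
      *
      *      mac[5] = (u8)(addr & 0xFF);
      *      mac[4] = (u8)((addr >> 8) & 0xFF);
      *      mac[3] = (u8)((addr >> 16) & 0xFF);
      *      memcpy(mac, adapter->netdev->dev_addr, 3);    keep the PF OUI
      */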
2927 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2928 {
2929         u32 vf;
2930         int status = 0;
2931         u8 mac[ETH_ALEN];
2932         struct be_vf_cfg *vf_cfg;
2933
2934         be_vf_eth_addr_generate(adapter, mac);
2935
2936         for_all_vfs(adapter, vf_cfg, vf) {
2937                 if (BEx_chip(adapter))
2938                         status = be_cmd_pmac_add(adapter, mac,
2939                                                  vf_cfg->if_handle,
2940                                                  &vf_cfg->pmac_id, vf + 1);
2941                 else
2942                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2943                                                 vf + 1);
2944
2945                 if (status)
2946                         dev_err(&adapter->pdev->dev,
2947                                 "MAC address assignment failed for VF %d\n", vf);
2948                 else
2949                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2950
2951                 mac[5] += 1;
2952         }
2953         return status;
2954 }
2955
2956 static int be_vfs_mac_query(struct be_adapter *adapter)
2957 {
2958         int status, vf;
2959         u8 mac[ETH_ALEN];
2960         struct be_vf_cfg *vf_cfg;
2961
2962         for_all_vfs(adapter, vf_cfg, vf) {
2963                 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2964                                                mac, vf_cfg->if_handle,
2965                                                false, vf + 1);
2966                 if (status)
2967                         return status;
2968                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2969         }
2970         return 0;
2971 }
2972
2973 static void be_vf_clear(struct be_adapter *adapter)
2974 {
2975         struct be_vf_cfg *vf_cfg;
2976         u32 vf;
2977
2978         if (pci_vfs_assigned(adapter->pdev)) {
2979                 dev_warn(&adapter->pdev->dev,
2980                          "VFs are assigned to VMs: not disabling VFs\n");
2981                 goto done;
2982         }
2983
2984         pci_disable_sriov(adapter->pdev);
2985
2986         for_all_vfs(adapter, vf_cfg, vf) {
2987                 if (BEx_chip(adapter))
2988                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2989                                         vf_cfg->pmac_id, vf + 1);
2990                 else
2991                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2992                                        vf + 1);
2993
2994                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2995         }
2996 done:
2997         kfree(adapter->vf_cfg);
2998         adapter->num_vfs = 0;
2999 }
3000
3001 static void be_clear_queues(struct be_adapter *adapter)
3002 {
3003         be_mcc_queues_destroy(adapter);
3004         be_rx_cqs_destroy(adapter);
3005         be_tx_queues_destroy(adapter);
3006         be_evt_queues_destroy(adapter);
3007 }
3008
3009 static void be_cancel_worker(struct be_adapter *adapter)
3010 {
3011         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3012                 cancel_delayed_work_sync(&adapter->work);
3013                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3014         }
3015 }
3016
3017 static void be_mac_clear(struct be_adapter *adapter)
3018 {
3019         int i;
3020
3021         if (adapter->pmac_id) {
3022                 for (i = 0; i < (adapter->uc_macs + 1); i++)
3023                         be_cmd_pmac_del(adapter, adapter->if_handle,
3024                                         adapter->pmac_id[i], 0);
3025                 adapter->uc_macs = 0;
3026
3027                 kfree(adapter->pmac_id);
3028                 adapter->pmac_id = NULL;
3029         }
3030 }
3031
3032 #ifdef CONFIG_BE2NET_VXLAN
3033 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3034 {
3035         if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3036                 be_cmd_manage_iface(adapter, adapter->if_handle,
3037                                     OP_CONVERT_TUNNEL_TO_NORMAL);
3038
3039         if (adapter->vxlan_port)
3040                 be_cmd_set_vxlan_port(adapter, 0);
3041
3042         adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3043         adapter->vxlan_port = 0;
3044 }
3045 #endif
3046
3047 static int be_clear(struct be_adapter *adapter)
3048 {
3049         be_cancel_worker(adapter);
3050
3051         if (sriov_enabled(adapter))
3052                 be_vf_clear(adapter);
3053
3054 #ifdef CONFIG_BE2NET_VXLAN
3055         be_disable_vxlan_offloads(adapter);
3056 #endif
3057         /* Delete the primary MAC along with the uc-mac list */
3058         be_mac_clear(adapter);
3059
3060         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3061
3062         be_clear_queues(adapter);
3063
3064         be_msix_disable(adapter);
3065         adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3066         return 0;
3067 }
3068
3069 static int be_vfs_if_create(struct be_adapter *adapter)
3070 {
3071         struct be_resources res = {0};
3072         struct be_vf_cfg *vf_cfg;
3073         u32 cap_flags, en_flags, vf;
3074         int status = 0;
3075
3076         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3077                     BE_IF_FLAGS_MULTICAST;
3078
3079         for_all_vfs(adapter, vf_cfg, vf) {
3080                 if (!BE3_chip(adapter)) {
3081                         status = be_cmd_get_profile_config(adapter, &res,
3082                                                            vf + 1);
3083                         if (!status)
3084                                 cap_flags = res.if_cap_flags;
3085                 }
3086
3087                 /* If a FW profile exists, then cap_flags are updated */
3088                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3089                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3090                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3091                                           &vf_cfg->if_handle, vf + 1);
3092                 if (status)
3093                         goto err;
3094         }
3095 err:
3096         return status;
3097 }
3098
3099 static int be_vf_setup_init(struct be_adapter *adapter)
3100 {
3101         struct be_vf_cfg *vf_cfg;
3102         int vf;
3103
3104         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3105                                   GFP_KERNEL);
3106         if (!adapter->vf_cfg)
3107                 return -ENOMEM;
3108
3109         for_all_vfs(adapter, vf_cfg, vf) {
3110                 vf_cfg->if_handle = -1;
3111                 vf_cfg->pmac_id = -1;
3112         }
3113         return 0;
3114 }
3115
3116 static int be_vf_setup(struct be_adapter *adapter)
3117 {
3118         struct device *dev = &adapter->pdev->dev;
3119         struct be_vf_cfg *vf_cfg;
3120         int status, old_vfs, vf;
3121         u32 privileges;
3122         u16 lnk_speed;
3123
3124         old_vfs = pci_num_vf(adapter->pdev);
3125         if (old_vfs) {
3126                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3127                 if (old_vfs != num_vfs)
3128                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3129                 adapter->num_vfs = old_vfs;
3130         } else {
3131                 if (num_vfs > be_max_vfs(adapter))
3132                         dev_info(dev, "Device supports %d VFs and not %d\n",
3133                                  be_max_vfs(adapter), num_vfs);
3134                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3135                 if (!adapter->num_vfs)
3136                         return 0;
3137         }
3138
3139         status = be_vf_setup_init(adapter);
3140         if (status)
3141                 goto err;
3142
3143         if (old_vfs) {
3144                 for_all_vfs(adapter, vf_cfg, vf) {
3145                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3146                         if (status)
3147                                 goto err;
3148                 }
3149         } else {
3150                 status = be_vfs_if_create(adapter);
3151                 if (status)
3152                         goto err;
3153         }
3154
3155         if (old_vfs) {
3156                 status = be_vfs_mac_query(adapter);
3157                 if (status)
3158                         goto err;
3159         } else {
3160                 status = be_vf_eth_addr_config(adapter);
3161                 if (status)
3162                         goto err;
3163         }
3164
3165         for_all_vfs(adapter, vf_cfg, vf) {
3166                 /* Allow VFs to program MAC/VLAN filters */
3167                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3168                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3169                         status = be_cmd_set_fn_privileges(adapter,
3170                                                           privileges |
3171                                                           BE_PRIV_FILTMGMT,
3172                                                           vf + 1);
3173                         if (!status)
3174                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3175                                          vf);
3176                 }
3177
3178                 /* BE3 FW, by default, caps the VF TX-rate to 100 Mbps.
3179                  * Allow full available bandwidth
3180                  */
3181                 if (BE3_chip(adapter) && !old_vfs)
3182                         be_cmd_config_qos(adapter, 1000, vf + 1);
3183
3184                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3185                                                   NULL, vf + 1);
3186                 if (!status)
3187                         vf_cfg->tx_rate = lnk_speed;
3188
3189                 if (!old_vfs) {
3190                         be_cmd_enable_vf(adapter, vf + 1);
3191                         be_cmd_set_logical_link_config(adapter,
3192                                                        IFLA_VF_LINK_STATE_AUTO,
3193                                                        vf + 1);
3194                 }
3195         }
3196
3197         if (!old_vfs) {
3198                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3199                 if (status) {
3200                         dev_err(dev, "SRIOV enable failed\n");
3201                         adapter->num_vfs = 0;
3202                         goto err;
3203                 }
3204         }
3205         return 0;
3206 err:
3207         dev_err(dev, "VF setup failed\n");
3208         be_vf_clear(adapter);
3209         return status;
3210 }
3211
3212 /* Convert BE3 function_mode bits to Skyhawk (SH) mc_type enums */
3213
3214 static u8 be_convert_mc_type(u32 function_mode)
3215 {
3216         if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3217                 return vNIC1;
3218         else if (function_mode & FLEX10_MODE)
3219                 return FLEX10;
3220         else if (function_mode & VNIC_MODE)
3221                 return vNIC2;
3222         else if (function_mode & UMC_ENABLED)
3223                 return UMC;
3224         else
3225                 return MC_NONE;
3226 }
3227
3228 /* On BE2/BE3, the FW does not report the supported resource limits */
3229 static void BEx_get_resources(struct be_adapter *adapter,
3230                               struct be_resources *res)
3231 {
3232         struct pci_dev *pdev = adapter->pdev;
3233         bool use_sriov = false;
3234         int max_vfs = 0;
3235
3236         if (be_physfn(adapter) && BE3_chip(adapter)) {
3237                 be_cmd_get_profile_config(adapter, res, 0);
3238                 /* Some old versions of BE3 FW don't report max_vfs value */
3239                 if (res->max_vfs == 0) {
3240                         max_vfs = pci_sriov_get_totalvfs(pdev);
3241                         res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3242                 }
3243                 use_sriov = res->max_vfs && sriov_want(adapter);
3244         }
3245
3246         if (be_physfn(adapter))
3247                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3248         else
3249                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3250
3251         adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3252
3253         if (be_is_mc(adapter)) {
3254                 /* Assuming that there are 4 channels per port,
3255                  * when multi-channel is enabled
3256                  */
3257                 if (be_is_qnq_mode(adapter))
3258                         res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3259                 else
3260                         /* In a non-qnq multichannel mode, the pvid
3261                          * takes up one vlan entry
3262                          */
3263                         res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3264         } else {
3265                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3266         }
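             /* Worked example, assuming BE_NUM_VLANS_SUPPORTED is 64 (its
              * value in be.h at the time of writing): qnq multi-channel
              * mode leaves 64/8 = 8 vlan filters, other multi-channel
              * modes 64/4 - 1 = 15, and single-channel mode the full 64.
              */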
3267
3268         res->max_mcast_mac = BE_MAX_MC;
3269
3270         /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3271          * 2) Create multiple TX rings on a BE3-R multi-channel interface
3272          *    *only* if it is RSS-capable.
3273          */
3274         if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3275             !be_physfn(adapter) || (be_is_mc(adapter) &&
3276             !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
3277                 res->max_tx_qs = 1;
3278         else
3279                 res->max_tx_qs = BE3_MAX_TX_QS;
3280
3281         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3282             !use_sriov && be_physfn(adapter))
3283                 res->max_rss_qs = (adapter->be3_native) ?
3284                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3285         res->max_rx_qs = res->max_rss_qs + 1;
3286
3287         if (be_physfn(adapter))
3288                 res->max_evt_qs = (res->max_vfs > 0) ?
3289                                         BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3290         else
3291                 res->max_evt_qs = 1;
3292
3293         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3294         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3295                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3296 }
3297
3298 static void be_setup_init(struct be_adapter *adapter)
3299 {
3300         adapter->vlan_prio_bmap = 0xff;
3301         adapter->phy.link_speed = -1;
3302         adapter->if_handle = -1;
3303         adapter->be3_native = false;
3304         adapter->promiscuous = false;
3305         if (be_physfn(adapter))
3306                 adapter->cmd_privileges = MAX_PRIVILEGES;
3307         else
3308                 adapter->cmd_privileges = MIN_PRIVILEGES;
3309 }
3310
3311 static int be_get_resources(struct be_adapter *adapter)
3312 {
3313         struct device *dev = &adapter->pdev->dev;
3314         struct be_resources res = {0};
3315         int status;
3316
3317         if (BEx_chip(adapter)) {
3318                 BEx_get_resources(adapter, &res);
3319                 adapter->res = res;
3320         }
3321
3322         /* For Lancer, SH etc., read per-function resource limits from FW.
3323          * GET_FUNC_CONFIG returns per-function guaranteed limits.
3324          * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
3325          */
3326         if (!BEx_chip(adapter)) {
3327                 status = be_cmd_get_func_config(adapter, &res);
3328                 if (status)
3329                         return status;
3330
3331                 /* If RoCE may be enabled stash away half the EQs for RoCE */
3332                 if (be_roce_supported(adapter))
3333                         res.max_evt_qs /= 2;
3334                 adapter->res = res;
3335
3336                 if (be_physfn(adapter)) {
3337                         status = be_cmd_get_profile_config(adapter, &res, 0);
3338                         if (status)
3339                                 return status;
3340                         adapter->res.max_vfs = res.max_vfs;
3341                 }
3342
3343                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3344                          be_max_txqs(adapter), be_max_rxqs(adapter),
3345                          be_max_rss(adapter), be_max_eqs(adapter),
3346                          be_max_vfs(adapter));
3347                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3348                          be_max_uc(adapter), be_max_mc(adapter),
3349                          be_max_vlans(adapter));
3350         }
3351
3352         return 0;
3353 }
3354
3355 /* Routine to query per-function resource limits */
3356 static int be_get_config(struct be_adapter *adapter)
3357 {
3358         u16 profile_id;
3359         int status;
3360
3361         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3362                                      &adapter->function_mode,
3363                                      &adapter->function_caps,
3364                                      &adapter->asic_rev);
3365         if (status)
3366                 return status;
3367
3368         if (be_physfn(adapter)) {
3369                 status = be_cmd_get_active_profile(adapter, &profile_id);
3370                 if (!status)
3371                         dev_info(&adapter->pdev->dev,
3372                                  "Using profile 0x%x\n", profile_id);
3373         }
3374
3375         status = be_get_resources(adapter);
3376         if (status)
3377                 return status;
3378
3379         adapter->pmac_id = kcalloc(be_max_uc(adapter),
3380                                    sizeof(*adapter->pmac_id), GFP_KERNEL);
3381         if (!adapter->pmac_id)
3382                 return -ENOMEM;
3383
3384         /* Sanitize cfg_num_qs based on HW and platform limits */
3385         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3386
3387         return 0;
3388 }
3389
3390 static int be_mac_setup(struct be_adapter *adapter)
3391 {
3392         u8 mac[ETH_ALEN];
3393         int status;
3394
3395         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3396                 status = be_cmd_get_perm_mac(adapter, mac);
3397                 if (status)
3398                         return status;
3399
3400                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3401                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3402         } else {
3403                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3404                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3405         }
3406
3407         /* For BE3-R VFs, the PF programs the initial MAC address */
3408         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3409                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3410                                 &adapter->pmac_id[0], 0);
3411         return 0;
3412 }
3413
3414 static void be_schedule_worker(struct be_adapter *adapter)
3415 {
3416         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3417         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3418 }
3419
3420 static int be_setup_queues(struct be_adapter *adapter)
3421 {
3422         struct net_device *netdev = adapter->netdev;
3423         int status;
3424
3425         status = be_evt_queues_create(adapter);
3426         if (status)
3427                 goto err;
3428
3429         status = be_tx_qs_create(adapter);
3430         if (status)
3431                 goto err;
3432
3433         status = be_rx_cqs_create(adapter);
3434         if (status)
3435                 goto err;
3436
3437         status = be_mcc_queues_create(adapter);
3438         if (status)
3439                 goto err;
3440
3441         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3442         if (status)
3443                 goto err;
3444
3445         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3446         if (status)
3447                 goto err;
3448
3449         return 0;
3450 err:
3451         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3452         return status;
3453 }
3454
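     /* Tear down and rebuild the queue set, typically after the channel
      * count is changed (e.g. via ethtool -L): close if running, destroy
      * the queues, re-program MSI-X when RoCE isn't sharing vectors, then
      * recreate the queues and reopen the interface.
      */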
3455 int be_update_queues(struct be_adapter *adapter)
3456 {
3457         struct net_device *netdev = adapter->netdev;
3458         int status;
3459
3460         if (netif_running(netdev))
3461                 be_close(netdev);
3462
3463         be_cancel_worker(adapter);
3464
3465         /* If any vectors have been shared with RoCE we cannot re-program
3466          * the MSIx table.
3467          */
3468         if (!adapter->num_msix_roce_vec)
3469                 be_msix_disable(adapter);
3470
3471         be_clear_queues(adapter);
3472
3473         if (!msix_enabled(adapter)) {
3474                 status = be_msix_enable(adapter);
3475                 if (status)
3476                         return status;
3477         }
3478
3479         status = be_setup_queues(adapter);
3480         if (status)
3481                 return status;
3482
3483         be_schedule_worker(adapter);
3484
3485         if (netif_running(netdev))
3486                 status = be_open(netdev);
3487
3488         return status;
3489 }
3490
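     /* One-time function-level bring-up: query FW config and resources,
      * enable MSI-X, create the interface and its queues, program the MAC,
      * then apply vlan/rx-mode/flow-control settings and, if requested,
      * set up SR-IOV VFs.
      */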
3491 static int be_setup(struct be_adapter *adapter)
3492 {
3493         struct device *dev = &adapter->pdev->dev;
3494         u32 tx_fc, rx_fc, en_flags;
3495         int status;
3496
3497         be_setup_init(adapter);
3498
3499         if (!lancer_chip(adapter))
3500                 be_cmd_req_native_mode(adapter);
3501
3502         status = be_get_config(adapter);
3503         if (status)
3504                 goto err;
3505
3506         status = be_msix_enable(adapter);
3507         if (status)
3508                 goto err;
3509
3510         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3511                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3512         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3513                 en_flags |= BE_IF_FLAGS_RSS;
3514         en_flags &= be_if_cap_flags(adapter);
3515         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3516                                   &adapter->if_handle, 0);
3517         if (status)
3518                 goto err;
3519
3520         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3521         rtnl_lock();
3522         status = be_setup_queues(adapter);
3523         rtnl_unlock();
3524         if (status)
3525                 goto err;
3526
3527         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3528
3529         status = be_mac_setup(adapter);
3530         if (status)
3531                 goto err;
3532
3533         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3534
3535         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3536                 dev_err(dev, "Firmware on card is old (%s); IRQs may not work.\n",
3537                         adapter->fw_ver);
3538                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3539         }
3540
3541         if (adapter->vlans_added)
3542                 be_vid_config(adapter);
3543
3544         be_set_rx_mode(adapter->netdev);
3545
3546         be_cmd_get_acpi_wol_cap(adapter);
3547
3548         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3549
3550         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3551                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3552                                         adapter->rx_fc);
3553
3554         if (be_physfn(adapter))
3555                 be_cmd_set_logical_link_config(adapter,
3556                                                IFLA_VF_LINK_STATE_AUTO, 0);
3557
3558         if (sriov_want(adapter)) {
3559                 if (be_max_vfs(adapter))
3560                         be_vf_setup(adapter);
3561                 else
3562                         dev_warn(dev, "device doesn't support SRIOV\n");
3563         }
3564
3565         status = be_cmd_get_phy_info(adapter);
3566         if (!status && be_pause_supported(adapter))
3567                 adapter->phy.fc_autoneg = 1;
3568
3569         be_schedule_worker(adapter);
3570         adapter->flags |= BE_FLAGS_SETUP_DONE;
3571         return 0;
3572 err:
3573         be_clear(adapter);
3574         return status;
3575 }
3576
3577 #ifdef CONFIG_NET_POLL_CONTROLLER
3578 static void be_netpoll(struct net_device *netdev)
3579 {
3580         struct be_adapter *adapter = netdev_priv(netdev);
3581         struct be_eq_obj *eqo;
3582         int i;
3583
3584         for_all_evt_queues(adapter, eqo, i) {
3585                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3586                 napi_schedule(&eqo->napi);
3587         }
3590 }
3591 #endif
3592
3593 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3594 static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
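     /* The 32-byte cookie is split into two 16-byte halves to match the
      * layout of the on-flash section-directory header; get_fsec_info()
      * below scans the image in 32-byte strides looking for it.
      */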
3595
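     /* Decide whether the boot-code (redboot) region must be reflashed:
      * the last 4 bytes of the image carry its CRC, which is compared
      * against the CRC read back from flash; a match means the region is
      * already current and flashing it is skipped.
      */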
3596 static bool be_flash_redboot(struct be_adapter *adapter,
3597                         const u8 *p, u32 img_start, int image_size,
3598                         int hdr_size)
3599 {
3600         u32 crc_offset;
3601         u8 flashed_crc[4];
3602         int status;
3603
3604         crc_offset = hdr_size + img_start + image_size - 4;
3605
3606         p += crc_offset;
3607
3608         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3609                         (image_size - 4));
3610         if (status) {
3611                 dev_err(&adapter->pdev->dev,
3612                         "could not get CRC from flash, not flashing redboot\n");
3613                 return false;
3614         }
3615
3616         /* update redboot only if the CRC does not match */
3617         return memcmp(flashed_crc, p, 4) != 0;
3621 }
3622
3623 static bool phy_flashing_required(struct be_adapter *adapter)
3624 {
3625         return (adapter->phy.phy_type == TN_8022 &&
3626                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3627 }
3628
3629 static bool is_comp_in_ufi(struct be_adapter *adapter,
3630                            struct flash_section_info *fsec, int type)
3631 {
3632         int i = 0, img_type = 0;
3633         struct flash_section_info_g2 *fsec_g2 = NULL;
3634
3635         if (BE2_chip(adapter))
3636                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3637
3638         for (i = 0; i < MAX_FLASH_COMP; i++) {
3639                 if (fsec_g2)
3640                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3641                 else
3642                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3643
3644                 if (img_type == type)
3645                         return true;
3646         }
3647         return false;
3649 }
3650
3651 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3652                                          int header_size,
3653                                          const struct firmware *fw)
3654 {
3655         struct flash_section_info *fsec = NULL;
3656         const u8 *p = fw->data;
3657
3658         p += header_size;
3659         while (p < (fw->data + fw->size)) {
3660                 fsec = (struct flash_section_info *)p;
3661                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3662                         return fsec;
3663                 p += 32;
3664         }
3665         return NULL;
3666 }
3667
3668 static int be_flash(struct be_adapter *adapter, const u8 *img,
3669                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3670 {
3671         u32 total_bytes = 0, flash_op, num_bytes = 0;
3672         int status = 0;
3673         struct be_cmd_write_flashrom *req = flash_cmd->va;
3674
3675         total_bytes = img_size;
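             /* Write the image in 32KB chunks.  Every chunk except the
              * last is sent with a SAVE op and the final chunk with a
              * FLASH op which, as the opcode names suggest, commits the
              * accumulated data to flash.
              */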
3676         while (total_bytes) {
3677                 num_bytes = min_t(u32, 32*1024, total_bytes);
3678
3679                 total_bytes -= num_bytes;
3680
3681                 if (!total_bytes) {
3682                         if (optype == OPTYPE_PHY_FW)
3683                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3684                         else
3685                                 flash_op = FLASHROM_OPER_FLASH;
3686                 } else {
3687                         if (optype == OPTYPE_PHY_FW)
3688                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3689                         else
3690                                 flash_op = FLASHROM_OPER_SAVE;
3691                 }
3692
3693                 memcpy(req->data_buf, img, num_bytes);
3694                 img += num_bytes;
3695                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3696                                                 flash_op, num_bytes);
3697                 if (status) {
3698                         if (status == ILLEGAL_IOCTL_REQ &&
3699                             optype == OPTYPE_PHY_FW)
3700                                 break;
3701                         dev_err(&adapter->pdev->dev,
3702                                 "cmd to write to flash rom failed.\n");
3703                         return status;
3704                 }
3705         }
3706         return 0;
3707 }
3708
3709 /* For BE2, BE3 and BE3-R */
3710 static int be_flash_BEx(struct be_adapter *adapter,
3711                          const struct firmware *fw,
3712                          struct be_dma_mem *flash_cmd,
3713                          int num_of_images)
3715 {
3716         int status = 0, i, filehdr_size = 0;
3717         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3718         const u8 *p = fw->data;
3719         const struct flash_comp *pflashcomp;
3720         int num_comp, redboot;
3721         struct flash_section_info *fsec = NULL;
3722
3723         struct flash_comp gen3_flash_types[] = {
3724                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3725                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3726                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3727                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3728                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3729                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3730                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3731                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3732                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3733                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3734                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3735                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3736                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3737                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3738                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3739                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3740                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3741                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3742                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3743                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3744         };
3745
3746         struct flash_comp gen2_flash_types[] = {
3747                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3748                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3749                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3750                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3751                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3752                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3753                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3754                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3755                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3756                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3757                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3758                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3759                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3760                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3761                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3762                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3763         };
3764
3765         if (BE3_chip(adapter)) {
3766                 pflashcomp = gen3_flash_types;
3767                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3768                 num_comp = ARRAY_SIZE(gen3_flash_types);
3769         } else {
3770                 pflashcomp = gen2_flash_types;
3771                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3772                 num_comp = ARRAY_SIZE(gen2_flash_types);
3773         }
3774
3775         /* Get flash section info */
3776         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3777         if (!fsec) {
3778                 dev_err(&adapter->pdev->dev,
3779                         "Invalid cookie. UFI corrupted?\n");
3780                 return -1;
3781         }
3782         for (i = 0; i < num_comp; i++) {
3783                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3784                         continue;
3785
3786                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3787                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3788                         continue;
3789
3790                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3791                     !phy_flashing_required(adapter))
3792                         continue;
3793
3794                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3795                         redboot = be_flash_redboot(adapter, fw->data,
3796                                 pflashcomp[i].offset, pflashcomp[i].size,
3797                                 filehdr_size + img_hdrs_size);
3798                         if (!redboot)
3799                                 continue;
3800                 }
3801
3802                 p = fw->data;
3803                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3804                 if (p + pflashcomp[i].size > fw->data + fw->size)
3805                         return -1;
3806
3807                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3808                                         pflashcomp[i].size);
3809                 if (status) {
3810                         dev_err(&adapter->pdev->dev,
3811                                 "Flashing section type %d failed.\n",
3812                                 pflashcomp[i].img_type);
3813                         return status;
3814                 }
3815         }
3816         return 0;
3817 }
3818
3819 static int be_flash_skyhawk(struct be_adapter *adapter,
3820                 const struct firmware *fw,
3821                 struct be_dma_mem *flash_cmd, int num_of_images)
3822 {
3823         int status = 0, i, filehdr_size = 0;
3824         int img_offset, img_size, img_optype, redboot;
3825         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3826         const u8 *p = fw->data;
3827         struct flash_section_info *fsec = NULL;
3828
3829         filehdr_size = sizeof(struct flash_file_hdr_g3);
3830         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3831         if (!fsec) {
3832                 dev_err(&adapter->pdev->dev,
3833                         "Invalid cookie. UFI corrupted?\n");
3834                 return -1;
3835         }
3836
3837         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3838                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3839                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3840
3841                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3842                 case IMAGE_FIRMWARE_iSCSI:
3843                         img_optype = OPTYPE_ISCSI_ACTIVE;
3844                         break;
3845                 case IMAGE_BOOT_CODE:
3846                         img_optype = OPTYPE_REDBOOT;
3847                         break;
3848                 case IMAGE_OPTION_ROM_ISCSI:
3849                         img_optype = OPTYPE_BIOS;
3850                         break;
3851                 case IMAGE_OPTION_ROM_PXE:
3852                         img_optype = OPTYPE_PXE_BIOS;
3853                         break;
3854                 case IMAGE_OPTION_ROM_FCoE:
3855                         img_optype = OPTYPE_FCOE_BIOS;
3856                         break;
3857                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3858                         img_optype = OPTYPE_ISCSI_BACKUP;
3859                         break;
3860                 case IMAGE_NCSI:
3861                         img_optype = OPTYPE_NCSI_FW;
3862                         break;
3863                 default:
3864                         continue;
3865                 }
3866
3867                 if (img_optype == OPTYPE_REDBOOT) {
3868                         redboot = be_flash_redboot(adapter, fw->data,
3869                                         img_offset, img_size,
3870                                         filehdr_size + img_hdrs_size);
3871                         if (!redboot)
3872                                 continue;
3873                 }
3874
3875                 p = fw->data;
3876                 p += filehdr_size + img_offset + img_hdrs_size;
3877                 if (p + img_size > fw->data + fw->size)
3878                         return -1;
3879
3880                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3881                 if (status) {
3882                         dev_err(&adapter->pdev->dev,
3883                                 "Flashing section type %d failed.\n",
3884                                 le32_to_cpu(fsec->fsec_entry[i].type));
3885                         return status;
3886                 }
3887         }
3888         return 0;
3889 }
3890
3891 static int lancer_fw_download(struct be_adapter *adapter,
3892                                 const struct firmware *fw)
3893 {
3894 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3895 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3896         struct be_dma_mem flash_cmd;
3897         const u8 *data_ptr = NULL;
3898         u8 *dest_image_ptr = NULL;
3899         size_t image_size = 0;
3900         u32 chunk_size = 0;
3901         u32 data_written = 0;
3902         u32 offset = 0;
3903         int status = 0;
3904         u8 add_status = 0;
3905         u8 change_status;
3906
3907         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3908                 dev_err(&adapter->pdev->dev,
3909                         "FW image not properly aligned. "
3910                         "Length must be 4-byte aligned.\n");
3911                 status = -EINVAL;
3912                 goto lancer_fw_exit;
3913         }
3914
3915         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3916                                 + LANCER_FW_DOWNLOAD_CHUNK;
3917         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3918                                           &flash_cmd.dma, GFP_KERNEL);
3919         if (!flash_cmd.va) {
3920                 status = -ENOMEM;
3921                 goto lancer_fw_exit;
3922         }
3923
3924         dest_image_ptr = flash_cmd.va +
3925                                 sizeof(struct lancer_cmd_req_write_object);
3926         image_size = fw->size;
3927         data_ptr = fw->data;
3928
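             /* Stream the image to the FW in 32KB chunks at increasing
              * flash offsets; the FW reports the amount it accepted
              * through data_written, which drives the offset/pointer
              * advance below.
              */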
3929         while (image_size) {
3930                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3931
3932                 /* Copy the image chunk content. */
3933                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3934
3935                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3936                                                  chunk_size, offset,
3937                                                  LANCER_FW_DOWNLOAD_LOCATION,
3938                                                  &data_written, &change_status,
3939                                                  &add_status);
3940                 if (status)
3941                         break;
3942
3943                 offset += data_written;
3944                 data_ptr += data_written;
3945                 image_size -= data_written;
3946         }
3947
3948         if (!status) {
3949                 /* Commit the FW written */
3950                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3951                                                  0, offset,
3952                                                  LANCER_FW_DOWNLOAD_LOCATION,
3953                                                  &data_written, &change_status,
3954                                                  &add_status);
3955         }
3956
3957         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3958                           flash_cmd.dma);
3959         if (status) {
3960                 dev_err(&adapter->pdev->dev,
3961                         "Firmware load error. "
3962                         "Status code: 0x%x Additional Status: 0x%x\n",
3963                         status, add_status);
3964                 goto lancer_fw_exit;
3965         }
3966
3967         if (change_status == LANCER_FW_RESET_NEEDED) {
3968                 dev_info(&adapter->pdev->dev,
3969                          "Resetting adapter to activate new FW\n");
3970                 status = lancer_physdev_ctrl(adapter,
3971                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3972                 if (status) {
3973                         dev_err(&adapter->pdev->dev,
3974                                 "Adapter busy for FW reset.\n"
3975                                 "New FW will not be active.\n");
3976                         goto lancer_fw_exit;
3977                 }
3978         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3979                 dev_err(&adapter->pdev->dev,
3980                         "System reboot required for new FW to be active\n");
3981         }
3983
3984         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3985 lancer_fw_exit:
3986         return status;
3987 }
3988
3989 #define UFI_TYPE2               2
3990 #define UFI_TYPE3               3
3991 #define UFI_TYPE3R              10
3992 #define UFI_TYPE4               4
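     /* Match a UFI image to the ASIC it may be flashed on: the first
      * character of fhdr->build encodes the generation ('2' = BE2,
      * '3' = BE3, '4' = Skyhawk), and within the '3' family an
      * asic_type_rev of 0x10 marks a BE3-R image.
      */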
3993 static int be_get_ufi_type(struct be_adapter *adapter,
3994                            struct flash_file_hdr_g3 *fhdr)
3995 {
3996         if (!fhdr)
3997                 goto be_get_ufi_exit;
3998
3999         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4000                 return UFI_TYPE4;
4001         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4002                 if (fhdr->asic_type_rev == 0x10)
4003                         return UFI_TYPE3R;
4004                 else
4005                         return UFI_TYPE3;
4006         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
4007                 return UFI_TYPE2;
4008
4009 be_get_ufi_exit:
4010         dev_err(&adapter->pdev->dev,
4011                 "UFI and Interface are not compatible for flashing\n");
4012         return -1;
4013 }
4014
4015 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
4016 {
4017         struct flash_file_hdr_g3 *fhdr3;
4018         struct image_hdr *img_hdr_ptr = NULL;
4019         struct be_dma_mem flash_cmd;
4020         const u8 *p;
4021         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
4022
4023         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4024         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4025                                           &flash_cmd.dma, GFP_KERNEL);
4026         if (!flash_cmd.va) {
4027                 status = -ENOMEM;
4028                 goto be_fw_exit;
4029         }
4030
4031         p = fw->data;
4032         fhdr3 = (struct flash_file_hdr_g3 *)p;
4033
4034         ufi_type = be_get_ufi_type(adapter, fhdr3);
4035
4036         num_imgs = le32_to_cpu(fhdr3->num_imgs);
4037         for (i = 0; i < num_imgs; i++) {
4038                 img_hdr_ptr = (struct image_hdr *)(fw->data +
4039                                 (sizeof(struct flash_file_hdr_g3) +
4040                                  i * sizeof(struct image_hdr)));
4041                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
4042                         switch (ufi_type) {
4043                         case UFI_TYPE4:
4044                                 status = be_flash_skyhawk(adapter, fw,
4045                                                         &flash_cmd, num_imgs);
4046                                 break;
4047                         case UFI_TYPE3R:
4048                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
4049                                                       num_imgs);
4050                                 break;
4051                         case UFI_TYPE3:
4052                                 /* Do not flash this ufi on BE3-R cards */
4053                                 if (adapter->asic_rev < 0x10) {
4054                                         status = be_flash_BEx(adapter, fw,
4055                                                               &flash_cmd,
4056                                                               num_imgs);
4057                                 } else {
4058                                         status = -1;
4059                                         dev_err(&adapter->pdev->dev,
4060                                                 "Can't load BE3 UFI on BE3R\n");
4061                                 }
4062                         }
4063                 }
4064         }
4065
4066         if (ufi_type == UFI_TYPE2)
4067                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
4068         else if (ufi_type == -1)
4069                 status = -1;
4070
4071         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4072                           flash_cmd.dma);
4073         if (status) {
4074                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
4075                 goto be_fw_exit;
4076         }
4077
4078         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
4079
4080 be_fw_exit:
4081         return status;
4082 }
4083
4084 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4085 {
4086         const struct firmware *fw;
4087         int status;
4088
4089         if (!netif_running(adapter->netdev)) {
4090                 dev_err(&adapter->pdev->dev,
4091                         "Firmware load not allowed (interface is down)\n");
4092                 return -1;
4093         }
4094
4095         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4096         if (status)
4097                 goto fw_exit;
4098
4099         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4100
4101         if (lancer_chip(adapter))
4102                 status = lancer_fw_download(adapter, fw);
4103         else
4104                 status = be_fw_download(adapter, fw);
4105
4106         if (!status)
4107                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4108                                   adapter->fw_on_flash);
4109
4110 fw_exit:
4111         release_firmware(fw);
4112         return status;
4113 }
4114
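     /* Set the embedded switch's port-forwarding mode (VEB or VEPA) from
      * an RTM_SETLINK request.  From userspace this is typically driven
      * by iproute2, e.g. (interface name hypothetical):
      *
      *      bridge link set dev eth0 hwmode vepa
      */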
4115 static int be_ndo_bridge_setlink(struct net_device *dev,
4116                                     struct nlmsghdr *nlh)
4117 {
4118         struct be_adapter *adapter = netdev_priv(dev);
4119         struct nlattr *attr, *br_spec;
4120         int rem;
4121         int status = 0;
4122         u16 mode = 0;
4123
4124         if (!sriov_enabled(adapter))
4125                 return -EOPNOTSUPP;
4126
4127         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
             if (!br_spec)   /* no AF_SPEC attribute: nothing to parse */
                     return -EINVAL;

4129         nla_for_each_nested(attr, br_spec, rem) {
4130                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4131                         continue;
4132
4133                 mode = nla_get_u16(attr);
4134                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4135                         return -EINVAL;
4136
4137                 status = be_cmd_set_hsw_config(adapter, 0, 0,
4138                                                adapter->if_handle,
4139                                                mode == BRIDGE_MODE_VEPA ?
4140                                                PORT_FWD_TYPE_VEPA :
4141                                                PORT_FWD_TYPE_VEB);
4142                 if (status)
4143                         goto err;
4144
4145                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4146                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4147
4148                 return status;
4149         }
4150 err:
4151         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4152                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4153
4154         return status;
4155 }
4156
4157 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4158                                     struct net_device *dev,
4159                                     u32 filter_mask)
4160 {
4161         struct be_adapter *adapter = netdev_priv(dev);
4162         int status = 0;
4163         u8 hsw_mode;
4164
4165         if (!sriov_enabled(adapter))
4166                 return 0;
4167
4168         /* BE and Lancer chips support VEB mode only */
4169         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4170                 hsw_mode = PORT_FWD_TYPE_VEB;
4171         } else {
4172                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4173                                                adapter->if_handle, &hsw_mode);
4174                 if (status)
4175                         return 0;
4176         }
4177
4178         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4179                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4180                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4181 }
4182
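/* VxLAN offload hooks: only the newer (non-BEx, non-Lancer) ASICs can
 * parse and checksum the inner headers of VxLAN-encapsulated frames,
 * and only for a single UDP destination port at a time. The first port
 * added via ndo_add_vxlan_port wins; further ports are rejected below.
 */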
#ifdef CONFIG_BE2NET_VXLAN
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
                              __be16 port)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        int status;

        if (lancer_chip(adapter) || BEx_chip(adapter))
                return;

        if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
                dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
                         be16_to_cpu(port));
                dev_info(dev,
                         "Only one UDP port supported for VxLAN offloads\n");
                return;
        }

        status = be_cmd_manage_iface(adapter, adapter->if_handle,
                                     OP_CONVERT_NORMAL_TO_TUNNEL);
        if (status) {
                dev_warn(dev, "Failed to convert normal interface to tunnel\n");
                goto err;
        }

        status = be_cmd_set_vxlan_port(adapter, port);
        if (status) {
                dev_warn(dev, "Failed to add VxLAN port\n");
                goto err;
        }
        adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
        adapter->vxlan_port = port;

        dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
                 be16_to_cpu(port));
        return;
err:
        be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
                              __be16 port)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (lancer_chip(adapter) || BEx_chip(adapter))
                return;

        if (adapter->vxlan_port != port)
                return;

        be_disable_vxlan_offloads(adapter);

        dev_info(&adapter->pdev->dev,
                 "Disabled VxLAN offloads for UDP port %d\n",
                 be16_to_cpu(port));
}
#endif

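/* Entry points handed to the core network stack. The netpoll,
 * busy-poll and VxLAN port-notification callbacks are compile-time
 * optional.
 */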
static const struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
        .ndo_start_xmit         = be_xmit,
        .ndo_set_rx_mode        = be_set_rx_mode,
        .ndo_set_mac_address    = be_mac_addr_set,
        .ndo_change_mtu         = be_change_mtu,
        .ndo_get_stats64        = be_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
        .ndo_set_vf_link_state  = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = be_netpoll,
#endif
        .ndo_bridge_setlink     = be_ndo_bridge_setlink,
        .ndo_bridge_getlink     = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
        .ndo_busy_poll          = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
        .ndo_add_vxlan_port     = be_add_vxlan_port,
        .ndo_del_vxlan_port     = be_del_vxlan_port,
#endif
};

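/* Advertise offload capabilities. hw_enc_features covers the *inner*
 * headers of tunnelled (VxLAN) packets, which only Skyhawk can
 * offload; hw_features/features cover plain traffic. The VLAN RX
 * offloads are set in 'features' but not 'hw_features', so they stay
 * enabled and cannot be toggled off via ethtool.
 */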
static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (skyhawk_chip(adapter)) {
                netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                           NETIF_F_TSO | NETIF_F_TSO6 |
                                           NETIF_F_GSO_UDP_TUNNEL;
                netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
        }
        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_CTAG_TX;
        if (be_multi_rxq(adapter))
                netdev->hw_features |= NETIF_F_RXHASH;

        netdev->features |= netdev->hw_features |
                NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        netdev->flags |= IFF_MULTICAST;

        netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

        netdev->netdev_ops = &be_netdev_ops;

        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}

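/* PCI BAR usage: BAR 2 holds the CSR space and is mapped only on BEx
 * physical functions; the doorbell area lives in BAR 0 on Lancer and
 * on virtual functions, and in BAR 4 otherwise (see db_bar()).
 * Skyhawk additionally exposes a RoCE doorbell range within the same
 * doorbell BAR.
 */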
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                pci_iounmap(adapter->pdev, adapter->csr);
        if (adapter->db)
                pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
        if (lancer_chip(adapter) || !be_physfn(adapter))
                return 0;
        else
                return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
        if (skyhawk_chip(adapter)) {
                adapter->roce_db.size = 4096;
                adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
                                                              db_bar(adapter));
                adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
                                                               db_bar(adapter));
        }
        return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
        u8 __iomem *addr;

        if (BEx_chip(adapter) && be_physfn(adapter)) {
                adapter->csr = pci_iomap(adapter->pdev, 2, 0);
                if (!adapter->csr)
                        return -ENOMEM;
        }

        addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
        if (!addr)
                goto pci_map_err;
        adapter->db = addr;

        be_roce_map_pci_bars(adapter);
        return 0;

pci_map_err:
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

        be_unmap_pci_bars(adapter);

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);

        mem = &adapter->rx_filter;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

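/* The MCC mailbox is aligned to a 16-byte boundary by over-allocating
 * 16 extra bytes and rounding both the CPU and DMA addresses up with
 * PTR_ALIGN(); presumably the FW requires this alignment. E.g. an
 * allocation returned at ...0x1008 yields an aligned mailbox at
 * ...0x1010, still inside the padded buffer.
 */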
static int be_ctrl_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *rx_filter = &adapter->rx_filter;
        u32 sli_intf;
        int status;

        pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
        adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
                                 SLI_INTF_FAMILY_SHIFT;
        adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

        status = be_map_pci_bars(adapter);
        if (status)
                goto done;

        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
                                                mbox_mem_alloc->size,
                                                &mbox_mem_alloc->dma,
                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
        }
        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
        rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
                                            rx_filter->size, &rx_filter->dma,
                                            GFP_KERNEL);
        if (!rx_filter->va) {
                status = -ENOMEM;
                goto free_mbox;
        }

        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);

        init_completion(&adapter->et_cmd_compl);
        pci_save_state(adapter->pdev);
        return 0;

free_mbox:
        dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
                          mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
        be_unmap_pci_bars(adapter);

done:
        return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (cmd->va)
                dma_free_coherent(&adapter->pdev->dev, cmd->size,
                                  cmd->va, cmd->dma);
}

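/* The GET_STATS request/response layout grew with each ASIC
 * generation: Lancer uses its own pport-stats command, BE2 takes v0,
 * BE3 takes v1, and everything newer takes v2. The DMA buffer below
 * is sized for whichever layout this chip speaks.
 */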
static int be_stats_init(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (lancer_chip(adapter))
                cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
        else if (BE2_chip(adapter))
                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
        else if (BE3_chip(adapter))
                cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
        else
                /* ALL non-BE ASICs */
                cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

        cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                      GFP_KERNEL);
        if (!cmd->va)
                return -ENOMEM;
        return 0;
}

static void be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        be_roce_dev_remove(adapter);
        be_intr_set(adapter, false);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        pci_disable_pcie_error_reporting(pdev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
        int status, level;

        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;

        /* Must be a power of 2 or else MODULO will BUG_ON */
        adapter->be_get_temp_freq = 64;

        if (BEx_chip(adapter)) {
                level = be_cmd_get_fw_log_level(adapter);
                adapter->msg_enable =
                        level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
        }

        adapter->cfg_num_qs = netif_get_num_default_rss_queues();
        return 0;
}

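/* Lancer error recovery: once the FW reports it is ready again
 * (lancer_test_and_set_rdy_state), tear the whole function down and
 * rebuild it from scratch -- close, be_clear(), be_setup(), re-open.
 * -EAGAIN from be_setup() means resources are still being
 * re-provisioned and the recovery task should retry later.
 */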
static int lancer_recover_func(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        int status;

        status = lancer_test_and_set_rdy_state(adapter);
        if (status)
                goto err;

        if (netif_running(adapter->netdev))
                be_close(adapter->netdev);

        be_clear(adapter);

        be_clear_all_error(adapter);

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(adapter->netdev)) {
                status = be_open(adapter->netdev);
                if (status)
                        goto err;
        }

        dev_info(dev, "Adapter recovery successful\n");
        return 0;
err:
        if (status == -EAGAIN)
                dev_err(dev, "Waiting for resource provisioning\n");
        else
                dev_err(dev, "Adapter recovery failed\n");

        return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, func_recovery_work.work);
        int status = 0;

        be_detect_error(adapter);

        if (adapter->hw_error && lancer_chip(adapter)) {
                rtnl_lock();
                netif_device_detach(adapter->netdev);
                rtnl_unlock();

                status = lancer_recover_func(adapter);
                if (!status)
                        netif_device_attach(adapter->netdev);
        }

        /* In Lancer, for all errors other than provisioning error (-EAGAIN),
         * no need to attempt further recovery.
         */
        if (!status || status == -EAGAIN)
                schedule_delayed_work(&adapter->func_recovery_work,
                                      msecs_to_jiffies(1000));
}

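/* Periodic (1 s) housekeeping: reap MCC completions while interrupts
 * are off, refresh HW stats, poll the die temperature every
 * be_get_temp_freq iterations, and replenish any RX queue that
 * starved on a previous atomic allocation failure.
 */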
static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        int i;

        /* when interrupts are not yet enabled, just reap any pending
         * mcc completions
         */
        if (!netif_running(adapter->netdev)) {
                local_bh_disable();
                be_process_mcc(adapter);
                local_bh_enable();
                goto reschedule;
        }

        if (!adapter->stats_cmd_sent) {
                if (lancer_chip(adapter))
                        lancer_cmd_get_pport_stats(adapter,
                                                   &adapter->stats_cmd);
                else
                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
        }

        if (be_physfn(adapter) &&
            MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
                be_cmd_get_die_temperature(adapter);

        for_all_rx_queues(adapter, rxo, i) {
                /* Replenish RX-queues starved due to memory
                 * allocation failures.
                 */
                if (rxo->rx_post_starved)
                        be_post_rx_frags(rxo, GFP_KERNEL);
        }

        be_eqd_update(adapter);

reschedule:
        adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
        return pci_num_vf(adapter->pdev) == 0;
}

static char *mc_name(struct be_adapter *adapter)
{
        char *str = ""; /* default */

        switch (adapter->mc_type) {
        case UMC:
                str = "UMC";
                break;
        case FLEX10:
                str = "FLEX10";
                break;
        case vNIC1:
                str = "vNIC-1";
                break;
        case nPAR:
                str = "nPAR";
                break;
        case UFP:
                str = "UFP";
                break;
        case vNIC2:
                str = "vNIC-2";
                break;
        default:
                break;
        }

        return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
        return be_physfn(adapter) ? "PF" : "VF";
}

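/* One-time PCI probe. Bring-up order: PCI enable -> BAR/mailbox setup
 * (be_ctrl_init) -> FW ready/reset handshake -> stats DMA buffer ->
 * be_setup() (queues, interrupts, MAC) -> netdev registration. The
 * error labels unwind in exactly the reverse order.
 */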
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;
        char port_name;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
        if (!netdev) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask_and_coherent(&pdev->dev,
                                                   DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        if (be_physfn(adapter)) {
                status = pci_enable_pcie_error_reporting(pdev);
                if (!status)
                        dev_info(&pdev->dev, "PCIe error reporting enabled\n");
        }

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_fw_wait_ready(adapter);
                if (status)
                        goto ctrl_clean;
        }

        if (be_reset_required(adapter)) {
                status = be_cmd_reset_function(adapter);
                if (status)
                        goto ctrl_clean;

                /* Wait for interrupts to quiesce after an FLR */
                msleep(100);
        }

        /* Allow interrupts for other ULPs running on NIC function */
        be_intr_set(adapter, true);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_initial_config(adapter);
        if (status)
                goto stats_clean;

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
        adapter->rx_fc = adapter->tx_fc = true;

        status = be_setup(adapter);
        if (status)
                goto stats_clean;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        be_roce_dev_add(adapter);

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));

        be_cmd_query_port_name(adapter, &port_name);

        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), port_name);

        return 0;

unsetup:
        be_clear(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        free_netdev(netdev);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (adapter->wol_en)
                be_setup_wol(adapter, true);

        be_intr_set(adapter, false);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        status = be_fw_wait_ready(adapter);
        if (status)
                return status;

        be_intr_set(adapter, true);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        status = be_setup(adapter);
        if (status)
                return status;

        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);

        if (adapter->wol_en)
                be_setup_wol(adapter, false);

        return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

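/* PCI/EEH error handling: error_detected quiesces the function and
 * decides whether a slot reset is worth trying, slot_reset
 * re-initializes PCI state and waits for the FW, and resume rebuilds
 * the function and restarts traffic.
 */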
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        if (!adapter->eeh_error) {
                adapter->eeh_error = true;

                cancel_delayed_work_sync(&adapter->func_recovery_work);

                rtnl_lock();
                netif_device_detach(netdev);
                if (netif_running(netdev))
                        be_close(netdev);
                rtnl_unlock();

                be_clear(adapter);
        }

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while flash dump is in progress
         * can cause it not to recover; wait for it to finish.
         * Wait only for first function as it is needed only once per
         * adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        be_clear_all_error(adapter);
        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        /* On some BE3 FW versions, after a HW reset,
         * interrupts will remain disabled for each function.
         * So, explicitly enable interrupts
         */
        be_intr_set(adapter, true);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                pr_warn(DRV_NAME " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);