/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};
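
/* Decode sketch (illustrative, not part of the original listing): each set
 * bit i in the UE-status-low/high CSRs names the hardware block that hit an
 * unrecoverable error. The driver's error-detect path walks the bits roughly
 * like:
 *
 *	for (i = 0; ue_lo; ue_lo >>= 1, i++)
 *		if (ue_lo & 1)
 *			dev_err(dev, "UE: %s bit set\n", ue_status_low_desc[i]);
 */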

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}
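
/* Illustrative usage sketch (not in the original file): a TX ring is
 * typically created as
 *
 *	be_queue_alloc(adapter, &txo->q, TX_Q_LEN, sizeof(struct be_eth_wrb));
 *
 * which reserves len * entry_size bytes of zeroed, DMA-coherent memory;
 * TX_Q_LEN here stands in for whatever ring depth be.h defines.
 */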

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        /* Make sure the posted RX descriptors are visible to the device
         * before the doorbell write hands them over.
         */
        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
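
/* Doorbell layout sketch (inferred from the masks/shifts used above; the
 * exact bit positions live in be_hw.h): the ring id sits in the low bits
 * and the count of newly posted entries is shifted into a higher field,
 * e.g. for the RQ doorbell:
 *
 *	val = (qid & DB_RQ_RING_ID_MASK) | (posted << DB_RQ_NUM_POSTED_SHIFT);
 */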

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
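
/* A hedged note on arm/num_popped in the EQ/CQ doorbells above: setting the
 * rearm bit when ringing the doorbell re-enables the interrupt for that
 * queue, while num_popped returns the consumed entries to hardware so the
 * ring does not overflow; the poll paths typically ring with arm == false
 * while work remains and rearm only when done.
 */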

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. Such a failure is OK only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC was successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}
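
/* Worked example (illustrative): if *acc == 0x0001FFF0 and the 16-bit HW
 * counter now reads 0x0005, then val < lo(*acc), so the counter wrapped;
 * newacc = 0x00010005 + 65536 = 0x00020005, i.e. the 32-bit accumulator
 * keeps counting past the 16-bit wrap.
 */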

static void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* The erx HW counter below can actually wrap around after
                 * 65535. The driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}
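
/* A note on the snapshot loops in be_get_stats64() above (hedged): the
 * u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pair forms a
 * seqcount read section, so on 32-bit CPUs a packet/byte counter pair that
 * is being updated concurrently by the datapath is re-read until a
 * consistent snapshot is seen; on 64-bit builds the loop reduces to plain
 * loads.
 */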

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        /* one WRB for the linear (head) data, if any */
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
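
/* Worked example (illustrative): a linear skb with 2 frags needs
 * 1 (head) + 2 (frags) + 1 (hdr wrb) = 4 entries; already even, so no dummy
 * is added. With 1 frag the count would be 3, and on non-Lancer chips a
 * dummy WRB pads it to 4, since the logic above keeps the per-packet WRB
 * count even there.
 */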

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
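
/* On the dma_err path above, the queue head is rewound to map_head and every
 * WRB posted so far is unmapped; map_single is cleared after the first entry
 * because only the skb head was mapped with dma_map_single(), the remaining
 * entries being page frags.
 */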

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* Setting skip_hw_vlan = 1 informs the F/W to skip VLAN
                 * insertion (f/w workaround)
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* Lancer and SH-R ASICs have a bug wherein packets that are 32 bytes
         * or less may cause a transmit stall on that port. So the work-around
         * is to pad short packets (<= 32 bytes) to a 36-byte length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}
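
/* A hedged note on the queue-full check in be_xmit() above: the queue is
 * stopped once fewer than BE_MAX_TX_FRAG_COUNT entries remain free, i.e. as
 * soon as a worst-case next skb might not fit, rather than when the ring is
 * actually full; the TX-completion path can then safely wake the queue.
 */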

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled. */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                                dev_info(&adapter->pdev->dev,
                                         "Re-Enabling HW VLAN filtering\n");
                        }
                }
        }

        return status;

set_vlan_promisc:
        dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else {
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= be_max_vlans(adapter))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev,
                         "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev,
                         "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan) {
                        /* If this is a new value, program it; else skip */
                        vf_cfg->vlan_tag = vlan;
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                vf_cfg->vlan_tag = 0;
                vlan = vf_cfg->def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                               vf_cfg->if_handle, 0);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}
1344
1345 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1346                           ulong now)
1347 {
1348         aic->rx_pkts_prev = rx_pkts;
1349         aic->tx_reqs_prev = tx_pkts;
1350         aic->jiffies = now;
1351 }
1352
1353 static void be_eqd_update(struct be_adapter *adapter)
1354 {
1355         struct be_set_eqd set_eqd[MAX_EVT_QS];
1356         int eqd, i, num = 0, start;
1357         struct be_aic_obj *aic;
1358         struct be_eq_obj *eqo;
1359         struct be_rx_obj *rxo;
1360         struct be_tx_obj *txo;
1361         u64 rx_pkts, tx_pkts;
1362         ulong now;
1363         u32 pps, delta;
1364
1365         for_all_evt_queues(adapter, eqo, i) {
1366                 aic = &adapter->aic_obj[eqo->idx];
1367                 if (!aic->enable) {
1368                         if (aic->jiffies)
1369                                 aic->jiffies = 0;
1370                         eqd = aic->et_eqd;
1371                         goto modify_eqd;
1372                 }
1373
1374                 rxo = &adapter->rx_obj[eqo->idx];
1375                 do {
1376                         start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1377                         rx_pkts = rxo->stats.rx_pkts;
1378                 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1379
1380                 txo = &adapter->tx_obj[eqo->idx];
1381                 do {
1382                         start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1383                         tx_pkts = txo->stats.tx_reqs;
1384                 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1385
1386
1387                 /* Skip if jiffies wrapped around or on the first calculation */
1388                 now = jiffies;
1389                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1390                     rx_pkts < aic->rx_pkts_prev ||
1391                     tx_pkts < aic->tx_reqs_prev) {
1392                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1393                         continue;
1394                 }
1395
1396                 delta = jiffies_to_msecs(now - aic->jiffies);
1397                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1398                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1399                 eqd = (pps / 15000) << 2;
1400
1401                 if (eqd < 8)
1402                         eqd = 0;
1403                 eqd = min_t(u32, eqd, aic->max_eqd);
1404                 eqd = max_t(u32, eqd, aic->min_eqd);
1405
1406                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1407 modify_eqd:
1408                 if (eqd != aic->prev_eqd) {
1409                         set_eqd[num].delay_multiplier = (eqd * 65)/100;
1410                         set_eqd[num].eq_id = eqo->q.id;
1411                         aic->prev_eqd = eqd;
1412                         num++;
1413                 }
1414         }
1415
1416         if (num)
1417                 be_cmd_modify_eqd(adapter, set_eqd, num);
1418 }
1419
1420 static void be_rx_stats_update(struct be_rx_obj *rxo,
1421                 struct be_rx_compl_info *rxcp)
1422 {
1423         struct be_rx_stats *stats = rx_stats(rxo);
1424
1425         u64_stats_update_begin(&stats->sync);
1426         stats->rx_compl++;
1427         stats->rx_bytes += rxcp->pkt_size;
1428         stats->rx_pkts++;
1429         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1430                 stats->rx_mcast_pkts++;
1431         if (rxcp->err)
1432                 stats->rx_compl_err++;
1433         u64_stats_update_end(&stats->sync);
1434 }
1435
1436 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1437 {
1438         /* The L4 checksum is not reliable for non-TCP/UDP packets.
1439          * Also ignore ipcksm for IPv6 pkts */
1440         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1441                                 (rxcp->ip_csum || rxcp->ipv6);
1442 }
1443
1444 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1445                                                 u16 frag_idx)
1446 {
1447         struct be_adapter *adapter = rxo->adapter;
1448         struct be_rx_page_info *rx_page_info;
1449         struct be_queue_info *rxq = &rxo->q;
1450
1451         rx_page_info = &rxo->page_info_tbl[frag_idx];
1452         BUG_ON(!rx_page_info->page);
1453
1454         if (rx_page_info->last_page_user) {
1455                 dma_unmap_page(&adapter->pdev->dev,
1456                                dma_unmap_addr(rx_page_info, bus),
1457                                adapter->big_page_size, DMA_FROM_DEVICE);
1458                 rx_page_info->last_page_user = false;
1459         }
1460
1461         atomic_dec(&rxq->used);
1462         return rx_page_info;
1463 }
1464
1465 /* Throw away the data in the Rx completion */
1466 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1467                                 struct be_rx_compl_info *rxcp)
1468 {
1469         struct be_queue_info *rxq = &rxo->q;
1470         struct be_rx_page_info *page_info;
1471         u16 i, num_rcvd = rxcp->num_rcvd;
1472
1473         for (i = 0; i < num_rcvd; i++) {
1474                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1475                 put_page(page_info->page);
1476                 memset(page_info, 0, sizeof(*page_info));
1477                 index_inc(&rxcp->rxq_idx, rxq->len);
1478         }
1479 }
1480
1481 /*
1482  * skb_fill_rx_data forms a complete skb for an ether frame
1483  * indicated by rxcp.
1484  */
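/* Illustration (sizes assumed): for a 9000-byte frame with the default
 * rx_frag_size of 2048, only the ETH_HLEN-byte header is copied into the
 * skb linear area; the rest stays in the receive pages attached as page
 * frags, with frags that share a physical page coalesced into one slot.
 * Frames of BE_HDR_LEN bytes or less are copied whole and their page is
 * released immediately.
 */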
1485 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1486                              struct be_rx_compl_info *rxcp)
1487 {
1488         struct be_queue_info *rxq = &rxo->q;
1489         struct be_rx_page_info *page_info;
1490         u16 i, j;
1491         u16 hdr_len, curr_frag_len, remaining;
1492         u8 *start;
1493
1494         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1495         start = page_address(page_info->page) + page_info->page_offset;
1496         prefetch(start);
1497
1498         /* Copy data in the first descriptor of this completion */
1499         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1500
1501         skb->len = curr_frag_len;
1502         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1503                 memcpy(skb->data, start, curr_frag_len);
1504                 /* Complete packet has now been moved to data */
1505                 put_page(page_info->page);
1506                 skb->data_len = 0;
1507                 skb->tail += curr_frag_len;
1508         } else {
1509                 hdr_len = ETH_HLEN;
1510                 memcpy(skb->data, start, hdr_len);
1511                 skb_shinfo(skb)->nr_frags = 1;
1512                 skb_frag_set_page(skb, 0, page_info->page);
1513                 skb_shinfo(skb)->frags[0].page_offset =
1514                                         page_info->page_offset + hdr_len;
1515                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1516                 skb->data_len = curr_frag_len - hdr_len;
1517                 skb->truesize += rx_frag_size;
1518                 skb->tail += hdr_len;
1519         }
1520         page_info->page = NULL;
1521
1522         if (rxcp->pkt_size <= rx_frag_size) {
1523                 BUG_ON(rxcp->num_rcvd != 1);
1524                 return;
1525         }
1526
1527         /* More frags present for this completion */
1528         index_inc(&rxcp->rxq_idx, rxq->len);
1529         remaining = rxcp->pkt_size - curr_frag_len;
1530         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1531                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1532                 curr_frag_len = min(remaining, rx_frag_size);
1533
1534                 /* Coalesce all frags from the same physical page in one slot */
1535                 if (page_info->page_offset == 0) {
1536                         /* Fresh page */
1537                         j++;
1538                         skb_frag_set_page(skb, j, page_info->page);
1539                         skb_shinfo(skb)->frags[j].page_offset =
1540                                                         page_info->page_offset;
1541                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1542                         skb_shinfo(skb)->nr_frags++;
1543                 } else {
1544                         put_page(page_info->page);
1545                 }
1546
1547                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1548                 skb->len += curr_frag_len;
1549                 skb->data_len += curr_frag_len;
1550                 skb->truesize += rx_frag_size;
1551                 remaining -= curr_frag_len;
1552                 index_inc(&rxcp->rxq_idx, rxq->len);
1553                 page_info->page = NULL;
1554         }
1555         BUG_ON(j > MAX_SKB_FRAGS);
1556 }
1557
1558 /* Process the RX completion indicated by rxcp when GRO is disabled */
1559 static void be_rx_compl_process(struct be_rx_obj *rxo,
1560                                 struct be_rx_compl_info *rxcp)
1561 {
1562         struct be_adapter *adapter = rxo->adapter;
1563         struct net_device *netdev = adapter->netdev;
1564         struct sk_buff *skb;
1565
1566         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1567         if (unlikely(!skb)) {
1568                 rx_stats(rxo)->rx_drops_no_skbs++;
1569                 be_rx_compl_discard(rxo, rxcp);
1570                 return;
1571         }
1572
1573         skb_fill_rx_data(rxo, skb, rxcp);
1574
1575         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1576                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1577         else
1578                 skb_checksum_none_assert(skb);
1579
1580         skb->protocol = eth_type_trans(skb, netdev);
1581         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1582         if (netdev->features & NETIF_F_RXHASH)
1583                 skb->rxhash = rxcp->rss_hash;
1584
1585
1586         if (rxcp->vlanf)
1587                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1588
1589         netif_receive_skb(skb);
1590 }
1591
1592 /* Process the RX completion indicated by rxcp when GRO is enabled */
1593 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1594                                     struct napi_struct *napi,
1595                                     struct be_rx_compl_info *rxcp)
1596 {
1597         struct be_adapter *adapter = rxo->adapter;
1598         struct be_rx_page_info *page_info;
1599         struct sk_buff *skb = NULL;
1600         struct be_queue_info *rxq = &rxo->q;
1601         u16 remaining, curr_frag_len;
1602         u16 i, j;
1603
1604         skb = napi_get_frags(napi);
1605         if (!skb) {
1606                 be_rx_compl_discard(rxo, rxcp);
1607                 return;
1608         }
1609
1610         remaining = rxcp->pkt_size;
1611         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1612                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1613
1614                 curr_frag_len = min(remaining, rx_frag_size);
1615
1616                 /* Coalesce all frags from the same physical page in one slot */
1617                 if (i == 0 || page_info->page_offset == 0) {
1618                         /* First frag or Fresh page */
1619                         j++;
1620                         skb_frag_set_page(skb, j, page_info->page);
1621                         skb_shinfo(skb)->frags[j].page_offset =
1622                                                         page_info->page_offset;
1623                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1624                 } else {
1625                         put_page(page_info->page);
1626                 }
1627                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1628                 skb->truesize += rx_frag_size;
1629                 remaining -= curr_frag_len;
1630                 index_inc(&rxcp->rxq_idx, rxq->len);
1631                 memset(page_info, 0, sizeof(*page_info));
1632         }
1633         BUG_ON(j > MAX_SKB_FRAGS);
1634
1635         skb_shinfo(skb)->nr_frags = j + 1;
1636         skb->len = rxcp->pkt_size;
1637         skb->data_len = rxcp->pkt_size;
1638         skb->ip_summed = CHECKSUM_UNNECESSARY;
1639         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1640         if (adapter->netdev->features & NETIF_F_RXHASH)
1641                 skb->rxhash = rxcp->rss_hash;
1642
1643         if (rxcp->vlanf)
1644                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1645
1646         napi_gro_frags(napi);
1647 }
1648
1649 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1650                                  struct be_rx_compl_info *rxcp)
1651 {
1652         rxcp->pkt_size =
1653                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1654         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1655         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1656         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1657         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1658         rxcp->ip_csum =
1659                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1660         rxcp->l4_csum =
1661                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1662         rxcp->ipv6 =
1663                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1664         rxcp->rxq_idx =
1665                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1666         rxcp->num_rcvd =
1667                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1668         rxcp->pkt_type =
1669                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1670         rxcp->rss_hash =
1671                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1672         if (rxcp->vlanf) {
1673                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1674                                           compl);
1675                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1676                                                compl);
1677         }
1678         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1679 }
1680
1681 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1682                                  struct be_rx_compl_info *rxcp)
1683 {
1684         rxcp->pkt_size =
1685                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1686         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1687         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1688         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1689         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1690         rxcp->ip_csum =
1691                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1692         rxcp->l4_csum =
1693                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1694         rxcp->ipv6 =
1695                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1696         rxcp->rxq_idx =
1697                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1698         rxcp->num_rcvd =
1699                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1700         rxcp->pkt_type =
1701                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1702         rxcp->rss_hash =
1703                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1704         if (rxcp->vlanf) {
1705                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1706                                           compl);
1707                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1708                                                compl);
1709         }
1710         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1711         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1712                                       ip_frag, compl);
1713 }
1714
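/* RX completions are consumed in place: the valid bit (same position in
 * the v0 and v1 layouts) is tested first, rmb() orders that test before
 * the rest of the entry is read, and the bit is cleared once parsed so a
 * wrapped-around tail never re-processes a stale entry.
 */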
1715 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1716 {
1717         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1718         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1719         struct be_adapter *adapter = rxo->adapter;
1720
1721         /* For checking the valid bit it is OK to use either definition as the
1722          * valid bit is at the same position in both v0 and v1 Rx compl */
1723         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1724                 return NULL;
1725
1726         rmb();
1727         be_dws_le_to_cpu(compl, sizeof(*compl));
1728
1729         if (adapter->be3_native)
1730                 be_parse_rx_compl_v1(compl, rxcp);
1731         else
1732                 be_parse_rx_compl_v0(compl, rxcp);
1733
1734         if (rxcp->ip_frag)
1735                 rxcp->l4_csum = 0;
1736
1737         if (rxcp->vlanf) {
1738                 /* vlanf could be wrongly set in some cards.
1739                  * Ignore it if vtm is not set */
1740                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1741                         rxcp->vlanf = 0;
1742
1743                 if (!lancer_chip(adapter))
1744                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1745
1746                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1747                     !adapter->vlan_tag[rxcp->vlan_tag])
1748                         rxcp->vlanf = 0;
1749         }
1750
1751         /* As the compl has been parsed, reset it; we won't touch it again */
1752         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1753
1754         queue_tail_inc(&rxo->cq);
1755         return rxcp;
1756 }
1757
1758 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1759 {
1760         u32 order = get_order(size);
1761
1762         if (order > 0)
1763                 gfp |= __GFP_COMP;
1764         return  alloc_pages(gfp, order);
1765 }
1766
1767 /*
1768  * Allocate a page, split it into fragments of size rx_frag_size and post as
1769  * receive buffers to BE
1770  */
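/* Example (assuming 4K pages and the default rx_frag_size of 2048): each
 * "big page" yields two fragments; the first posting maps the page for DMA,
 * subsequent postings only take a page reference at the next offset, and
 * last_page_user marks the fragment whose completion may drop the mapping.
 */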
1771 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1772 {
1773         struct be_adapter *adapter = rxo->adapter;
1774         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1775         struct be_queue_info *rxq = &rxo->q;
1776         struct page *pagep = NULL;
1777         struct be_eth_rx_d *rxd;
1778         u64 page_dmaaddr = 0, frag_dmaaddr;
1779         u32 posted, page_offset = 0;
1780
1781         page_info = &rxo->page_info_tbl[rxq->head];
1782         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1783                 if (!pagep) {
1784                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1785                         if (unlikely(!pagep)) {
1786                                 rx_stats(rxo)->rx_post_fail++;
1787                                 break;
1788                         }
1789                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1790                                                     0, adapter->big_page_size,
1791                                                     DMA_FROM_DEVICE);
1792                         page_info->page_offset = 0;
1793                 } else {
1794                         get_page(pagep);
1795                         page_info->page_offset = page_offset + rx_frag_size;
1796                 }
1797                 page_offset = page_info->page_offset;
1798                 page_info->page = pagep;
1799                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1800                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1801
1802                 rxd = queue_head_node(rxq);
1803                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1804                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1805
1806                 /* Any space left in the current big page for another frag? */
1807                 if ((page_offset + rx_frag_size + rx_frag_size) >
1808                                         adapter->big_page_size) {
1809                         pagep = NULL;
1810                         page_info->last_page_user = true;
1811                 }
1812
1813                 prev_page_info = page_info;
1814                 queue_head_inc(rxq);
1815                 page_info = &rxo->page_info_tbl[rxq->head];
1816         }
1817         if (pagep)
1818                 prev_page_info->last_page_user = true;
1819
1820         if (posted) {
1821                 atomic_add(posted, &rxq->used);
1822                 be_rxq_notify(adapter, rxq->id, posted);
1823         } else if (atomic_read(&rxq->used) == 0) {
1824                 /* Let be_worker replenish when memory is available */
1825                 rxo->rx_post_starved = true;
1826         }
1827 }
1828
1829 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1830 {
1831         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1832
1833         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1834                 return NULL;
1835
1836         rmb();
1837         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1838
1839         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1840
1841         queue_tail_inc(tx_cq);
1842         return txcp;
1843 }
1844
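/* Reclaims one completed TX request: starting at the header wrb at
 * txq->tail, every fragment wrb up to last_index is unmapped and the skb
 * freed.  The count returned includes the header wrb, so the caller can
 * credit txq->used back in one step.
 */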
1845 static u16 be_tx_compl_process(struct be_adapter *adapter,
1846                 struct be_tx_obj *txo, u16 last_index)
1847 {
1848         struct be_queue_info *txq = &txo->q;
1849         struct be_eth_wrb *wrb;
1850         struct sk_buff **sent_skbs = txo->sent_skb_list;
1851         struct sk_buff *sent_skb;
1852         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1853         bool unmap_skb_hdr = true;
1854
1855         sent_skb = sent_skbs[txq->tail];
1856         BUG_ON(!sent_skb);
1857         sent_skbs[txq->tail] = NULL;
1858
1859         /* skip header wrb */
1860         queue_tail_inc(txq);
1861
1862         do {
1863                 cur_index = txq->tail;
1864                 wrb = queue_tail_node(txq);
1865                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1866                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1867                 unmap_skb_hdr = false;
1868
1869                 num_wrbs++;
1870                 queue_tail_inc(txq);
1871         } while (cur_index != last_index);
1872
1873         kfree_skb(sent_skb);
1874         return num_wrbs;
1875 }
1876
1877 /* Return the number of events in the event queue */
1878 static inline int events_get(struct be_eq_obj *eqo)
1879 {
1880         struct be_eq_entry *eqe;
1881         int num = 0;
1882
1883         do {
1884                 eqe = queue_tail_node(&eqo->q);
1885                 if (eqe->evt == 0)
1886                         break;
1887
1888                 rmb();
1889                 eqe->evt = 0;
1890                 num++;
1891                 queue_tail_inc(&eqo->q);
1892         } while (true);
1893
1894         return num;
1895 }
1896
1897 /* Leaves the EQ in a disarmed state */
1898 static void be_eq_clean(struct be_eq_obj *eqo)
1899 {
1900         int num = events_get(eqo);
1901
1902         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1903 }
1904
1905 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1906 {
1907         struct be_rx_page_info *page_info;
1908         struct be_queue_info *rxq = &rxo->q;
1909         struct be_queue_info *rx_cq = &rxo->cq;
1910         struct be_rx_compl_info *rxcp;
1911         struct be_adapter *adapter = rxo->adapter;
1912         int flush_wait = 0;
1913         u16 tail;
1914
1915         /* Consume pending rx completions.
1916          * Wait for the flush completion (identified by zero num_rcvd)
1917          * to arrive. Notify CQ even when there are no more CQ entries
1918          * for HW to flush partially coalesced CQ entries.
1919          * In Lancer, there is no need to wait for flush compl.
1920          */
1921         for (;;) {
1922                 rxcp = be_rx_compl_get(rxo);
1923                 if (rxcp == NULL) {
1924                         if (lancer_chip(adapter))
1925                                 break;
1926
1927                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1928                                 dev_warn(&adapter->pdev->dev,
1929                                          "did not receive flush compl\n");
1930                                 break;
1931                         }
1932                         be_cq_notify(adapter, rx_cq->id, true, 0);
1933                         mdelay(1);
1934                 } else {
1935                         be_rx_compl_discard(rxo, rxcp);
1936                         be_cq_notify(adapter, rx_cq->id, false, 1);
1937                         if (rxcp->num_rcvd == 0)
1938                                 break;
1939                 }
1940         }
1941
1942         /* After cleanup, leave the CQ in an unarmed state */
1943         be_cq_notify(adapter, rx_cq->id, false, 0);
1944
1945         /* Then free posted rx buffers that were not used */
1946         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1947         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1948                 page_info = get_rx_page_info(rxo, tail);
1949                 put_page(page_info->page);
1950                 memset(page_info, 0, sizeof(*page_info));
1951         }
1952         BUG_ON(atomic_read(&rxq->used));
1953         rxq->tail = rxq->head = 0;
1954 }
1955
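/* Drain path used on close: each TX CQ is polled in 1 ms steps for at most
 * 200 iterations; anything still posted after that will never complete, so
 * it is reclaimed straight off the ring via wrb_cnt_for_skb() and
 * be_tx_compl_process().
 */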
1956 static void be_tx_compl_clean(struct be_adapter *adapter)
1957 {
1958         struct be_tx_obj *txo;
1959         struct be_queue_info *txq;
1960         struct be_eth_tx_compl *txcp;
1961         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1962         struct sk_buff *sent_skb;
1963         bool dummy_wrb;
1964         int i, pending_txqs;
1965
1966         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1967         do {
1968                 pending_txqs = adapter->num_tx_qs;
1969
1970                 for_all_tx_queues(adapter, txo, i) {
1971                         txq = &txo->q;
1972                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1973                                 end_idx =
1974                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1975                                                       wrb_index, txcp);
1976                                 num_wrbs += be_tx_compl_process(adapter, txo,
1977                                                                 end_idx);
1978                                 cmpl++;
1979                         }
1980                         if (cmpl) {
1981                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1982                                 atomic_sub(num_wrbs, &txq->used);
1983                                 cmpl = 0;
1984                                 num_wrbs = 0;
1985                         }
1986                         if (atomic_read(&txq->used) == 0)
1987                                 pending_txqs--;
1988                 }
1989
1990                 if (pending_txqs == 0 || ++timeo > 200)
1991                         break;
1992
1993                 mdelay(1);
1994         } while (true);
1995
1996         for_all_tx_queues(adapter, txo, i) {
1997                 txq = &txo->q;
1998                 if (atomic_read(&txq->used))
1999                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2000                                 atomic_read(&txq->used));
2001
2002                 /* free posted tx for which compls will never arrive */
2003                 while (atomic_read(&txq->used)) {
2004                         sent_skb = txo->sent_skb_list[txq->tail];
2005                         end_idx = txq->tail;
2006                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2007                                                    &dummy_wrb);
2008                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2009                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2010                         atomic_sub(num_wrbs, &txq->used);
2011                 }
2012         }
2013 }
2014
2015 static void be_evt_queues_destroy(struct be_adapter *adapter)
2016 {
2017         struct be_eq_obj *eqo;
2018         int i;
2019
2020         for_all_evt_queues(adapter, eqo, i) {
2021                 if (eqo->q.created) {
2022                         be_eq_clean(eqo);
2023                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2024                         netif_napi_del(&eqo->napi);
2025                 }
2026                 be_queue_free(adapter, &eqo->q);
2027         }
2028 }
2029
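/* One EQ (with its own NAPI context) is created per vector, capped by both
 * the number of IRQs actually granted and the configured queue count in
 * adapter->cfg_num_qs.
 */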
2030 static int be_evt_queues_create(struct be_adapter *adapter)
2031 {
2032         struct be_queue_info *eq;
2033         struct be_eq_obj *eqo;
2034         struct be_aic_obj *aic;
2035         int i, rc;
2036
2037         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2038                                     adapter->cfg_num_qs);
2039
2040         for_all_evt_queues(adapter, eqo, i) {
2041                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2042                                BE_NAPI_WEIGHT);
2043                 aic = &adapter->aic_obj[i];
2044                 eqo->adapter = adapter;
2045                 eqo->tx_budget = BE_TX_BUDGET;
2046                 eqo->idx = i;
2047                 aic->max_eqd = BE_MAX_EQD;
2048                 aic->enable = true;
2049
2050                 eq = &eqo->q;
2051                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2052                                         sizeof(struct be_eq_entry));
2053                 if (rc)
2054                         return rc;
2055
2056                 rc = be_cmd_eq_create(adapter, eqo);
2057                 if (rc)
2058                         return rc;
2059         }
2060         return 0;
2061 }
2062
2063 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2064 {
2065         struct be_queue_info *q;
2066
2067         q = &adapter->mcc_obj.q;
2068         if (q->created)
2069                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2070         be_queue_free(adapter, q);
2071
2072         q = &adapter->mcc_obj.cq;
2073         if (q->created)
2074                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2075         be_queue_free(adapter, q);
2076 }
2077
2078 /* Must be called only after TX qs are created as MCC shares TX EQ */
2079 static int be_mcc_queues_create(struct be_adapter *adapter)
2080 {
2081         struct be_queue_info *q, *cq;
2082
2083         cq = &adapter->mcc_obj.cq;
2084         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2085                         sizeof(struct be_mcc_compl)))
2086                 goto err;
2087
2088         /* Use the default EQ for MCC completions */
2089         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2090                 goto mcc_cq_free;
2091
2092         q = &adapter->mcc_obj.q;
2093         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2094                 goto mcc_cq_destroy;
2095
2096         if (be_cmd_mccq_create(adapter, q, cq))
2097                 goto mcc_q_free;
2098
2099         return 0;
2100
2101 mcc_q_free:
2102         be_queue_free(adapter, q);
2103 mcc_cq_destroy:
2104         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2105 mcc_cq_free:
2106         be_queue_free(adapter, cq);
2107 err:
2108         return -1;
2109 }
2110
2111 static void be_tx_queues_destroy(struct be_adapter *adapter)
2112 {
2113         struct be_queue_info *q;
2114         struct be_tx_obj *txo;
2115         u8 i;
2116
2117         for_all_tx_queues(adapter, txo, i) {
2118                 q = &txo->q;
2119                 if (q->created)
2120                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2121                 be_queue_free(adapter, q);
2122
2123                 q = &txo->cq;
2124                 if (q->created)
2125                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2126                 be_queue_free(adapter, q);
2127         }
2128 }
2129
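/* Each TXQ's CQ is bound to EQ (i % num_evt_qs).  Since num_tx_qs is
 * capped at num_evt_qs below, the mapping is normally 1:1; the modulo only
 * matters if TXQs were ever to outnumber EQs.
 */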
2130 static int be_tx_qs_create(struct be_adapter *adapter)
2131 {
2132         struct be_queue_info *cq, *eq;
2133         struct be_tx_obj *txo;
2134         int status, i;
2135
2136         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2137
2138         for_all_tx_queues(adapter, txo, i) {
2139                 cq = &txo->cq;
2140                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2141                                         sizeof(struct be_eth_tx_compl));
2142                 if (status)
2143                         return status;
2144
2145                 /* If num_evt_qs is less than num_tx_qs, more than one
2146                  * TXQ shares an EQ
2147                  */
2148                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2149                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2150                 if (status)
2151                         return status;
2152
2153                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2154                                         sizeof(struct be_eth_wrb));
2155                 if (status)
2156                         return status;
2157
2158                 status = be_cmd_txq_create(adapter, txo);
2159                 if (status)
2160                         return status;
2161         }
2162
2163         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2164                  adapter->num_tx_qs);
2165         return 0;
2166 }
2167
2168 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2169 {
2170         struct be_queue_info *q;
2171         struct be_rx_obj *rxo;
2172         int i;
2173
2174         for_all_rx_queues(adapter, rxo, i) {
2175                 q = &rxo->cq;
2176                 if (q->created)
2177                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2178                 be_queue_free(adapter, q);
2179         }
2180 }
2181
2182 static int be_rx_cqs_create(struct be_adapter *adapter)
2183 {
2184         struct be_queue_info *eq, *cq;
2185         struct be_rx_obj *rxo;
2186         int rc, i;
2187
2188         /* We can create as many RSS rings as there are EQs. */
2189         adapter->num_rx_qs = adapter->num_evt_qs;
2190
2191         /* We'll use RSS only if at least 2 RSS rings are supported.
2192          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2193          */
2194         if (adapter->num_rx_qs > 1)
2195                 adapter->num_rx_qs++;
2196
2197         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2198         for_all_rx_queues(adapter, rxo, i) {
2199                 rxo->adapter = adapter;
2200                 cq = &rxo->cq;
2201                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2202                                 sizeof(struct be_eth_rx_compl));
2203                 if (rc)
2204                         return rc;
2205
2206                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2207                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2208                 if (rc)
2209                         return rc;
2210         }
2211
2212         dev_info(&adapter->pdev->dev,
2213                  "created %d RSS queue(s) and 1 default RX queue\n",
2214                  adapter->num_rx_qs - 1);
2215         return 0;
2216 }
2217
2218 static irqreturn_t be_intx(int irq, void *dev)
2219 {
2220         struct be_eq_obj *eqo = dev;
2221         struct be_adapter *adapter = eqo->adapter;
2222         int num_evts = 0;
2223
2224         /* IRQ is not expected when NAPI is scheduled as the EQ
2225          * will not be armed.
2226          * But, this can happen on Lancer INTx where it takes
2227          * a while to de-assert INTx or in BE2 where occasionally
2228          * an interrupt may be raised even when EQ is unarmed.
2229          * If NAPI is already scheduled, then counting & notifying
2230          * events will orphan them.
2231          */
2232         if (napi_schedule_prep(&eqo->napi)) {
2233                 num_evts = events_get(eqo);
2234                 __napi_schedule(&eqo->napi);
2235                 if (num_evts)
2236                         eqo->spurious_intr = 0;
2237         }
2238         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2239
2240         /* Return IRQ_HANDLED only for the first spurious intr
2241          * after a valid intr to stop the kernel from branding
2242          * this irq as a bad one!
2243          */
2244         if (num_evts || eqo->spurious_intr++ == 0)
2245                 return IRQ_HANDLED;
2246         else
2247                 return IRQ_NONE;
2248 }
2249
2250 static irqreturn_t be_msix(int irq, void *dev)
2251 {
2252         struct be_eq_obj *eqo = dev;
2253
2254         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2255         napi_schedule(&eqo->napi);
2256         return IRQ_HANDLED;
2257 }
2258
2259 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2260 {
2261         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2262 }
2263
2264 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2265                         int budget)
2266 {
2267         struct be_adapter *adapter = rxo->adapter;
2268         struct be_queue_info *rx_cq = &rxo->cq;
2269         struct be_rx_compl_info *rxcp;
2270         u32 work_done;
2271
2272         for (work_done = 0; work_done < budget; work_done++) {
2273                 rxcp = be_rx_compl_get(rxo);
2274                 if (!rxcp)
2275                         break;
2276
2277                 /* Is it a flush compl that has no data */
2278                 if (unlikely(rxcp->num_rcvd == 0))
2279                         goto loop_continue;
2280
2281                 /* Discard a compl with partial DMA (Lancer B0) */
2282                 if (unlikely(!rxcp->pkt_size)) {
2283                         be_rx_compl_discard(rxo, rxcp);
2284                         goto loop_continue;
2285                 }
2286
2287                 /* On BE, drop pkts that arrive due to imperfect filtering
2288                  * in promiscuous mode on some SKUs
2289                  */
2290                 if (unlikely(rxcp->port != adapter->port_num &&
2291                                 !lancer_chip(adapter))) {
2292                         be_rx_compl_discard(rxo, rxcp);
2293                         goto loop_continue;
2294                 }
2295
2296                 if (do_gro(rxcp))
2297                         be_rx_compl_process_gro(rxo, napi, rxcp);
2298                 else
2299                         be_rx_compl_process(rxo, rxcp);
2300 loop_continue:
2301                 be_rx_stats_update(rxo, rxcp);
2302         }
2303
2304         if (work_done) {
2305                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2306
2307                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2308                         be_post_rx_frags(rxo, GFP_ATOMIC);
2309         }
2310
2311         return work_done;
2312 }
2313
2314 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2315                           int budget, int idx)
2316 {
2317         struct be_eth_tx_compl *txcp;
2318         int num_wrbs = 0, work_done;
2319
2320         for (work_done = 0; work_done < budget; work_done++) {
2321                 txcp = be_tx_compl_get(&txo->cq);
2322                 if (!txcp)
2323                         break;
2324                 num_wrbs += be_tx_compl_process(adapter, txo,
2325                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2326                                         wrb_index, txcp));
2327         }
2328
2329         if (work_done) {
2330                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2331                 atomic_sub(num_wrbs, &txo->q.used);
2332
2333                 /* As Tx wrbs have been freed up, wake up netdev queue
2334                  * if it was stopped due to lack of tx wrbs.  */
2335                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2336                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2337                         netif_wake_subqueue(adapter->netdev, idx);
2338                 }
2339
2340                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2341                 tx_stats(txo)->tx_compl += work_done;
2342                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2343         }
2344         return (work_done < budget); /* Done */
2345 }
2346
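/* NAPI handler: every TXQ and RXQ hanging off this EQ is serviced within
 * the given budget (plus the MCC queue on the EQ that owns it).  Leftover
 * TX work forces max_work to the full budget so the kernel keeps polling;
 * only when max_work < budget is the EQ re-armed and NAPI completed.
 */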
2347 int be_poll(struct napi_struct *napi, int budget)
2348 {
2349         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2350         struct be_adapter *adapter = eqo->adapter;
2351         int max_work = 0, work, i, num_evts;
2352         bool tx_done;
2353
2354         num_evts = events_get(eqo);
2355
2356         /* Process all TXQs serviced by this EQ */
2357         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2358                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2359                                         eqo->tx_budget, i);
2360                 if (!tx_done)
2361                         max_work = budget;
2362         }
2363
2364         /* This loop will iterate twice for EQ0 in which
2365          * completions of the last RXQ (default one) are also processed.
2366          * For other EQs the loop iterates only once
2367          */
2368         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2369                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2370                 max_work = max(work, max_work);
2371         }
2372
2373         if (is_mcc_eqo(eqo))
2374                 be_process_mcc(adapter);
2375
2376         if (max_work < budget) {
2377                 napi_complete(napi);
2378                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2379         } else {
2380                 /* As we'll continue in polling mode, count and clear events */
2381                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2382         }
2383         return max_work;
2384 }
2385
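/* Error detection is chip-specific: Lancer exposes SLIPORT status/error
 * registers through the doorbell BAR, while BE-family chips report
 * unrecoverable errors (UEs) in PCI config space, with the mask registers
 * filtering out bits that may be legitimately set.
 */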
2386 void be_detect_error(struct be_adapter *adapter)
2387 {
2388         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2389         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2390         u32 i;
2391
2392         if (be_hw_error(adapter))
2393                 return;
2394
2395         if (lancer_chip(adapter)) {
2396                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2397                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2398                         sliport_err1 = ioread32(adapter->db +
2399                                         SLIPORT_ERROR1_OFFSET);
2400                         sliport_err2 = ioread32(adapter->db +
2401                                         SLIPORT_ERROR2_OFFSET);
2402                 }
2403         } else {
2404                 pci_read_config_dword(adapter->pdev,
2405                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2406                 pci_read_config_dword(adapter->pdev,
2407                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2408                 pci_read_config_dword(adapter->pdev,
2409                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2410                 pci_read_config_dword(adapter->pdev,
2411                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2412
2413                 ue_lo = (ue_lo & ~ue_lo_mask);
2414                 ue_hi = (ue_hi & ~ue_hi_mask);
2415         }
2416
2417         /* On certain platforms BE hardware can indicate spurious UEs.
2418          * Allow the h/w to stop working completely in case of a real UE.
2419          * Hence hw_error is not set for UE detection.
2420          */
2421         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2422                 adapter->hw_error = true;
2423                 dev_err(&adapter->pdev->dev,
2424                         "Error detected in the card\n");
2425         }
2426
2427         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2428                 dev_err(&adapter->pdev->dev,
2429                         "ERR: sliport status 0x%x\n", sliport_status);
2430                 dev_err(&adapter->pdev->dev,
2431                         "ERR: sliport error1 0x%x\n", sliport_err1);
2432                 dev_err(&adapter->pdev->dev,
2433                         "ERR: sliport error2 0x%x\n", sliport_err2);
2434         }
2435
2436         if (ue_lo) {
2437                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2438                         if (ue_lo & 1)
2439                                 dev_err(&adapter->pdev->dev,
2440                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2441                 }
2442         }
2443
2444         if (ue_hi) {
2445                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2446                         if (ue_hi & 1)
2447                                 dev_err(&adapter->pdev->dev,
2448                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2449                 }
2450         }
2451
2452 }
2453
2454 static void be_msix_disable(struct be_adapter *adapter)
2455 {
2456         if (msix_enabled(adapter)) {
2457                 pci_disable_msix(adapter->pdev);
2458                 adapter->num_msix_vec = 0;
2459                 adapter->num_msix_roce_vec = 0;
2460         }
2461 }
2462
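/* pci_enable_msix(), as used here, returns 0 on success or, when it cannot
 * grant the full request, the number of vectors that could be allocated;
 * the code retries once with that smaller count before letting PFs fall
 * back to INTx.
 */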
2463 static int be_msix_enable(struct be_adapter *adapter)
2464 {
2465         int i, status, num_vec;
2466         struct device *dev = &adapter->pdev->dev;
2467
2468         /* If RoCE is supported, program the max number of NIC vectors that
2469          * may be configured via set-channels, along with vectors needed for
2470          * RoCe. Else, just program the number we'll use initially.
2471          * RoCE. Else, just program the number we'll use initially.
2472         if (be_roce_supported(adapter))
2473                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2474                                 2 * num_online_cpus());
2475         else
2476                 num_vec = adapter->cfg_num_qs;
2477
2478         for (i = 0; i < num_vec; i++)
2479                 adapter->msix_entries[i].entry = i;
2480
2481         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2482         if (status == 0) {
2483                 goto done;
2484         } else if (status >= MIN_MSIX_VECTORS) {
2485                 num_vec = status;
2486                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2487                                          num_vec);
2488                 if (!status)
2489                         goto done;
2490         }
2491
2492         dev_warn(dev, "MSIx enable failed\n");
2493
2494         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2495         if (!be_physfn(adapter))
2496                 return status;
2497         return 0;
2498 done:
2499         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2500                 adapter->num_msix_roce_vec = num_vec / 2;
2501                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2502                          adapter->num_msix_roce_vec);
2503         }
2504
2505         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2506
2507         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2508                  adapter->num_msix_vec);
2509         return 0;
2510 }
2511
2512 static inline int be_msix_vec_get(struct be_adapter *adapter,
2513                                 struct be_eq_obj *eqo)
2514 {
2515         return adapter->msix_entries[eqo->msix_idx].vector;
2516 }
2517
2518 static int be_msix_register(struct be_adapter *adapter)
2519 {
2520         struct net_device *netdev = adapter->netdev;
2521         struct be_eq_obj *eqo;
2522         int status, i, vec;
2523
2524         for_all_evt_queues(adapter, eqo, i) {
2525                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2526                 vec = be_msix_vec_get(adapter, eqo);
2527                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2528                 if (status)
2529                         goto err_msix;
2530         }
2531
2532         return 0;
2533 err_msix:
2534         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2535                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2536         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2537                 status);
2538         be_msix_disable(adapter);
2539         return status;
2540 }
2541
2542 static int be_irq_register(struct be_adapter *adapter)
2543 {
2544         struct net_device *netdev = adapter->netdev;
2545         int status;
2546
2547         if (msix_enabled(adapter)) {
2548                 status = be_msix_register(adapter);
2549                 if (status == 0)
2550                         goto done;
2551                 /* INTx is not supported for VF */
2552                 if (!be_physfn(adapter))
2553                         return status;
2554         }
2555
2556         /* INTx: only the first EQ is used */
2557         netdev->irq = adapter->pdev->irq;
2558         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2559                              &adapter->eq_obj[0]);
2560         if (status) {
2561                 dev_err(&adapter->pdev->dev,
2562                         "INTx request IRQ failed - err %d\n", status);
2563                 return status;
2564         }
2565 done:
2566         adapter->isr_registered = true;
2567         return 0;
2568 }
2569
2570 static void be_irq_unregister(struct be_adapter *adapter)
2571 {
2572         struct net_device *netdev = adapter->netdev;
2573         struct be_eq_obj *eqo;
2574         int i;
2575
2576         if (!adapter->isr_registered)
2577                 return;
2578
2579         /* INTx */
2580         if (!msix_enabled(adapter)) {
2581                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2582                 goto done;
2583         }
2584
2585         /* MSIx */
2586         for_all_evt_queues(adapter, eqo, i)
2587                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2588
2589 done:
2590         adapter->isr_registered = false;
2591 }
2592
2593 static void be_rx_qs_destroy(struct be_adapter *adapter)
2594 {
2595         struct be_queue_info *q;
2596         struct be_rx_obj *rxo;
2597         int i;
2598
2599         for_all_rx_queues(adapter, rxo, i) {
2600                 q = &rxo->q;
2601                 if (q->created) {
2602                         be_cmd_rxq_destroy(adapter, q);
2603                         be_rx_cq_clean(rxo);
2604                 }
2605                 be_queue_free(adapter, q);
2606         }
2607 }
2608
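/* Teardown order matters here: NAPI is disabled before async MCC events
 * are turned off, TX is drained before the RXQs are destroyed, and each
 * EQ's IRQ is synchronized and the EQ cleaned before the handlers are
 * unregistered.
 */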
2609 static int be_close(struct net_device *netdev)
2610 {
2611         struct be_adapter *adapter = netdev_priv(netdev);
2612         struct be_eq_obj *eqo;
2613         int i;
2614
2615         be_roce_dev_close(adapter);
2616
2617         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2618                 for_all_evt_queues(adapter, eqo, i)
2619                         napi_disable(&eqo->napi);
2620                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2621         }
2622
2623         be_async_mcc_disable(adapter);
2624
2625         /* Wait for all pending tx completions to arrive so that
2626          * all tx skbs are freed.
2627          */
2628         netif_tx_disable(netdev);
2629         be_tx_compl_clean(adapter);
2630
2631         be_rx_qs_destroy(adapter);
2632
2633         for_all_evt_queues(adapter, eqo, i) {
2634                 if (msix_enabled(adapter))
2635                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2636                 else
2637                         synchronize_irq(netdev->irq);
2638                 be_eq_clean(eqo);
2639         }
2640
2641         be_irq_unregister(adapter);
2642
2643         return 0;
2644 }
2645
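/* The 128-entry RSS indirection table is filled round-robin with the
 * rss_ids of the RSS rings; e.g. with 4 RSS rings the pattern
 * id0,id1,id2,id3 repeats 32 times.  UDP RSS is enabled only on non-BEx
 * chips.
 */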
2646 static int be_rx_qs_create(struct be_adapter *adapter)
2647 {
2648         struct be_rx_obj *rxo;
2649         int rc, i, j;
2650         u8 rsstable[128];
2651
2652         for_all_rx_queues(adapter, rxo, i) {
2653                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2654                                     sizeof(struct be_eth_rx_d));
2655                 if (rc)
2656                         return rc;
2657         }
2658
2659         /* The FW would like the default RXQ to be created first */
2660         rxo = default_rxo(adapter);
2661         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2662                                adapter->if_handle, false, &rxo->rss_id);
2663         if (rc)
2664                 return rc;
2665
2666         for_all_rss_queues(adapter, rxo, i) {
2667                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2668                                        rx_frag_size, adapter->if_handle,
2669                                        true, &rxo->rss_id);
2670                 if (rc)
2671                         return rc;
2672         }
2673
2674         if (be_multi_rxq(adapter)) {
2675                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2676                         for_all_rss_queues(adapter, rxo, i) {
2677                                 if ((j + i) >= 128)
2678                                         break;
2679                                 rsstable[j + i] = rxo->rss_id;
2680                         }
2681                 }
2682                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2683                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2684
2685                 if (!BEx_chip(adapter))
2686                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2687                                                 RSS_ENABLE_UDP_IPV6;
2688
2689                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2690                                        128);
2691                 if (rc) {
2692                         adapter->rss_flags = 0;
2693                         return rc;
2694                 }
2695         }
2696
2697         /* First time posting */
2698         for_all_rx_queues(adapter, rxo, i)
2699                 be_post_rx_frags(rxo, GFP_KERNEL);
2700         return 0;
2701 }
2702
2703 static int be_open(struct net_device *netdev)
2704 {
2705         struct be_adapter *adapter = netdev_priv(netdev);
2706         struct be_eq_obj *eqo;
2707         struct be_rx_obj *rxo;
2708         struct be_tx_obj *txo;
2709         u8 link_status;
2710         int status, i;
2711
2712         status = be_rx_qs_create(adapter);
2713         if (status)
2714                 goto err;
2715
2716         status = be_irq_register(adapter);
2717         if (status)
2718                 goto err;
2719
2720         for_all_rx_queues(adapter, rxo, i)
2721                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2722
2723         for_all_tx_queues(adapter, txo, i)
2724                 be_cq_notify(adapter, txo->cq.id, true, 0);
2725
2726         be_async_mcc_enable(adapter);
2727
2728         for_all_evt_queues(adapter, eqo, i) {
2729                 napi_enable(&eqo->napi);
2730                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2731         }
2732         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2733
2734         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2735         if (!status)
2736                 be_link_status_update(adapter, link_status);
2737
2738         netif_tx_start_all_queues(netdev);
2739         be_roce_dev_open(adapter);
2740         return 0;
2741 err:
2742         be_close(adapter->netdev);
2743         return -EIO;
2744 }
2745
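/* Wake-on-LAN sketch: enabling writes the PM control bits in PCI config
 * space, programs the netdev MAC as the magic-packet pattern and arms
 * D3hot/D3cold wake; disabling programs an all-zero MAC and disarms both
 * wake states.
 */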
2746 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2747 {
2748         struct be_dma_mem cmd;
2749         int status = 0;
2750         u8 mac[ETH_ALEN];
2751
2752         memset(mac, 0, ETH_ALEN);
2753
2754         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2755         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2756                                      GFP_KERNEL);
2757         if (cmd.va == NULL)
2758                 return -1;
2759
2760         if (enable) {
2761                 status = pci_write_config_dword(adapter->pdev,
2762                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2763                 if (status) {
2764                         dev_err(&adapter->pdev->dev,
2765                                 "Could not enable Wake-on-lan\n");
2766                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2767                                           cmd.dma);
2768                         return status;
2769                 }
2770                 status = be_cmd_enable_magic_wol(adapter,
2771                                 adapter->netdev->dev_addr, &cmd);
2772                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2773                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2774         } else {
2775                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2776                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2777                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2778         }
2779
2780         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2781         return status;
2782 }
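
/* Usage sketch (assumption, not part of this file): magic-packet wake
 * is typically armed from userspace via ethtool, e.g.:
 *
 *   ethtool -s eth0 wol g
 *
 * On suspend the driver is then expected to call
 * be_setup_wol(adapter, true) so the FW keeps listening for the magic
 * packet while the device sits in D3hot/D3cold.
 */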
2783
2784 /*
2785  * Generate a seed MAC address from the PF MAC address using jhash.
2786  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2787  * These addresses are programmed in the ASIC by the PF and the VF driver
2788  * queries for the MAC address during its probe.
2789  */
2790 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2791 {
2792         u32 vf;
2793         int status = 0;
2794         u8 mac[ETH_ALEN];
2795         struct be_vf_cfg *vf_cfg;
2796
2797         be_vf_eth_addr_generate(adapter, mac);
2798
2799         for_all_vfs(adapter, vf_cfg, vf) {
2800                 if (BEx_chip(adapter))
2801                         status = be_cmd_pmac_add(adapter, mac,
2802                                                  vf_cfg->if_handle,
2803                                                  &vf_cfg->pmac_id, vf + 1);
2804                 else
2805                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2806                                                 vf + 1);
2807
2808                 if (status)
2809                         dev_err(&adapter->pdev->dev,
2810                                 "MAC address assignment failed for VF %d\n", vf);
2811                 else
2812                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2813
2814                 mac[5] += 1;
2815         }
2816         return status;
2817 }
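
/* Sketch of the increment above (illustrative): only mac[5] is bumped,
 * and u8 arithmetic wraps without carrying into mac[4]:
 *
 *   u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0xff };
 *   mac[5] += 1;    (mac now ends in 00:00; no carry into mac[4])
 *
 * Addresses would repeat after 256 VFs; in practice num_vfs stays far
 * below that, so the wrap is harmless.
 */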
2818
2819 static int be_vfs_mac_query(struct be_adapter *adapter)
2820 {
2821         int status, vf;
2822         u8 mac[ETH_ALEN];
2823         struct be_vf_cfg *vf_cfg;
2824         bool active = false;
2825
2826         for_all_vfs(adapter, vf_cfg, vf) {
2827                 be_cmd_get_mac_from_list(adapter, mac, &active,
2828                                          &vf_cfg->pmac_id, 0);
2829
2830                 status = be_cmd_mac_addr_query(adapter, mac, false,
2831                                                vf_cfg->if_handle, 0);
2832                 if (status)
2833                         return status;
2834                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2835         }
2836         return 0;
2837 }
2838
2839 static void be_vf_clear(struct be_adapter *adapter)
2840 {
2841         struct be_vf_cfg *vf_cfg;
2842         u32 vf;
2843
2844         if (pci_vfs_assigned(adapter->pdev)) {
2845                 dev_warn(&adapter->pdev->dev,
2846                          "VFs are assigned to VMs: not disabling VFs\n");
2847                 goto done;
2848         }
2849
2850         pci_disable_sriov(adapter->pdev);
2851
2852         for_all_vfs(adapter, vf_cfg, vf) {
2853                 if (BEx_chip(adapter))
2854                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2855                                         vf_cfg->pmac_id, vf + 1);
2856                 else
2857                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2858                                        vf + 1);
2859
2860                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2861         }
2862 done:
2863         kfree(adapter->vf_cfg);
2864         adapter->num_vfs = 0;
2865 }
2866
2867 static void be_clear_queues(struct be_adapter *adapter)
2868 {
2869         be_mcc_queues_destroy(adapter);
2870         be_rx_cqs_destroy(adapter);
2871         be_tx_queues_destroy(adapter);
2872         be_evt_queues_destroy(adapter);
2873 }
2874
2875 static void be_cancel_worker(struct be_adapter *adapter)
2876 {
2877         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2878                 cancel_delayed_work_sync(&adapter->work);
2879                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2880         }
2881 }
2882
2883 static int be_clear(struct be_adapter *adapter)
2884 {
2885         int i;
2886
2887         be_cancel_worker(adapter);
2888
2889         if (sriov_enabled(adapter))
2890                 be_vf_clear(adapter);
2891
2892         /* delete the primary mac along with the uc-mac list */
2893         for (i = 0; i < (adapter->uc_macs + 1); i++)
2894                 be_cmd_pmac_del(adapter, adapter->if_handle,
2895                                 adapter->pmac_id[i], 0);
2896         adapter->uc_macs = 0;
2897
2898         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2899
2900         be_clear_queues(adapter);
2901
2902         kfree(adapter->pmac_id);
2903         adapter->pmac_id = NULL;
2904
2905         be_msix_disable(adapter);
2906         return 0;
2907 }
2908
2909 static int be_vfs_if_create(struct be_adapter *adapter)
2910 {
2911         struct be_resources res = {0};
2912         struct be_vf_cfg *vf_cfg;
2913         u32 cap_flags, en_flags, vf;
2914         int status = 0;
2915
2916         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2917                     BE_IF_FLAGS_MULTICAST;
2918
2919         for_all_vfs(adapter, vf_cfg, vf) {
2920                 if (!BE3_chip(adapter)) {
2921                         status = be_cmd_get_profile_config(adapter, &res,
2922                                                            vf + 1);
2923                         if (!status)
2924                                 cap_flags = res.if_cap_flags;
2925                 }
2926
2927                 /* If a FW profile exists, then cap_flags are updated */
2928                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2929                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2930                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2931                                           &vf_cfg->if_handle, vf + 1);
2932                 if (status)
2933                         goto err;
2934         }
2935 err:
2936         return status;
2937 }
2938
2939 static int be_vf_setup_init(struct be_adapter *adapter)
2940 {
2941         struct be_vf_cfg *vf_cfg;
2942         int vf;
2943
2944         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2945                                   GFP_KERNEL);
2946         if (!adapter->vf_cfg)
2947                 return -ENOMEM;
2948
2949         for_all_vfs(adapter, vf_cfg, vf) {
2950                 vf_cfg->if_handle = -1;
2951                 vf_cfg->pmac_id = -1;
2952         }
2953         return 0;
2954 }
2955
2956 static int be_vf_setup(struct be_adapter *adapter)
2957 {
2958         struct be_vf_cfg *vf_cfg;
2959         u16 def_vlan, lnk_speed;
2960         int status, old_vfs, vf;
2961         struct device *dev = &adapter->pdev->dev;
2962         u32 privileges;
2963
2964         old_vfs = pci_num_vf(adapter->pdev);
2965         if (old_vfs) {
2966                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2967                 if (old_vfs != num_vfs)
2968                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2969                 adapter->num_vfs = old_vfs;
2970         } else {
2971                 if (num_vfs > be_max_vfs(adapter))
2972                         dev_info(dev, "Device supports %d VFs and not %d\n",
2973                                  be_max_vfs(adapter), num_vfs);
2974                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
2975                 if (!adapter->num_vfs)
2976                         return 0;
2977         }
2978
2979         status = be_vf_setup_init(adapter);
2980         if (status)
2981                 goto err;
2982
2983         if (old_vfs) {
2984                 for_all_vfs(adapter, vf_cfg, vf) {
2985                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2986                         if (status)
2987                                 goto err;
2988                 }
2989         } else {
2990                 status = be_vfs_if_create(adapter);
2991                 if (status)
2992                         goto err;
2993         }
2994
2995         if (old_vfs) {
2996                 status = be_vfs_mac_query(adapter);
2997                 if (status)
2998                         goto err;
2999         } else {
3000                 status = be_vf_eth_addr_config(adapter);
3001                 if (status)
3002                         goto err;
3003         }
3004
3005         for_all_vfs(adapter, vf_cfg, vf) {
3006                 /* Allow VFs to program MAC/VLAN filters */
3007                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3008                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3009                         status = be_cmd_set_fn_privileges(adapter,
3010                                                           privileges |
3011                                                           BE_PRIV_FILTMGMT,
3012                                                           vf + 1);
3013                         if (!status)
3014                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3015                                          vf);
3016                 }
3017
3018                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
3019                  * Allow the full available bandwidth.
3020                  */
3021                 if (BE3_chip(adapter) && !old_vfs)
3022                         be_cmd_set_qos(adapter, 1000, vf + 1);
3023
3024                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3025                                                   NULL, vf + 1);
3026                 if (!status)
3027                         vf_cfg->tx_rate = lnk_speed;
3028
3029                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
3030                                                vf + 1, vf_cfg->if_handle, NULL);
3031                 if (status)
3032                         goto err;
3033                 vf_cfg->def_vid = def_vlan;
3034
3035                 if (!old_vfs)
3036                         be_cmd_enable_vf(adapter, vf + 1);
3037         }
3038
3039         if (!old_vfs) {
3040                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3041                 if (status) {
3042                         dev_err(dev, "SRIOV enable failed\n");
3043                         adapter->num_vfs = 0;
3044                         goto err;
3045                 }
3046         }
3047         return 0;
3048 err:
3049         dev_err(dev, "VF setup failed\n");
3050         be_vf_clear(adapter);
3051         return status;
3052 }
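
/* Usage sketch (assumption): on a freshly probed PF, SR-IOV is enabled
 * through the num_vfs module parameter, e.g.:
 *
 *   modprobe be2net num_vfs=4
 *
 * If the VFs were already enabled before this driver loaded (the
 * old_vfs case above), the existing if_handles and MAC addresses are
 * queried from FW instead of being created afresh.
 */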
3053
3054 /* On BE2/BE3, the FW does not advertise the supported resource limits */
3055 static void BEx_get_resources(struct be_adapter *adapter,
3056                               struct be_resources *res)
3057 {
3058         struct pci_dev *pdev = adapter->pdev;
3059         bool use_sriov = false;
3060
3061         if (BE3_chip(adapter) && sriov_want(adapter)) {
3062                 int max_vfs;
3063
3064                 max_vfs = pci_sriov_get_totalvfs(pdev);
3065                 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3066                 use_sriov = res->max_vfs;
3067         }
3068
3069         if (be_physfn(adapter))
3070                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3071         else
3072                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3073
3074         if (adapter->function_mode & FLEX10_MODE)
3075                 res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
3076         else if (adapter->function_mode & UMC_ENABLED)
3077                 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
3078         else
3079                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3080         res->max_mcast_mac = BE_MAX_MC;
3081
3082         /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3083         if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3084             !be_physfn(adapter) || (adapter->port_num > 1))
3085                 res->max_tx_qs = 1;
3086         else
3087                 res->max_tx_qs = BE3_MAX_TX_QS;
3088
3089         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3090             !use_sriov && be_physfn(adapter))
3091                 res->max_rss_qs = (adapter->be3_native) ?
3092                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3093         res->max_rx_qs = res->max_rss_qs + 1;
3094
3095         res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
3096
3097         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3098         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3099                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3100 }
3101
3102 static void be_setup_init(struct be_adapter *adapter)
3103 {
3104         adapter->vlan_prio_bmap = 0xff;
3105         adapter->phy.link_speed = -1;
3106         adapter->if_handle = -1;
3107         adapter->be3_native = false;
3108         adapter->promiscuous = false;
3109         if (be_physfn(adapter))
3110                 adapter->cmd_privileges = MAX_PRIVILEGES;
3111         else
3112                 adapter->cmd_privileges = MIN_PRIVILEGES;
3113 }
3114
3115 static int be_get_resources(struct be_adapter *adapter)
3116 {
3117         struct device *dev = &adapter->pdev->dev;
3118         struct be_resources res = {0};
3119         int status;
3120
3121         if (BEx_chip(adapter)) {
3122                 BEx_get_resources(adapter, &res);
3123                 adapter->res = res;
3124         }
3125
3126         /* For Lancer, SH etc., read per-function resource limits from FW.
3127          * GET_FUNC_CONFIG returns per-function guaranteed limits.
3128          * GET_PROFILE_CONFIG returns the PCI-E related (PF-pool) limits.
3129          */
3130         if (!BEx_chip(adapter)) {
3131                 status = be_cmd_get_func_config(adapter, &res);
3132                 if (status)
3133                         return status;
3134
3135                 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3136                 if (be_roce_supported(adapter))
3137                         res.max_evt_qs /= 2;
3138                 adapter->res = res;
3139
3140                 if (be_physfn(adapter)) {
3141                         status = be_cmd_get_profile_config(adapter, &res, 0);
3142                         if (status)
3143                                 return status;
3144                         adapter->res.max_vfs = res.max_vfs;
3145                 }
3146
3147                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3148                          be_max_txqs(adapter), be_max_rxqs(adapter),
3149                          be_max_rss(adapter), be_max_eqs(adapter),
3150                          be_max_vfs(adapter));
3151                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3152                          be_max_uc(adapter), be_max_mc(adapter),
3153                          be_max_vlans(adapter));
3154         }
3155
3156         return 0;
3157 }
3158
3159 /* Routine to query per function resource limits */
3160 static int be_get_config(struct be_adapter *adapter)
3161 {
3162         int status;
3163
3164         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3165                                      &adapter->function_mode,
3166                                      &adapter->function_caps,
3167                                      &adapter->asic_rev);
3168         if (status)
3169                 return status;
3170
3171         status = be_get_resources(adapter);
3172         if (status)
3173                 return status;
3174
3175         /* primary MAC needs 1 pmac entry */
3176         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3177                                    GFP_KERNEL);
3178         if (!adapter->pmac_id)
3179                 return -ENOMEM;
3180
3181         /* Sanitize cfg_num_qs based on HW and platform limits */
3182         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3183
3184         return 0;
3185 }
3186
3187 static int be_mac_setup(struct be_adapter *adapter)
3188 {
3189         u8 mac[ETH_ALEN];
3190         int status;
3191
3192         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3193                 status = be_cmd_get_perm_mac(adapter, mac);
3194                 if (status)
3195                         return status;
3196
3197                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3198                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3199         } else {
3200                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3201                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3202         }
3203
3204         /* On BE3 VFs this cmd may fail due to lack of privilege.
3205          * Ignore the failure as in this case pmac_id is fetched
3206          * in the IFACE_CREATE cmd.
3207          */
3208         be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3209                         &adapter->pmac_id[0], 0);
3210         return 0;
3211 }
3212
3213 static void be_schedule_worker(struct be_adapter *adapter)
3214 {
3215         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3216         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3217 }
3218
3219 static int be_setup_queues(struct be_adapter *adapter)
3220 {
3221         struct net_device *netdev = adapter->netdev;
3222         int status;
3223
3224         status = be_evt_queues_create(adapter);
3225         if (status)
3226                 goto err;
3227
3228         status = be_tx_qs_create(adapter);
3229         if (status)
3230                 goto err;
3231
3232         status = be_rx_cqs_create(adapter);
3233         if (status)
3234                 goto err;
3235
3236         status = be_mcc_queues_create(adapter);
3237         if (status)
3238                 goto err;
3239
3240         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3241         if (status)
3242                 goto err;
3243
3244         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3245         if (status)
3246                 goto err;
3247
3248         return 0;
3249 err:
3250         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3251         return status;
3252 }
3253
3254 int be_update_queues(struct be_adapter *adapter)
3255 {
3256         struct net_device *netdev = adapter->netdev;
3257         int status;
3258
3259         if (netif_running(netdev))
3260                 be_close(netdev);
3261
3262         be_cancel_worker(adapter);
3263
3264         /* If any vectors have been shared with RoCE we cannot re-program
3265          * the MSIx table.
3266          */
3267         if (!adapter->num_msix_roce_vec)
3268                 be_msix_disable(adapter);
3269
3270         be_clear_queues(adapter);
3271
3272         if (!msix_enabled(adapter)) {
3273                 status = be_msix_enable(adapter);
3274                 if (status)
3275                         return status;
3276         }
3277
3278         status = be_setup_queues(adapter);
3279         if (status)
3280                 return status;
3281
3282         be_schedule_worker(adapter);
3283
3284         if (netif_running(netdev))
3285                 status = be_open(netdev);
3286
3287         return status;
3288 }
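
/* Note (assumption): the close -> clear -> re-create -> open sequence
 * above is the runtime queue-resize path, e.g. for a channel change:
 *
 *   ethtool -L eth0 combined 8
 *
 * That path already holds rtnl_lock, which is why be_setup_queues() is
 * called here without the explicit rtnl_lock()/rtnl_unlock() pair used
 * in be_setup().
 */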
3289
3290 static int be_setup(struct be_adapter *adapter)
3291 {
3292         struct device *dev = &adapter->pdev->dev;
3293         u32 tx_fc, rx_fc, en_flags;
3294         int status;
3295
3296         be_setup_init(adapter);
3297
3298         if (!lancer_chip(adapter))
3299                 be_cmd_req_native_mode(adapter);
3300
3301         status = be_get_config(adapter);
3302         if (status)
3303                 goto err;
3304
3305         status = be_msix_enable(adapter);
3306         if (status)
3307                 goto err;
3308
3309         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3310                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3311         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3312                 en_flags |= BE_IF_FLAGS_RSS;
3313         en_flags = en_flags & be_if_cap_flags(adapter);
3314         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3315                                   &adapter->if_handle, 0);
3316         if (status)
3317                 goto err;
3318
3319         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3320         rtnl_lock();
3321         status = be_setup_queues(adapter);
3322         rtnl_unlock();
3323         if (status)
3324                 goto err;
3325
3326         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3327         /* In UMC mode the FW does not return the right privileges.
3328          * Override with the privileges equivalent to a PF.
3329          */
3330         if (be_is_mc(adapter))
3331                 adapter->cmd_privileges = MAX_PRIVILEGES;
3332
3333         status = be_mac_setup(adapter);
3334         if (status)
3335                 goto err;
3336
3337         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3338
3339         if (adapter->vlans_added)
3340                 be_vid_config(adapter);
3341
3342         be_set_rx_mode(adapter->netdev);
3343
3344         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3345
3346         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3347                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3348                                         adapter->rx_fc);
3349
3350         if (sriov_want(adapter)) {
3351                 if (be_max_vfs(adapter))
3352                         be_vf_setup(adapter);
3353                 else
3354                         dev_warn(dev, "device doesn't support SRIOV\n");
3355         }
3356
3357         status = be_cmd_get_phy_info(adapter);
3358         if (!status && be_pause_supported(adapter))
3359                 adapter->phy.fc_autoneg = 1;
3360
3361         be_schedule_worker(adapter);
3362         return 0;
3363 err:
3364         be_clear(adapter);
3365         return status;
3366 }
3367
3368 #ifdef CONFIG_NET_POLL_CONTROLLER
3369 static void be_netpoll(struct net_device *netdev)
3370 {
3371         struct be_adapter *adapter = netdev_priv(netdev);
3372         struct be_eq_obj *eqo;
3373         int i;
3374
3375         for_all_evt_queues(adapter, eqo, i) {
3376                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3377                 napi_schedule(&eqo->napi);
3378         }
3381 }
3382 #endif
3383
3384 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3385 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3386
3387 static bool be_flash_redboot(struct be_adapter *adapter,
3388                         const u8 *p, u32 img_start, int image_size,
3389                         int hdr_size)
3390 {
3391         u32 crc_offset;
3392         u8 flashed_crc[4];
3393         int status;
3394
3395         crc_offset = hdr_size + img_start + image_size - 4;
3396
3397         p += crc_offset;
3398
3399         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3400                         (image_size - 4));
3401         if (status) {
3402                 dev_err(&adapter->pdev->dev,
3403                         "could not get CRC from flash; not flashing redboot\n");
3404                 return false;
3405         }
3406
3407         /* update redboot only if the CRC does not match */
3408         return memcmp(flashed_crc, p, 4) != 0;
3412 }
3413
3414 static bool phy_flashing_required(struct be_adapter *adapter)
3415 {
3416         return (adapter->phy.phy_type == TN_8022 &&
3417                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3418 }
3419
3420 static bool is_comp_in_ufi(struct be_adapter *adapter,
3421                            struct flash_section_info *fsec, int type)
3422 {
3423         int i = 0, img_type = 0;
3424         struct flash_section_info_g2 *fsec_g2 = NULL;
3425
3426         if (BE2_chip(adapter))
3427                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3428
3429         for (i = 0; i < MAX_FLASH_COMP; i++) {
3430                 if (fsec_g2)
3431                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3432                 else
3433                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3434
3435                 if (img_type == type)
3436                         return true;
3437         }
3438         return false;
3440 }
3441
3442 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3443                                          int header_size,
3444                                          const struct firmware *fw)
3445 {
3446         struct flash_section_info *fsec = NULL;
3447         const u8 *p = fw->data;
3448
3449         p += header_size;
3450         while (p < (fw->data + fw->size)) {
3451                 fsec = (struct flash_section_info *)p;
3452                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3453                         return fsec;
3454                 p += 32;
3455         }
3456         return NULL;
3457 }
3458
3459 static int be_flash(struct be_adapter *adapter, const u8 *img,
3460                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3461 {
3462         u32 total_bytes = 0, flash_op, num_bytes = 0;
3463         int status = 0;
3464         struct be_cmd_write_flashrom *req = flash_cmd->va;
3465
3466         total_bytes = img_size;
3467         while (total_bytes) {
3468                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3469
3470                 total_bytes -= num_bytes;
3471
3472                 if (!total_bytes) {
3473                         if (optype == OPTYPE_PHY_FW)
3474                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3475                         else
3476                                 flash_op = FLASHROM_OPER_FLASH;
3477                 } else {
3478                         if (optype == OPTYPE_PHY_FW)
3479                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3480                         else
3481                                 flash_op = FLASHROM_OPER_SAVE;
3482                 }
3483
3484                 memcpy(req->data_buf, img, num_bytes);
3485                 img += num_bytes;
3486                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3487                                                 flash_op, num_bytes);
3488                 if (status) {
3489                         if (status == ILLEGAL_IOCTL_REQ &&
3490                             optype == OPTYPE_PHY_FW)
3491                                 break;
3492                         dev_err(&adapter->pdev->dev,
3493                                 "cmd to write to flash rom failed.\n");
3494                         return status;
3495                 }
3496         }
3497         return 0;
3498 }
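
/* Worked example (illustrative): a 100KB image goes down in four
 * chunks of 32K, 32K, 32K and 4K.  The first three are sent with
 * FLASHROM_OPER_SAVE (staged by FW), and only the final chunk uses
 * FLASHROM_OPER_FLASH, which commits the complete image to flash.
 */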
3499
3500 /* For BE2, BE3 and BE3-R */
3501 static int be_flash_BEx(struct be_adapter *adapter,
3502                          const struct firmware *fw,
3503                          struct be_dma_mem *flash_cmd,
3504                          int num_of_images)
3506 {
3507         int status = 0, i, filehdr_size = 0;
3508         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3509         const u8 *p = fw->data;
3510         const struct flash_comp *pflashcomp;
3511         int num_comp, redboot;
3512         struct flash_section_info *fsec = NULL;
3513
3514         struct flash_comp gen3_flash_types[] = {
3515                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3516                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3517                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3518                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3519                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3520                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3521                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3522                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3523                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3524                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3525                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3526                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3527                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3528                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3529                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3530                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3531                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3532                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3533                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3534                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3535         };
3536
3537         struct flash_comp gen2_flash_types[] = {
3538                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3539                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3540                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3541                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3542                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3543                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3544                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3545                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3546                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3547                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3548                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3549                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3550                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3551                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3552                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3553                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3554         };
3555
3556         if (BE3_chip(adapter)) {
3557                 pflashcomp = gen3_flash_types;
3558                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3559                 num_comp = ARRAY_SIZE(gen3_flash_types);
3560         } else {
3561                 pflashcomp = gen2_flash_types;
3562                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3563                 num_comp = ARRAY_SIZE(gen2_flash_types);
3564         }
3565
3566         /* Get flash section info */
3567         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3568         if (!fsec) {
3569                 dev_err(&adapter->pdev->dev,
3570                         "Invalid cookie. UFI corrupted?\n");
3571                 return -1;
3572         }
3573         for (i = 0; i < num_comp; i++) {
3574                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3575                         continue;
3576
3577                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3578                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3579                         continue;
3580
3581                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3582                     !phy_flashing_required(adapter))
3583                         continue;
3584
3585                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3586                         redboot = be_flash_redboot(adapter, fw->data,
3587                                 pflashcomp[i].offset, pflashcomp[i].size,
3588                                 filehdr_size + img_hdrs_size);
3589                         if (!redboot)
3590                                 continue;
3591                 }
3592
3593                 p = fw->data;
3594                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3595                 if (p + pflashcomp[i].size > fw->data + fw->size)
3596                         return -1;
3597
3598                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3599                                         pflashcomp[i].size);
3600                 if (status) {
3601                         dev_err(&adapter->pdev->dev,
3602                                 "Flashing section type %d failed.\n",
3603                                 pflashcomp[i].img_type);
3604                         return status;
3605                 }
3606         }
3607         return 0;
3608 }
3609
3610 static int be_flash_skyhawk(struct be_adapter *adapter,
3611                 const struct firmware *fw,
3612                 struct be_dma_mem *flash_cmd, int num_of_images)
3613 {
3614         int status = 0, i, filehdr_size = 0;
3615         int img_offset, img_size, img_optype, redboot;
3616         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3617         const u8 *p = fw->data;
3618         struct flash_section_info *fsec = NULL;
3619
3620         filehdr_size = sizeof(struct flash_file_hdr_g3);
3621         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3622         if (!fsec) {
3623                 dev_err(&adapter->pdev->dev,
3624                         "Invalid cookie. UFI corrupted?\n");
3625                 return -1;
3626         }
3627
3628         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3629                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3630                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3631
3632                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3633                 case IMAGE_FIRMWARE_iSCSI:
3634                         img_optype = OPTYPE_ISCSI_ACTIVE;
3635                         break;
3636                 case IMAGE_BOOT_CODE:
3637                         img_optype = OPTYPE_REDBOOT;
3638                         break;
3639                 case IMAGE_OPTION_ROM_ISCSI:
3640                         img_optype = OPTYPE_BIOS;
3641                         break;
3642                 case IMAGE_OPTION_ROM_PXE:
3643                         img_optype = OPTYPE_PXE_BIOS;
3644                         break;
3645                 case IMAGE_OPTION_ROM_FCoE:
3646                         img_optype = OPTYPE_FCOE_BIOS;
3647                         break;
3648                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3649                         img_optype = OPTYPE_ISCSI_BACKUP;
3650                         break;
3651                 case IMAGE_NCSI:
3652                         img_optype = OPTYPE_NCSI_FW;
3653                         break;
3654                 default:
3655                         continue;
3656                 }
3657
3658                 if (img_optype == OPTYPE_REDBOOT) {
3659                         redboot = be_flash_redboot(adapter, fw->data,
3660                                         img_offset, img_size,
3661                                         filehdr_size + img_hdrs_size);
3662                         if (!redboot)
3663                                 continue;
3664                 }
3665
3666                 p = fw->data;
3667                 p += filehdr_size + img_offset + img_hdrs_size;
3668                 if (p + img_size > fw->data + fw->size)
3669                         return -1;
3670
3671                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3672                 if (status) {
3673                         dev_err(&adapter->pdev->dev,
3674                                 "Flashing section type %d failed.\n",
3675                                 le32_to_cpu(fsec->fsec_entry[i].type));
3676                         return status;
3677                 }
3678         }
3679         return 0;
3680 }
3681
3682 static int lancer_fw_download(struct be_adapter *adapter,
3683                                 const struct firmware *fw)
3684 {
3685 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3686 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3687         struct be_dma_mem flash_cmd;
3688         const u8 *data_ptr = NULL;
3689         u8 *dest_image_ptr = NULL;
3690         size_t image_size = 0;
3691         u32 chunk_size = 0;
3692         u32 data_written = 0;
3693         u32 offset = 0;
3694         int status = 0;
3695         u8 add_status = 0;
3696         u8 change_status;
3697
3698         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3699                 dev_err(&adapter->pdev->dev,
3700                         "FW image not properly aligned; length must be 4-byte aligned\n");
3702                 status = -EINVAL;
3703                 goto lancer_fw_exit;
3704         }
3705
3706         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) +
3707                          LANCER_FW_DOWNLOAD_CHUNK;
3708         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3709                                           &flash_cmd.dma, GFP_KERNEL);
3710         if (!flash_cmd.va) {
3711                 status = -ENOMEM;
3712                 goto lancer_fw_exit;
3713         }
3714
3715         dest_image_ptr = flash_cmd.va +
3716                                 sizeof(struct lancer_cmd_req_write_object);
3717         image_size = fw->size;
3718         data_ptr = fw->data;
3719
3720         while (image_size) {
3721                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3722
3723                 /* Copy the image chunk content. */
3724                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3725
3726                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3727                                                  chunk_size, offset,
3728                                                  LANCER_FW_DOWNLOAD_LOCATION,
3729                                                  &data_written, &change_status,
3730                                                  &add_status);
3731                 if (status)
3732                         break;
3733
3734                 offset += data_written;
3735                 data_ptr += data_written;
3736                 image_size -= data_written;
3737         }
3738
3739         if (!status) {
3740                 /* Commit the FW written */
3741                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3742                                                  0, offset,
3743                                                  LANCER_FW_DOWNLOAD_LOCATION,
3744                                                  &data_written, &change_status,
3745                                                  &add_status);
3746         }
3747
3748         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3749                                 flash_cmd.dma);
3750         if (status) {
3751                 dev_err(&adapter->pdev->dev,
3752                         "Firmware load error. Status code: 0x%x, additional status: 0x%x\n",
3754                         status, add_status);
3755                 goto lancer_fw_exit;
3756         }
3757
3758         if (change_status == LANCER_FW_RESET_NEEDED) {
3759                 status = lancer_physdev_ctrl(adapter,
3760                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3761                 if (status) {
3762                         dev_err(&adapter->pdev->dev,
3763                                 "Adapter busy for FW reset.\n"
3764                                 "New FW will not be active.\n");
3765                         goto lancer_fw_exit;
3766                 }
3767         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3768                 dev_err(&adapter->pdev->dev,
3769                         "System reboot required for new FW to be active\n");
3771         }
3772
3773         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3774 lancer_fw_exit:
3775         return status;
3776 }
3777
3778 #define UFI_TYPE2               2
3779 #define UFI_TYPE3               3
3780 #define UFI_TYPE3R              10
3781 #define UFI_TYPE4               4
3782 static int be_get_ufi_type(struct be_adapter *adapter,
3783                            struct flash_file_hdr_g3 *fhdr)
3784 {
3785         if (fhdr == NULL)
3786                 goto be_get_ufi_exit;
3787
3788         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3789                 return UFI_TYPE4;
3790         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3791                 if (fhdr->asic_type_rev == 0x10)
3792                         return UFI_TYPE3R;
3793                 else
3794                         return UFI_TYPE3;
3795         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3796                 return UFI_TYPE2;
3797
3798 be_get_ufi_exit:
3799         dev_err(&adapter->pdev->dev,
3800                 "UFI and Interface are not compatible for flashing\n");
3801         return -1;
3802 }
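
/* Compatibility matrix implemented above (derived from the checks):
 *
 *   build[0]   asic_type_rev   chip      UFI type
 *   '4'        -               Skyhawk   UFI_TYPE4
 *   '3'        0x10            BE3-R     UFI_TYPE3R
 *   '3'        other           BE3       UFI_TYPE3
 *   '2'        -               BE2       UFI_TYPE2
 *
 * Any other combination is rejected as incompatible for flashing.
 */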
3803
3804 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3805 {
3806         struct flash_file_hdr_g3 *fhdr3;
3807         struct image_hdr *img_hdr_ptr = NULL;
3808         struct be_dma_mem flash_cmd;
3809         const u8 *p;
3810         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3811
3812         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3813         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3814                                           &flash_cmd.dma, GFP_KERNEL);
3815         if (!flash_cmd.va) {
3816                 status = -ENOMEM;
3817                 goto be_fw_exit;
3818         }
3819
3820         p = fw->data;
3821         fhdr3 = (struct flash_file_hdr_g3 *)p;
3822
3823         ufi_type = be_get_ufi_type(adapter, fhdr3);
3824
3825         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3826         for (i = 0; i < num_imgs; i++) {
3827                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3828                                 (sizeof(struct flash_file_hdr_g3) +
3829                                  i * sizeof(struct image_hdr)));
3830                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3831                         switch (ufi_type) {
3832                         case UFI_TYPE4:
3833                                 status = be_flash_skyhawk(adapter, fw,
3834                                                         &flash_cmd, num_imgs);
3835                                 break;
3836                         case UFI_TYPE3R:
3837                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3838                                                       num_imgs);
3839                                 break;
3840                         case UFI_TYPE3:
3841                                 /* Do not flash this UFI on BE3-R cards */
3842                                 if (adapter->asic_rev < 0x10)
3843                                         status = be_flash_BEx(adapter, fw,
3844                                                               &flash_cmd,
3845                                                               num_imgs);
3846                                 else {
3847                                         status = -1;
3848                                         dev_err(&adapter->pdev->dev,
3849                                                 "Can't load BE3 UFI on BE3R\n");
3850                                 }
3851                         }
3852                 }
3853         }
3854
3855         if (ufi_type == UFI_TYPE2)
3856                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3857         else if (ufi_type == -1)
3858                 status = -1;
3859
3860         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3861                           flash_cmd.dma);
3862         if (status) {
3863                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3864                 goto be_fw_exit;
3865         }
3866
3867         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3868
3869 be_fw_exit:
3870         return status;
3871 }
3872
3873 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3874 {
3875         const struct firmware *fw;
3876         int status;
3877
3878         if (!netif_running(adapter->netdev)) {
3879                 dev_err(&adapter->pdev->dev,
3880                         "Firmware load not allowed (interface is down)\n");
3881                 return -ENETDOWN;
3882         }
3883
3884         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3885         if (status)
3886                 goto fw_exit;
3887
3888         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3889
3890         if (lancer_chip(adapter))
3891                 status = lancer_fw_download(adapter, fw);
3892         else
3893                 status = be_fw_download(adapter, fw);
3894
3895         if (!status)
3896                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3897                                   adapter->fw_on_flash);
3898
3899 fw_exit:
3900         release_firmware(fw);
3901         return status;
3902 }
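
/* Usage sketch (assumption): be_load_fw() is reached through ethtool's
 * flash_device hook, so an image dropped into /lib/firmware can be
 * flashed with e.g.:
 *
 *   ethtool -f eth0 be3_fw.ufi
 *
 * lancer_fw_download() or be_fw_download() is then chosen per chip.
 */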
3903
3904 static int be_ndo_bridge_setlink(struct net_device *dev,
3905                                     struct nlmsghdr *nlh)
3906 {
3907         struct be_adapter *adapter = netdev_priv(dev);
3908         struct nlattr *attr, *br_spec;
3909         int rem;
3910         int status = 0;
3911         u16 mode = 0;
3912
3913         if (!sriov_enabled(adapter))
3914                 return -EOPNOTSUPP;
3915
3916         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
             if (!br_spec)
                     return -EINVAL;
3917
3918         nla_for_each_nested(attr, br_spec, rem) {
3919                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3920                         continue;
3921
3922                 mode = nla_get_u16(attr);
3923                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3924                         return -EINVAL;
3925
3926                 status = be_cmd_set_hsw_config(adapter, 0, 0,
3927                                                adapter->if_handle,
3928                                                mode == BRIDGE_MODE_VEPA ?
3929                                                PORT_FWD_TYPE_VEPA :
3930                                                PORT_FWD_TYPE_VEB);
3931                 if (status)
3932                         goto err;
3933
3934                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3935                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3936
3937                 return status;
3938         }
3939 err:
3940         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3941                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3942
3943         return status;
3944 }
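
/* Usage sketch (assumption): the VEB/VEPA mode handled above is driven
 * from iproute2, e.g.:
 *
 *   bridge link set dev eth0 hwmode vepa
 *
 * VEPA hairpins all VF traffic through the external switch; VEB (the
 * default) switches VF-to-VF traffic inside the adapter.
 */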
3945
3946 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3947                                     struct net_device *dev,
3948                                     u32 filter_mask)
3949 {
3950         struct be_adapter *adapter = netdev_priv(dev);
3951         int status = 0;
3952         u8 hsw_mode;
3953
3954         if (!sriov_enabled(adapter))
3955                 return 0;
3956
3957         /* BE and Lancer chips support VEB mode only */
3958         if (BEx_chip(adapter) || lancer_chip(adapter)) {
3959                 hsw_mode = PORT_FWD_TYPE_VEB;
3960         } else {
3961                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
3962                                                adapter->if_handle, &hsw_mode);
3963                 if (status)
3964                         return 0;
3965         }
3966
3967         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3968                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
3969                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3970 }
3971
3972 static const struct net_device_ops be_netdev_ops = {
3973         .ndo_open               = be_open,
3974         .ndo_stop               = be_close,
3975         .ndo_start_xmit         = be_xmit,
3976         .ndo_set_rx_mode        = be_set_rx_mode,
3977         .ndo_set_mac_address    = be_mac_addr_set,
3978         .ndo_change_mtu         = be_change_mtu,
3979         .ndo_get_stats64        = be_get_stats64,
3980         .ndo_validate_addr      = eth_validate_addr,
3981         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3982         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3983         .ndo_set_vf_mac         = be_set_vf_mac,
3984         .ndo_set_vf_vlan        = be_set_vf_vlan,
3985         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3986         .ndo_get_vf_config      = be_get_vf_config,
3987 #ifdef CONFIG_NET_POLL_CONTROLLER
3988         .ndo_poll_controller    = be_netpoll,
3989 #endif
3990         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
3991         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
3992 };
3993
3994 static void be_netdev_init(struct net_device *netdev)
3995 {
3996         struct be_adapter *adapter = netdev_priv(netdev);
3997
3998         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3999                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4000                 NETIF_F_HW_VLAN_CTAG_TX;
4001         if (be_multi_rxq(adapter))
4002                 netdev->hw_features |= NETIF_F_RXHASH;
4003
4004         netdev->features |= netdev->hw_features |
4005                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4006
4007         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4008                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4009
4010         netdev->priv_flags |= IFF_UNICAST_FLT;
4011
4012         netdev->flags |= IFF_MULTICAST;
4013
4014         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4015
4016         netdev->netdev_ops = &be_netdev_ops;
4017
4018         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4019 }
4020
4021 static void be_unmap_pci_bars(struct be_adapter *adapter)
4022 {
4023         if (adapter->csr)
4024                 pci_iounmap(adapter->pdev, adapter->csr);
4025         if (adapter->db)
4026                 pci_iounmap(adapter->pdev, adapter->db);
4027 }
4028
4029 static int db_bar(struct be_adapter *adapter)
4030 {
4031         if (lancer_chip(adapter) || !be_physfn(adapter))
4032                 return 0;
4033         else
4034                 return 4;
4035 }
4036
4037 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4038 {
4039         if (skyhawk_chip(adapter)) {
4040                 adapter->roce_db.size = 4096;
4041                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4042                                                               db_bar(adapter));
4043                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4044                                                                db_bar(adapter));
4045         }
4046         return 0;
4047 }
4048
4049 static int be_map_pci_bars(struct be_adapter *adapter)
4050 {
4051         u8 __iomem *addr;
4052         u32 sli_intf;
4053
4054         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4055         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
4056                                 SLI_INTF_IF_TYPE_SHIFT;
4057
4058         if (BEx_chip(adapter) && be_physfn(adapter)) {
4059                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4060                 if (adapter->csr == NULL)
4061                         return -ENOMEM;
4062         }
4063
4064         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4065         if (addr == NULL)
4066                 goto pci_map_err;
4067         adapter->db = addr;
4068
4069         be_roce_map_pci_bars(adapter);
4070         return 0;
4071
4072 pci_map_err:
4073         be_unmap_pci_bars(adapter);
4074         return -ENOMEM;
4075 }
4076
4077 static void be_ctrl_cleanup(struct be_adapter *adapter)
4078 {
4079         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4080
4081         be_unmap_pci_bars(adapter);
4082
4083         if (mem->va)
4084                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4085                                   mem->dma);
4086
4087         mem = &adapter->rx_filter;
4088         if (mem->va)
4089                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4090                                   mem->dma);
4091 }
4092
4093 static int be_ctrl_init(struct be_adapter *adapter)
4094 {
4095         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4096         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4097         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4098         u32 sli_intf;
4099         int status;
4100
4101         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4102         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4103                                  SLI_INTF_FAMILY_SHIFT;
4104         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4105
4106         status = be_map_pci_bars(adapter);
4107         if (status)
4108                 goto done;
4109
4110         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4111         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4112                                                 mbox_mem_alloc->size,
4113                                                 &mbox_mem_alloc->dma,
4114                                                 GFP_KERNEL);
4115         if (!mbox_mem_alloc->va) {
4116                 status = -ENOMEM;
4117                 goto unmap_pci_bars;
4118         }
4119         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4120         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4121         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4122         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4123
4124         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4125         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4126                                             rx_filter->size, &rx_filter->dma,
4127                                             GFP_KERNEL);
4128         if (rx_filter->va == NULL) {
4129                 status = -ENOMEM;
4130                 goto free_mbox;
4131         }
4132
4133         mutex_init(&adapter->mbox_lock);
4134         spin_lock_init(&adapter->mcc_lock);
4135         spin_lock_init(&adapter->mcc_cq_lock);
4136
4137         init_completion(&adapter->flash_compl);
4138         pci_save_state(adapter->pdev);
4139         return 0;
4140
4141 free_mbox:
4142         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4143                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4144
4145 unmap_pci_bars:
4146         be_unmap_pci_bars(adapter);
4147
4148 done:
4149         return status;
4150 }
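
/* Note on the mailbox sizing above (illustrative): the +16 in the
 * allocation suggests the MCC mailbox must be 16-byte aligned.  The
 * driver over-allocates and rounds both the CPU and bus addresses up:
 *
 *   raw = dma_alloc_coherent(dev, size + 16, ...);
 *   va  = PTR_ALIGN(raw, 16);   (advances by at most 15 bytes)
 *
 * so the aligned mailbox always fits inside the allocation.
 */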
4151
4152 static void be_stats_cleanup(struct be_adapter *adapter)
4153 {
4154         struct be_dma_mem *cmd = &adapter->stats_cmd;
4155
4156         if (cmd->va)
4157                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4158                                   cmd->va, cmd->dma);
4159 }
4160
4161 static int be_stats_init(struct be_adapter *adapter)
4162 {
4163         struct be_dma_mem *cmd = &adapter->stats_cmd;
4164
4165         if (lancer_chip(adapter))
4166                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4167         else if (BE2_chip(adapter))
4168                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4169         else if (BE3_chip(adapter))
4170                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4171         else
4172                 /* ALL non-BE ASICs */
4173                 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4174
4175         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4176                                       GFP_KERNEL);
4177         if (!cmd->va)
4178                 return -ENOMEM;
4179         return 0;
4180 }
4181
4182 static void be_remove(struct pci_dev *pdev)
4183 {
4184         struct be_adapter *adapter = pci_get_drvdata(pdev);
4185
4186         if (!adapter)
4187                 return;
4188
4189         be_roce_dev_remove(adapter);
4190         be_intr_set(adapter, false);
4191
4192         cancel_delayed_work_sync(&adapter->func_recovery_work);
4193
4194         unregister_netdev(adapter->netdev);
4195
4196         be_clear(adapter);
4197
4198         /* tell fw we're done with firing cmds */
4199         be_cmd_fw_clean(adapter);
4200
4201         be_stats_cleanup(adapter);
4202
4203         be_ctrl_cleanup(adapter);
4204
4205         pci_disable_pcie_error_reporting(pdev);
4206
4207         pci_release_regions(pdev);
4208         pci_disable_device(pdev);
4209
4210         free_netdev(adapter->netdev);
4211 }
4212
4213 bool be_is_wol_supported(struct be_adapter *adapter)
4214 {
4215         return (adapter->wol_cap & BE_WOL_CAP) &&
4216                !be_is_wol_excluded(adapter);
4217 }
4218
4219 u32 be_get_fw_log_level(struct be_adapter *adapter)
4220 {
4221         struct be_dma_mem extfat_cmd;
4222         struct be_fat_conf_params *cfgs;
4223         int status;
4224         u32 level = 0;
4225         int j;
4226
4227         if (lancer_chip(adapter))
4228                 return 0;
4229
4230         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4231         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4232         extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
4233                                 extfat_cmd.size, &extfat_cmd.dma, GFP_KERNEL);
4234
4235         if (!extfat_cmd.va) {
4236                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4237                         __func__);
4238                 goto err;
4239         }
4240
4241         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4242         if (!status) {
4243                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4244                                                 sizeof(struct be_cmd_resp_hdr));
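                /* The returned FAT configuration parameters immediately
                 * follow the common response header in the same DMA buffer,
                 * hence the sizeof(struct be_cmd_resp_hdr) offset above.
                 */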
4245                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4246                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4247                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4248                 }
4249         }
4250         dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size,
4251                           extfat_cmd.va, extfat_cmd.dma);
4252 err:
4253         return level;
4254 }
4255
4256 static int be_get_initial_config(struct be_adapter *adapter)
4257 {
4258         int status;
4259         u32 level;
4260
4261         status = be_cmd_get_cntl_attributes(adapter);
4262         if (status)
4263                 return status;
4264
4265         status = be_cmd_get_acpi_wol_cap(adapter);
4266         if (status) {
4267                 /* in case of a failure to get WOL capabilities,
4268                  * check the exclusion list to determine WOL capability */
4269                 if (!be_is_wol_excluded(adapter))
4270                         adapter->wol_cap |= BE_WOL_CAP;
4271         }
4272
4273         if (be_is_wol_supported(adapter))
4274                 adapter->wol = true;
4275
4276         /* Must be a power of 2 or else MODULO will BUG_ON */
4277         adapter->be_get_temp_freq = 64;
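        /* Presumably (an assumption here; MODULO() lives in be.h and is not
         * shown in this file) the power-of-two requirement lets the helper
         * use a mask instead of a division, e.g. for unsigned values:
         *
         *   work_counter % 64 == work_counter & 63
         *
         * which only holds when the divisor is a power of two; hence the
         * BUG_ON noted above.
         */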
4278
4279         level = be_get_fw_log_level(adapter);
4280         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4281
4282         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4283         return 0;
4284 }
4285
4286 static int lancer_recover_func(struct be_adapter *adapter)
4287 {
4288         struct device *dev = &adapter->pdev->dev;
4289         int status;
4290
4291         status = lancer_test_and_set_rdy_state(adapter);
4292         if (status)
4293                 goto err;
4294
4295         if (netif_running(adapter->netdev))
4296                 be_close(adapter->netdev);
4297
4298         be_clear(adapter);
4299
4300         be_clear_all_error(adapter);
4301
4302         status = be_setup(adapter);
4303         if (status)
4304                 goto err;
4305
4306         if (netif_running(adapter->netdev)) {
4307                 status = be_open(adapter->netdev);
4308                 if (status)
4309                         goto err;
4310         }
4311
4312         dev_info(dev, "Error recovery successful\n");
4313         return 0;
4314 err:
4315         if (status == -EAGAIN)
4316                 dev_err(dev, "Waiting for resource provisioning\n");
4317         else
4318                 dev_err(dev, "Error recovery failed\n");
4319
4320         return status;
4321 }
4322
4323 static void be_func_recovery_task(struct work_struct *work)
4324 {
4325         struct be_adapter *adapter =
4326                 container_of(work, struct be_adapter,  func_recovery_work.work);
4327         int status = 0;
4328
4329         be_detect_error(adapter);
4330
4331         if (adapter->hw_error && lancer_chip(adapter)) {
4333                 rtnl_lock();
4334                 netif_device_detach(adapter->netdev);
4335                 rtnl_unlock();
4336
4337                 status = lancer_recover_func(adapter);
4338                 if (!status)
4339                         netif_device_attach(adapter->netdev);
4340         }
4341
4342         /* On Lancer, a provisioning error (-EAGAIN) is the only error worth
4343          * retrying; for any other error, don't attempt further recovery
4344          * (i.e. don't reschedule this work).
4345          */
4345         if (!status || status == -EAGAIN)
4346                 schedule_delayed_work(&adapter->func_recovery_work,
4347                                       msecs_to_jiffies(1000));
4348 }
4349
4350 static void be_worker(struct work_struct *work)
4351 {
4352         struct be_adapter *adapter =
4353                 container_of(work, struct be_adapter, work.work);
4354         struct be_rx_obj *rxo;
4355         int i;
4356
4357         /* when interrupts are not yet enabled, just reap any pending
4358          * mcc completions */
4359         if (!netif_running(adapter->netdev)) {
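                /* Assumption (not stated in this file): bottom halves are
                 * disabled here because MCC completions are normally reaped
                 * from softirq context, so be_process_mcc() expects BHs off
                 * for its locking to be consistent.
                 */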
4360                 local_bh_disable();
4361                 be_process_mcc(adapter);
4362                 local_bh_enable();
4363                 goto reschedule;
4364         }
4365
4366         if (!adapter->stats_cmd_sent) {
4367                 if (lancer_chip(adapter))
4368                         lancer_cmd_get_pport_stats(adapter,
4369                                                    &adapter->stats_cmd);
4370                 else
4371                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4372         }
4373
4374         if (be_physfn(adapter) &&
4375             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4376                 be_cmd_get_die_temperature(adapter);
4377
4378         for_all_rx_queues(adapter, rxo, i) {
4379                 if (rxo->rx_post_starved) {
4380                         rxo->rx_post_starved = false;
4381                         be_post_rx_frags(rxo, GFP_KERNEL);
4382                 }
4383         }
4384
4385         be_eqd_update(adapter);
4386
4387 reschedule:
4388         adapter->work_counter++;
4389         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4390 }
4391
4392 /* If any VFs are already enabled don't FLR the PF */
4393 static bool be_reset_required(struct be_adapter *adapter)
4394 {
4395         return !pci_num_vf(adapter->pdev);
4396 }
4397
4398 static char *mc_name(struct be_adapter *adapter)
4399 {
4400         if (adapter->function_mode & FLEX10_MODE)
4401                 return "FLEX10";
4402         else if (adapter->function_mode & VNIC_MODE)
4403                 return "vNIC";
4404         else if (adapter->function_mode & UMC_ENABLED)
4405                 return "UMC";
4406         else
4407                 return "";
4408 }
4409
4410 static inline char *func_name(struct be_adapter *adapter)
4411 {
4412         return be_physfn(adapter) ? "PF" : "VF";
4413 }
4414
4415 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4416 {
4417         int status = 0;
4418         struct be_adapter *adapter;
4419         struct net_device *netdev;
4420         char port_name;
4421
4422         status = pci_enable_device(pdev);
4423         if (status)
4424                 goto do_none;
4425
4426         status = pci_request_regions(pdev, DRV_NAME);
4427         if (status)
4428                 goto disable_dev;
4429         pci_set_master(pdev);
4430
4431         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4432         if (!netdev) {
4433                 status = -ENOMEM;
4434                 goto rel_reg;
4435         }
4436         adapter = netdev_priv(netdev);
4437         adapter->pdev = pdev;
4438         pci_set_drvdata(pdev, adapter);
4439         adapter->netdev = netdev;
4440         SET_NETDEV_DEV(netdev, &pdev->dev);
4441
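        /* DMA-mask fallback pattern: try a 64-bit mask first and advertise
         * NETIF_F_HIGHDMA (fragments may reside in high memory); fall back
         * to a 32-bit mask on platforms that reject 64-bit DMA, and fail
         * the probe only if both are rejected.
         */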
4442         status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4443         if (!status) {
4444                 netdev->features |= NETIF_F_HIGHDMA;
4445         } else {
4446                 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4447                 if (status) {
4448                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4449                         goto free_netdev;
4450                 }
4451         }
4452
4453         if (be_physfn(adapter)) {
4454                 status = pci_enable_pcie_error_reporting(pdev);
4455                 if (!status)
4456                         dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4457         }
4458
4459         status = be_ctrl_init(adapter);
4460         if (status)
4461                 goto free_netdev;
4462
4463         /* sync up with fw's ready state */
4464         if (be_physfn(adapter)) {
4465                 status = be_fw_wait_ready(adapter);
4466                 if (status)
4467                         goto ctrl_clean;
4468         }
4469
4470         if (be_reset_required(adapter)) {
4471                 status = be_cmd_reset_function(adapter);
4472                 if (status)
4473                         goto ctrl_clean;
4474
4475                 /* Wait for interrupts to quiesce after an FLR */
4476                 msleep(100);
4477         }
4478
4479         /* Allow interrupts for other ULPs running on NIC function */
4480         be_intr_set(adapter, true);
4481
4482         /* tell fw we're ready to fire cmds */
4483         status = be_cmd_fw_init(adapter);
4484         if (status)
4485                 goto ctrl_clean;
4486
4487         status = be_stats_init(adapter);
4488         if (status)
4489                 goto ctrl_clean;
4490
4491         status = be_get_initial_config(adapter);
4492         if (status)
4493                 goto stats_clean;
4494
4495         INIT_DELAYED_WORK(&adapter->work, be_worker);
4496         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4497         adapter->rx_fc = adapter->tx_fc = true;
4498
4499         status = be_setup(adapter);
4500         if (status)
4501                 goto stats_clean;
4502
4503         be_netdev_init(netdev);
4504         status = register_netdev(netdev);
4505         if (status)
4506                 goto unsetup;
4507
4508         be_roce_dev_add(adapter);
4509
4510         schedule_delayed_work(&adapter->func_recovery_work,
4511                               msecs_to_jiffies(1000));
4512
4513         be_cmd_query_port_name(adapter, &port_name);
4514
4515         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4516                  func_name(adapter), mc_name(adapter), port_name);
4517
4518         return 0;
4519
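/* Error unwind for be_probe(): each label below undoes the setup steps
 * completed before the corresponding failure point, in reverse order of
 * acquisition.
 */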
4520 unsetup:
4521         be_clear(adapter);
4522 stats_clean:
4523         be_stats_cleanup(adapter);
4524 ctrl_clean:
4525         be_ctrl_cleanup(adapter);
4526 free_netdev:
4527         free_netdev(netdev);
4528 rel_reg:
4529         pci_release_regions(pdev);
4530 disable_dev:
4531         pci_disable_device(pdev);
4532 do_none:
4533         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4534         return status;
4535 }
4536
4537 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4538 {
4539         struct be_adapter *adapter = pci_get_drvdata(pdev);
4540         struct net_device *netdev = adapter->netdev;
4541
4542         if (adapter->wol)
4543                 be_setup_wol(adapter, true);
4544
4545         cancel_delayed_work_sync(&adapter->func_recovery_work);
4546
4547         netif_device_detach(netdev);
4548         if (netif_running(netdev)) {
4549                 rtnl_lock();
4550                 be_close(netdev);
4551                 rtnl_unlock();
4552         }
4553         be_clear(adapter);
4554
4555         pci_save_state(pdev);
4556         pci_disable_device(pdev);
4557         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4558         return 0;
4559 }
4560
4561 static int be_resume(struct pci_dev *pdev)
4562 {
4563         int status = 0;
4564         struct be_adapter *adapter = pci_get_drvdata(pdev);
4565         struct net_device *netdev = adapter->netdev;
4566
4567         netif_device_detach(netdev);
4568
4569         status = pci_enable_device(pdev);
4570         if (status)
4571                 return status;
4572
4573         pci_set_power_state(pdev, PCI_D0);
4574         pci_restore_state(pdev);
4575
4576         status = be_fw_wait_ready(adapter);
4577         if (status)
4578                 return status;
4579
4580         /* tell fw we're ready to fire cmds */
4581         status = be_cmd_fw_init(adapter);
4582         if (status)
4583                 return status;
4584
4585         status = be_setup(adapter);
4586         if (status)
4587                 return status;
4586         if (netif_running(netdev)) {
4587                 rtnl_lock();
4588                 be_open(netdev);
4589                 rtnl_unlock();
4590         }
4591
4592         schedule_delayed_work(&adapter->func_recovery_work,
4593                               msecs_to_jiffies(1000));
4594         netif_device_attach(netdev);
4595
4596         if (adapter->wol)
4597                 be_setup_wol(adapter, false);
4598
4599         return 0;
4600 }
4601
4602 /*
4603  * An FLR will stop BE from DMAing any data.
4604  */
4605 static void be_shutdown(struct pci_dev *pdev)
4606 {
4607         struct be_adapter *adapter = pci_get_drvdata(pdev);
4608
4609         if (!adapter)
4610                 return;
4611
4612         cancel_delayed_work_sync(&adapter->work);
4613         cancel_delayed_work_sync(&adapter->func_recovery_work);
4614
4615         netif_device_detach(adapter->netdev);
4616
4617         be_cmd_reset_function(adapter);
4618
4619         pci_disable_device(pdev);
4620 }
4621
4622 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4623                                 pci_channel_state_t state)
4624 {
4625         struct be_adapter *adapter = pci_get_drvdata(pdev);
4626         struct net_device *netdev =  adapter->netdev;
4627
4628         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4629
4630         if (!adapter->eeh_error) {
4631                 adapter->eeh_error = true;
4632
4633                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4634
4635                 rtnl_lock();
4636                 netif_device_detach(netdev);
4637                 if (netif_running(netdev))
4638                         be_close(netdev);
4639                 rtnl_unlock();
4640
4641                 be_clear(adapter);
4642         }
4643
4644         if (state == pci_channel_io_perm_failure)
4645                 return PCI_ERS_RESULT_DISCONNECT;
4646
4647         pci_disable_device(pdev);
4648
4649         /* The error could cause the FW to trigger a flash debug dump.
4650          * Resetting the card while flash dump is in progress
4651          * can cause it not to recover; wait for it to finish.
4652          * Wait only for first function as it is needed only once per
4653          * adapter.
4654          */
4655         if (pdev->devfn == 0)
4656                 ssleep(30);
4657
4658         return PCI_ERS_RESULT_NEED_RESET;
4659 }
4660
4661 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4662 {
4663         struct be_adapter *adapter = pci_get_drvdata(pdev);
4664         int status;
4665
4666         dev_info(&adapter->pdev->dev, "EEH reset\n");
4667
4668         status = pci_enable_device(pdev);
4669         if (status)
4670                 return PCI_ERS_RESULT_DISCONNECT;
4671
4672         pci_set_master(pdev);
4673         pci_set_power_state(pdev, PCI_D0);
4674         pci_restore_state(pdev);
4675
4676         /* Check if card is ok and fw is ready */
4677         dev_info(&adapter->pdev->dev,
4678                  "Waiting for FW to be ready after EEH reset\n");
4679         status = be_fw_wait_ready(adapter);
4680         if (status)
4681                 return PCI_ERS_RESULT_DISCONNECT;
4682
4683         pci_cleanup_aer_uncorrect_error_status(pdev);
4684         be_clear_all_error(adapter);
4685         return PCI_ERS_RESULT_RECOVERED;
4686 }
4687
4688 static void be_eeh_resume(struct pci_dev *pdev)
4689 {
4690         int status = 0;
4691         struct be_adapter *adapter = pci_get_drvdata(pdev);
4692         struct net_device *netdev = adapter->netdev;
4693
4694         dev_info(&adapter->pdev->dev, "EEH resume\n");
4695
4696         pci_save_state(pdev);
4697
4698         status = be_cmd_reset_function(adapter);
4699         if (status)
4700                 goto err;
4701
4702         /* tell fw we're ready to fire cmds */
4703         status = be_cmd_fw_init(adapter);
4704         if (status)
4705                 goto err;
4706
4707         status = be_setup(adapter);
4708         if (status)
4709                 goto err;
4710
4711         if (netif_running(netdev)) {
4712                 status = be_open(netdev);
4713                 if (status)
4714                         goto err;
4715         }
4716
4717         schedule_delayed_work(&adapter->func_recovery_work,
4718                               msecs_to_jiffies(1000));
4719         netif_device_attach(netdev);
4720         return;
4721 err:
4722         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4723 }
4724
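/* EEH/AER recovery sequence as driven by the PCI error-handling core:
 * .error_detected runs first (returning PCI_ERS_RESULT_NEED_RESET above,
 * unless the error is permanent), .slot_reset re-initializes the function
 * after the link reset, and .resume restores traffic once recovery succeeds.
 */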
4725 static const struct pci_error_handlers be_eeh_handlers = {
4726         .error_detected = be_eeh_err_detected,
4727         .slot_reset = be_eeh_reset,
4728         .resume = be_eeh_resume,
4729 };
4730
4731 static struct pci_driver be_driver = {
4732         .name = DRV_NAME,
4733         .id_table = be_dev_ids,
4734         .probe = be_probe,
4735         .remove = be_remove,
4736         .suspend = be_suspend,
4737         .resume = be_resume,
4738         .shutdown = be_shutdown,
4739         .err_handler = &be_eeh_handlers
4740 };
4741
4742 static int __init be_init_module(void)
4743 {
4744         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4745             rx_frag_size != 2048) {
4746                 pr_warn(DRV_NAME " : Module param rx_frag_size must be 2048/4096/8192; using 2048\n");
4749                 rx_frag_size = 2048;
4750         }
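        /* An equivalent check could use is_power_of_2() from <linux/log2.h>
         * (a sketch only; the explicit comparison above is what the driver
         * uses): the powers of two in [2048, 8192] are exactly the three
         * permitted values, so
         *
         *   if (!is_power_of_2(rx_frag_size) ||
         *       rx_frag_size < 2048 || rx_frag_size > 8192)
         *           rx_frag_size = 2048;
         *
         * behaves the same, modulo the warning message.
         */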
4751
4752         return pci_register_driver(&be_driver);
4753 }
4754 module_init(be_init_module);
4755
4756 static void __exit be_exit_module(void)
4757 {
4758         pci_unregister_driver(&be_driver);
4759 }
4760 module_exit(be_exit_module);