be2net: Enable NETIF_F_TSO6 for VLAN traffic for BE
drivers/net/benet/be_main.c

/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};
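
/* Queue rings live in DMA-coherent memory: be_queue_alloc() sizes a ring as
 * len * entry_size and zeroes it; be_queue_free() releases it if allocated.
 */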
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}
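
/* Enable/disable host interrupt delivery via the HOSTINTR bit of the MEMBAR
 * interrupt control register; a no-op if the bit already matches the request
 * or an EEH error is pending.
 */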
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}
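
/* The doorbell helpers below write a queue id and a count to the adapter's
 * doorbell BAR: the RQ/TXQ doorbells post newly produced ring entries (the
 * wmb() makes the ring updates visible to HW before the doorbell write),
 * while the EQ/CQ doorbells ack "popped" entries and optionally re-arm the
 * queue.
 */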
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
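
/* ndo_set_mac_address handler. For VFs the MAC is programmed by the owning
 * PF, so only the netdev copy is updated; on a PF the old pmac is deleted
 * and the new one added before the netdev address is refreshed.
 */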
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}
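
/* FW reports ethernet stats in a chip-specific layout: v0 structs on BE2,
 * v1 on BE3 and a per-port (pport) format on Lancer. The populate_*
 * helpers below normalize each layout into adapter->drv_stats.
 */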
static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v0 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);
        struct be_rxf_stats_v0 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);

        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors =
                port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events =
                        rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events =
                        rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr =
                rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags =
                rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_rxf_stats_v1 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v1 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors =
                port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop =
                port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr =
                rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags =
                rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_cmd_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames =
                make_64bit_val(pport_stats->rx_pause_frames_hi,
                                 pport_stats->rx_pause_frames_lo);
        drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
                                                pport_stats->rx_crc_errors_lo);
        drvs->rx_control_frames =
                        make_64bit_val(pport_stats->rx_control_frames_hi,
                        pport_stats->rx_control_frames_lo);
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long =
                make_64bit_val(pport_stats->rx_frames_too_long_hi,
                                        pport_stats->rx_frames_too_long_lo);
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                make_64bit_val(pport_stats->rx_symbol_errors_hi,
                                pport_stats->rx_symbol_errors_lo);
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
                                        pport_stats->tx_pause_frames_lo);
        drvs->tx_controlframes =
                make_64bit_val(pport_stats->tx_control_frames_hi,
                                pport_stats->tx_control_frames_lo);
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_no_pbuf = 0;
        drvs->rx_drops_no_txpb = 0;
        drvs->rx_drops_no_erx_descr = 0;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
                                                pport_stats->num_forwards_lo);
        drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
                                                pport_stats->rx_drops_mtu_lo);
        drvs->rx_drops_no_tpre_descr = 0;
        drvs->rx_drops_too_many_frags =
                make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
                                pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                if (adapter->generation == BE_GEN3) {
                        if (!(lancer_chip(adapter))) {
                                struct be_erx_stats_v1 *erx_stats =
                                        be_erx_stats_from_cmd(adapter);
                                dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                        }
                } else {
                        struct be_erx_stats_v0 *erx_stats =
                                        be_erx_stats_from_cmd(adapter);
                        dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                }
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt +
                drvs->rx_tcp_checksum_errs +
                drvs->rx_ip_checksum_errs +
                drvs->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        dev_stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}
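
/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec.
 * For example, 250,000,000 bytes over 2*HZ ticks -> 125,000,000 bytes/sec
 * -> 1,000,000,000 bits/sec -> 1000 Mbits/sec.
 */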
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
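
/* Fill the header WRB that precedes the data WRBs of a transmit request:
 * LSO/checksum offload flags, VLAN tag insertion (with priority remapped
 * when the OS-supplied priority is not in the available bitmap), the WRB
 * count and the total frame length.
 */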
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}
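
/* Map the skb head and frags for DMA and fill one WRB per fragment (plus an
 * optional dummy WRB to keep the count even). On a mapping failure all WRBs
 * queued so far are unmapped and the queue head is rewound; returns the
 * number of bytes queued, or 0 on error.
 */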
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
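
/* ndo_start_xmit: build WRBs for the skb, stash it in sent_skb_list, stop
 * the queue if the next worst-case request might not fit, then ring the TX
 * doorbell. If WRB setup fails the skb is dropped.
 */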
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}
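
/* Program the RX filter: promiscuous mode tracks IFF_PROMISC, and multicast
 * promiscuous is used when IFF_ALLMULTI is set or more groups are configured
 * than the BE_MAX_MC table can hold.
 */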
static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, true);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, false);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}
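
/* Look up the page_info for a posted RX frag. The big page backing several
 * frags is unmapped only when its last user is consumed; the queue's used
 * count is dropped for the consumed frag.
 */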
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
                                        rxcp->vlan_tag);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
                                rxcp->vlan_tag);
}
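
/* Decode a v1 (BE3 native) RX completion into the chip-independent
 * be_rx_compl_info; be_parse_rx_compl_v0 below does the same for the legacy
 * layout. The two AMAP definitions place the fields at different positions.
 */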
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
}
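
/* Fetch the next valid RX completion, convert it to CPU endianness and parse
 * it; returns NULL when the ring is empty. The valid bit is cleared after
 * parsing so the entry is not seen again, and vlanf is sanitized for cards
 * that set it spuriously.
 */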
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is OK to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (((adapter->pvid & VLAN_VID_MASK) ==
                     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}
1373
1374 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1375 {
1376         u32 order = get_order(size);
1377
1378         if (order > 0)
1379                 gfp |= __GFP_COMP;
1380         return  alloc_pages(gfp, order);
1381 }
1382
1383 /*
1384  * Allocate a page, split it into fragments of size rx_frag_size and post
1385  * them as receive buffers to BE
1386  */
1387 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1388 {
1389         struct be_adapter *adapter = rxo->adapter;
1390         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1391         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1392         struct be_queue_info *rxq = &rxo->q;
1393         struct page *pagep = NULL;
1394         struct be_eth_rx_d *rxd;
1395         u64 page_dmaaddr = 0, frag_dmaaddr;
1396         u32 posted, page_offset = 0;
1397
1398         page_info = &rxo->page_info_tbl[rxq->head];
1399         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1400                 if (!pagep) {
1401                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1402                         if (unlikely(!pagep)) {
1403                                 rxo->stats.rx_post_fail++;
1404                                 break;
1405                         }
1406                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1407                                                     0, adapter->big_page_size,
1408                                                     DMA_FROM_DEVICE);
1409                         page_info->page_offset = 0;
1410                 } else {
1411                         get_page(pagep);
1412                         page_info->page_offset = page_offset + rx_frag_size;
1413                 }
1414                 page_offset = page_info->page_offset;
1415                 page_info->page = pagep;
1416                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1417                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1418
1419                 rxd = queue_head_node(rxq);
1420                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1421                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1422
1423                 /* Any space left in the current big page for another frag? */
1424                 if ((page_offset + rx_frag_size + rx_frag_size) >
1425                                         adapter->big_page_size) {
1426                         pagep = NULL;
1427                         page_info->last_page_user = true;
1428                 }
1429
1430                 prev_page_info = page_info;
1431                 queue_head_inc(rxq);
1432                 page_info = &page_info_tbl[rxq->head];
1433         }
1434         if (pagep)
1435                 prev_page_info->last_page_user = true;
1436
1437         if (posted) {
1438                 atomic_add(posted, &rxq->used);
1439                 be_rxq_notify(adapter, rxq->id, posted);
1440         } else if (atomic_read(&rxq->used) == 0) {
1441                 /* Let be_worker replenish when memory is available */
1442                 rxo->rx_post_starved = true;
1443         }
1444 }
1445
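/* Pop the next valid TX completion from the CQ, or return NULL if none is
 * pending. The rmb() ensures the entry is not read before its valid bit. */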
1446 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1447 {
1448         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1449
1450         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1451                 return NULL;
1452
1453         rmb();
1454         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1455
1456         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1457
1458         queue_tail_inc(tx_cq);
1459         return txcp;
1460 }
1461
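/* Unmap and free the skb whose wrbs run from the queue tail through
 * last_index. Returns the number of wrbs consumed (including the header
 * wrb) so the caller can credit them back to the TX queue. */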
1462 static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1463 {
1464         struct be_queue_info *txq = &adapter->tx_obj.q;
1465         struct be_eth_wrb *wrb;
1466         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1467         struct sk_buff *sent_skb;
1468         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1469         bool unmap_skb_hdr = true;
1470
1471         sent_skb = sent_skbs[txq->tail];
1472         BUG_ON(!sent_skb);
1473         sent_skbs[txq->tail] = NULL;
1474
1475         /* skip header wrb */
1476         queue_tail_inc(txq);
1477
1478         do {
1479                 cur_index = txq->tail;
1480                 wrb = queue_tail_node(txq);
1481                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1482                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1483                 unmap_skb_hdr = false;
1484
1485                 num_wrbs++;
1486                 queue_tail_inc(txq);
1487         } while (cur_index != last_index);
1488
1489         kfree_skb(sent_skb);
1490         return num_wrbs;
1491 }
1492
1493 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1494 {
1495         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1496
1497         if (!eqe->evt)
1498                 return NULL;
1499
1500         rmb();
1501         eqe->evt = le32_to_cpu(eqe->evt);
1502         queue_tail_inc(&eq_obj->q);
1503         return eqe;
1504 }
1505
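/* Drain all pending entries from an event queue, re-arm it, and schedule
 * the corresponding NAPI handler if any events were found. */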
1506 static int event_handle(struct be_adapter *adapter,
1507                         struct be_eq_obj *eq_obj)
1508 {
1509         struct be_eq_entry *eqe;
1510         u16 num = 0;
1511
1512         while ((eqe = event_get(eq_obj)) != NULL) {
1513                 eqe->evt = 0;
1514                 num++;
1515         }
1516
1517         /* Deal with any spurious interrupts that come
1518          * without events
1519          */
1520         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1521         if (num)
1522                 napi_schedule(&eq_obj->napi);
1523
1524         return num;
1525 }
1526
1527 /* Just read and notify events without processing them.
1528  * Used when destroying the event queues */
1529 static void be_eq_clean(struct be_adapter *adapter,
1530                         struct be_eq_obj *eq_obj)
1531 {
1532         struct be_eq_entry *eqe;
1533         u16 num = 0;
1534
1535         while ((eqe = event_get(eq_obj)) != NULL) {
1536                 eqe->evt = 0;
1537                 num++;
1538         }
1539
1540         if (num)
1541                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1542 }
1543
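/* Drop all pending RX completions, then release any posted receive
 * buffers that were never consumed, leaving the RX queue empty. */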
1544 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1545 {
1546         struct be_rx_page_info *page_info;
1547         struct be_queue_info *rxq = &rxo->q;
1548         struct be_queue_info *rx_cq = &rxo->cq;
1549         struct be_rx_compl_info *rxcp;
1550         u16 tail;
1551
1552         /* First cleanup pending rx completions */
1553         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1554                 be_rx_compl_discard(adapter, rxo, rxcp);
1555                 be_cq_notify(adapter, rx_cq->id, false, 1);
1556         }
1557
1558         /* Then free posted rx buffers that were not used */
1559         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1560         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1561                 page_info = get_rx_page_info(adapter, rxo, tail);
1562                 put_page(page_info->page);
1563                 memset(page_info, 0, sizeof(*page_info));
1564         }
1565         BUG_ON(atomic_read(&rxq->used));
1566 }
1567
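/* Reap TX completions until the TX queue drains or 200ms elapse; any
 * wrbs still outstanding after that are unmapped and freed directly,
 * as their completions will never arrive. */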
1568 static void be_tx_compl_clean(struct be_adapter *adapter)
1569 {
1570         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1571         struct be_queue_info *txq = &adapter->tx_obj.q;
1572         struct be_eth_tx_compl *txcp;
1573         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1574         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1575         struct sk_buff *sent_skb;
1576         bool dummy_wrb;
1577
1578         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1579         do {
1580                 while ((txcp = be_tx_compl_get(tx_cq))) {
1581                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1582                                         wrb_index, txcp);
1583                         num_wrbs += be_tx_compl_process(adapter, end_idx);
1584                         cmpl++;
1585                 }
1586                 if (cmpl) {
1587                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1588                         atomic_sub(num_wrbs, &txq->used);
1589                         cmpl = 0;
1590                         num_wrbs = 0;
1591                 }
1592
1593                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1594                         break;
1595
1596                 mdelay(1);
1597         } while (true);
1598
1599         if (atomic_read(&txq->used))
1600                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1601                         atomic_read(&txq->used));
1602
1603         /* Free posted tx skbs for which compls will never arrive */
1604         while (atomic_read(&txq->used)) {
1605                 sent_skb = sent_skbs[txq->tail];
1606                 end_idx = txq->tail;
1607                 index_adv(&end_idx,
1608                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1609                         txq->len);
1610                 num_wrbs = be_tx_compl_process(adapter, end_idx);
1611                 atomic_sub(num_wrbs, &txq->used);
1612         }
1613 }
1614
1615 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1616 {
1617         struct be_queue_info *q;
1618
1619         q = &adapter->mcc_obj.q;
1620         if (q->created)
1621                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1622         be_queue_free(adapter, q);
1623
1624         q = &adapter->mcc_obj.cq;
1625         if (q->created)
1626                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1627         be_queue_free(adapter, q);
1628 }
1629
1630 /* Must be called only after TX qs are created as MCC shares TX EQ */
1631 static int be_mcc_queues_create(struct be_adapter *adapter)
1632 {
1633         struct be_queue_info *q, *cq;
1634
1635         /* Alloc MCC compl queue */
1636         cq = &adapter->mcc_obj.cq;
1637         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1638                         sizeof(struct be_mcc_compl)))
1639                 goto err;
1640
1641         /* Ask BE to create MCC compl queue; share TX's eq */
1642         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1643                 goto mcc_cq_free;
1644
1645         /* Alloc MCC queue */
1646         q = &adapter->mcc_obj.q;
1647         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1648                 goto mcc_cq_destroy;
1649
1650         /* Ask BE to create MCC queue */
1651         if (be_cmd_mccq_create(adapter, q, cq))
1652                 goto mcc_q_free;
1653
1654         return 0;
1655
1656 mcc_q_free:
1657         be_queue_free(adapter, q);
1658 mcc_cq_destroy:
1659         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1660 mcc_cq_free:
1661         be_queue_free(adapter, cq);
1662 err:
1663         return -1;
1664 }
1665
1666 static void be_tx_queues_destroy(struct be_adapter *adapter)
1667 {
1668         struct be_queue_info *q;
1669
1670         q = &adapter->tx_obj.q;
1671         if (q->created)
1672                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1673         be_queue_free(adapter, q);
1674
1675         q = &adapter->tx_obj.cq;
1676         if (q->created)
1677                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1678         be_queue_free(adapter, q);
1679
1680         /* Clear any residual events */
1681         be_eq_clean(adapter, &adapter->tx_eq);
1682
1683         q = &adapter->tx_eq.q;
1684         if (q->created)
1685                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1686         be_queue_free(adapter, q);
1687 }
1688
1689 static int be_tx_queues_create(struct be_adapter *adapter)
1690 {
1691         struct be_queue_info *eq, *q, *cq;
1692
1693         adapter->tx_eq.max_eqd = 0;
1694         adapter->tx_eq.min_eqd = 0;
1695         adapter->tx_eq.cur_eqd = 96;
1696         adapter->tx_eq.enable_aic = false;
1697         /* Alloc Tx Event queue */
1698         eq = &adapter->tx_eq.q;
1699         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1700                 return -1;
1701
1702         /* Ask BE to create Tx Event queue */
1703         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1704                 goto tx_eq_free;
1705
1706         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1707
1709         /* Alloc TX eth compl queue */
1710         cq = &adapter->tx_obj.cq;
1711         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1712                         sizeof(struct be_eth_tx_compl)))
1713                 goto tx_eq_destroy;
1714
1715         /* Ask BE to create Tx eth compl queue */
1716         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1717                 goto tx_cq_free;
1718
1719         /* Alloc TX eth queue */
1720         q = &adapter->tx_obj.q;
1721         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1722                 goto tx_cq_destroy;
1723
1724         /* Ask BE to create Tx eth queue */
1725         if (be_cmd_txq_create(adapter, q, cq))
1726                 goto tx_q_free;
1727         return 0;
1728
1729 tx_q_free:
1730         be_queue_free(adapter, q);
1731 tx_cq_destroy:
1732         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1733 tx_cq_free:
1734         be_queue_free(adapter, cq);
1735 tx_eq_destroy:
1736         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1737 tx_eq_free:
1738         be_queue_free(adapter, eq);
1739         return -1;
1740 }
1741
1742 static void be_rx_queues_destroy(struct be_adapter *adapter)
1743 {
1744         struct be_queue_info *q;
1745         struct be_rx_obj *rxo;
1746         int i;
1747
1748         for_all_rx_queues(adapter, rxo, i) {
1749                 q = &rxo->q;
1750                 if (q->created) {
1751                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1752                         /* After the rxq is invalidated, wait for a grace time
1753                          * of 1ms for all dma to end and the flush compl to
1754                          * arrive
1755                          */
1756                         mdelay(1);
1757                         be_rx_q_clean(adapter, rxo);
1758                 }
1759                 be_queue_free(adapter, q);
1760
1761                 q = &rxo->cq;
1762                 if (q->created)
1763                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1764                 be_queue_free(adapter, q);
1765
1766                 /* Clear any residual events */
1767                 q = &rxo->rx_eq.q;
1768                 if (q->created) {
1769                         be_eq_clean(adapter, &rxo->rx_eq);
1770                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1771                 }
1772                 be_queue_free(adapter, q);
1773         }
1774 }
1775
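/* Multiple RX queues (one default queue plus RSS queues) are used only
 * when the function supports RSS and neither SR-IOV nor the 0x400
 * function-mode bit is in effect; otherwise a single RX queue is used. */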
1776 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1777 {
1778         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1779                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1780                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1781         } else {
1782                 dev_warn(&adapter->pdev->dev,
1783                         "No support for multiple RX queues\n");
1784                 return 1;
1785         }
1786 }
1787
1788 static int be_rx_queues_create(struct be_adapter *adapter)
1789 {
1790         struct be_queue_info *eq, *q, *cq;
1791         struct be_rx_obj *rxo;
1792         int rc, i;
1793
1794         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1795                                 msix_enabled(adapter) ?
1796                                         adapter->num_msix_vec - 1 : 1);
1797         if (adapter->num_rx_qs != MAX_RX_QS)
1798                 dev_warn(&adapter->pdev->dev,
1799                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1800
1801         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1802         for_all_rx_queues(adapter, rxo, i) {
1803                 rxo->adapter = adapter;
1804                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1805                 rxo->rx_eq.enable_aic = true;
1806
1807                 /* EQ */
1808                 eq = &rxo->rx_eq.q;
1809                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1810                                         sizeof(struct be_eq_entry));
1811                 if (rc)
1812                         goto err;
1813
1814                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1815                 if (rc)
1816                         goto err;
1817
1818                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1819
1820                 /* CQ */
1821                 cq = &rxo->cq;
1822                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1823                                 sizeof(struct be_eth_rx_compl));
1824                 if (rc)
1825                         goto err;
1826
1827                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1828                 if (rc)
1829                         goto err;
1830                 /* Rx Q */
1831                 q = &rxo->q;
1832                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1833                                 sizeof(struct be_eth_rx_d));
1834                 if (rc)
1835                         goto err;
1836
1837                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1838                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1839                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1840                 if (rc)
1841                         goto err;
1842         }
1843
1844         if (be_multi_rxq(adapter)) {
1845                 u8 rsstable[MAX_RSS_QS];
1846
1847                 for_all_rss_queues(adapter, rxo, i)
1848                         rsstable[i] = rxo->rss_id;
1849
1850                 rc = be_cmd_rss_config(adapter, rsstable,
1851                         adapter->num_rx_qs - 1);
1852                 if (rc)
1853                         goto err;
1854         }
1855
1856         return 0;
1857 err:
1858         be_rx_queues_destroy(adapter);
1859         return -1;
1860 }
1861
1862 static bool event_peek(struct be_eq_obj *eq_obj)
1863 {
1864         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1865         return eqe->evt != 0;
1869 }
1870
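/* INTx handler. Lancer has no CEV ISR register, so pending events are
 * detected by peeking at the EQs; on other chips the ISR register
 * indicates which event queues fired. Returns IRQ_NONE if the
 * interrupt was not ours. */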
1871 static irqreturn_t be_intx(int irq, void *dev)
1872 {
1873         struct be_adapter *adapter = dev;
1874         struct be_rx_obj *rxo;
1875         int isr, i, tx = 0, rx = 0;
1876
1877         if (lancer_chip(adapter)) {
1878                 if (event_peek(&adapter->tx_eq))
1879                         tx = event_handle(adapter, &adapter->tx_eq);
1880                 for_all_rx_queues(adapter, rxo, i) {
1881                         if (event_peek(&rxo->rx_eq))
1882                                 rx |= event_handle(adapter, &rxo->rx_eq);
1883                 }
1884
1885                 if (!(tx || rx))
1886                         return IRQ_NONE;
1887
1888         } else {
1889                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1890                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1891                 if (!isr)
1892                         return IRQ_NONE;
1893
1894                 if ((1 << adapter->tx_eq.eq_idx & isr))
1895                         event_handle(adapter, &adapter->tx_eq);
1896
1897                 for_all_rx_queues(adapter, rxo, i) {
1898                         if ((1 << rxo->rx_eq.eq_idx & isr))
1899                                 event_handle(adapter, &rxo->rx_eq);
1900                 }
1901         }
1902
1903         return IRQ_HANDLED;
1904 }
1905
1906 static irqreturn_t be_msix_rx(int irq, void *dev)
1907 {
1908         struct be_rx_obj *rxo = dev;
1909         struct be_adapter *adapter = rxo->adapter;
1910
1911         event_handle(adapter, &rxo->rx_eq);
1912
1913         return IRQ_HANDLED;
1914 }
1915
1916 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1917 {
1918         struct be_adapter *adapter = dev;
1919
1920         event_handle(adapter, &adapter->tx_eq);
1921
1922         return IRQ_HANDLED;
1923 }
1924
1925 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1926 {
1927         return rxcp->tcpf && !rxcp->err;
1928 }
1929
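/* NAPI poll handler for an RX queue: process up to budget completions
 * (via GRO when possible), refill the RX ring if it is running low and
 * re-arm the CQ only when all pending work has been consumed. */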
1930 static int be_poll_rx(struct napi_struct *napi, int budget)
1931 {
1932         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1933         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1934         struct be_adapter *adapter = rxo->adapter;
1935         struct be_queue_info *rx_cq = &rxo->cq;
1936         struct be_rx_compl_info *rxcp;
1937         u32 work_done;
1938
1939         rxo->stats.rx_polls++;
1940         for (work_done = 0; work_done < budget; work_done++) {
1941                 rxcp = be_rx_compl_get(rxo);
1942                 if (!rxcp)
1943                         break;
1944
1945                 /* Ignore flush completions */
1946                 if (rxcp->num_rcvd && rxcp->pkt_size) {
1947                         if (do_gro(rxcp))
1948                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1949                         else
1950                                 be_rx_compl_process(adapter, rxo, rxcp);
1951                 } else if (rxcp->pkt_size == 0) {
1952                         be_rx_compl_discard(adapter, rxo, rxcp);
1953                 }
1954
1955                 be_rx_stats_update(rxo, rxcp);
1956         }
1957
1958         /* Refill the queue */
1959         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1960                 be_post_rx_frags(rxo, GFP_ATOMIC);
1961
1962         /* All consumed */
1963         if (work_done < budget) {
1964                 napi_complete(napi);
1965                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1966         } else {
1967                 /* More to be consumed; continue with interrupts disabled */
1968                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1969         }
1970         return work_done;
1971 }
1972
1973 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1974  * For TX/MCC we don't honour the budget; consume everything.
1975  */
1976 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1977 {
1978         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1979         struct be_adapter *adapter =
1980                 container_of(tx_eq, struct be_adapter, tx_eq);
1981         struct be_queue_info *txq = &adapter->tx_obj.q;
1982         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1983         struct be_eth_tx_compl *txcp;
1984         int tx_compl = 0, mcc_compl, status = 0;
1985         u16 end_idx, num_wrbs = 0;
1986
1987         while ((txcp = be_tx_compl_get(tx_cq))) {
1988                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1989                                 wrb_index, txcp);
1990                 num_wrbs += be_tx_compl_process(adapter, end_idx);
1991                 tx_compl++;
1992         }
1993
1994         mcc_compl = be_process_mcc(adapter, &status);
1995
1996         napi_complete(napi);
1997
1998         if (mcc_compl) {
1999                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2000                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2001         }
2002
2003         if (tx_compl) {
2004                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
2005
2006                 atomic_sub(num_wrbs, &txq->used);
2007
2008                 /* As Tx wrbs have been freed up, wake up netdev queue if
2009                  * it was stopped due to lack of tx wrbs.
2010                  */
2011                 if (netif_queue_stopped(adapter->netdev) &&
2012                         atomic_read(&txq->used) < txq->len / 2) {
2013                         netif_wake_queue(adapter->netdev);
2014                 }
2015
2016                 tx_stats(adapter)->be_tx_events++;
2017                 tx_stats(adapter)->be_tx_compl += tx_compl;
2018         }
2019
2020         return 1;
2021 }
2022
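/* Read the UE (unrecoverable error) status registers from PCI config
 * space and log every unmasked error bit that is set. */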
2023 void be_detect_dump_ue(struct be_adapter *adapter)
2024 {
2025         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2026         u32 i;
2027
2028         pci_read_config_dword(adapter->pdev,
2029                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2030         pci_read_config_dword(adapter->pdev,
2031                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2032         pci_read_config_dword(adapter->pdev,
2033                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2034         pci_read_config_dword(adapter->pdev,
2035                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2036
2037         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2038         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2039
2040         if (ue_status_lo || ue_status_hi) {
2041                 adapter->ue_detected = true;
2042                 adapter->eeh_err = true;
2043                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2044         }
2045
2046         if (ue_status_lo) {
2047                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2048                         if (ue_status_lo & 1)
2049                                 dev_err(&adapter->pdev->dev,
2050                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2051                 }
2052         }
2053         if (ue_status_hi) {
2054                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2055                         if (ue_status_hi & 1)
2056                                 dev_err(&adapter->pdev->dev,
2057                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2058                 }
2059         }
2060
2061 }
2062
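/* Periodic (1 sec) housekeeping: detect unrecoverable errors, issue the
 * stats command, update tx/rx rates and RX EQ delays, and replenish any
 * RX queue that starved while memory was unavailable. */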
2063 static void be_worker(struct work_struct *work)
2064 {
2065         struct be_adapter *adapter =
2066                 container_of(work, struct be_adapter, work.work);
2067         struct be_rx_obj *rxo;
2068         int i;
2069
2070         if (!adapter->ue_detected && !lancer_chip(adapter))
2071                 be_detect_dump_ue(adapter);
2072
2073         /* When interrupts are not yet enabled, just reap any pending
2074          * mcc completions */
2075         if (!netif_running(adapter->netdev)) {
2076                 int mcc_compl, status = 0;
2077
2078                 mcc_compl = be_process_mcc(adapter, &status);
2079
2080                 if (mcc_compl) {
2081                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2082                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2083                 }
2084
2085                 goto reschedule;
2086         }
2087
2088         if (!adapter->stats_cmd_sent) {
2089                 if (lancer_chip(adapter))
2090                         lancer_cmd_get_pport_stats(adapter,
2091                                                 &adapter->stats_cmd);
2092                 else
2093                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
2094         }
2095         be_tx_rate_update(adapter);
2096
2097         for_all_rx_queues(adapter, rxo, i) {
2098                 be_rx_rate_update(rxo);
2099                 be_rx_eqd_update(adapter, rxo);
2100
2101                 if (rxo->rx_post_starved) {
2102                         rxo->rx_post_starved = false;
2103                         be_post_rx_frags(rxo, GFP_KERNEL);
2104                 }
2105         }
2106
2107 reschedule:
2108         adapter->work_counter++;
2109         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2110 }
2111
2112 static void be_msix_disable(struct be_adapter *adapter)
2113 {
2114         if (msix_enabled(adapter)) {
2115                 pci_disable_msix(adapter->pdev);
2116                 adapter->num_msix_vec = 0;
2117         }
2118 }
2119
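/* Try to enable one MSI-x vector per desired RX queue plus one for
 * TX/MCC. If that many are unavailable, retry with the count the device
 * offers, provided it meets the Rx + Tx minimum. */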
2120 static void be_msix_enable(struct be_adapter *adapter)
2121 {
2122 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2123         int i, status, num_vec;
2124
2125         num_vec = be_num_rxqs_want(adapter) + 1;
2126
2127         for (i = 0; i < num_vec; i++)
2128                 adapter->msix_entries[i].entry = i;
2129
2130         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2131         if (status == 0) {
2132                 goto done;
2133         } else if (status >= BE_MIN_MSIX_VECTORS) {
2134                 num_vec = status;
2135                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2136                                 num_vec) == 0)
2137                         goto done;
2138         }
2139         return;
2140 done:
2141         adapter->num_msix_vec = num_vec;
2142         return;
2143 }
2144
2145 static void be_sriov_enable(struct be_adapter *adapter)
2146 {
2147         be_check_sriov_fn_type(adapter);
2148 #ifdef CONFIG_PCI_IOV
2149         if (be_physfn(adapter) && num_vfs) {
2150                 int status, pos;
2151                 u16 nvfs;
2152
2153                 pos = pci_find_ext_capability(adapter->pdev,
2154                                                 PCI_EXT_CAP_ID_SRIOV);
2155                 pci_read_config_word(adapter->pdev,
2156                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2157
2158                 if (num_vfs > nvfs) {
2159                         dev_info(&adapter->pdev->dev,
2160                                         "Device supports %d VFs and not %d\n",
2161                                         nvfs, num_vfs);
2162                         num_vfs = nvfs;
2163                 }
2164
2165                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2166                 adapter->sriov_enabled = !status;
2167         }
2168 #endif
2169 }
2170
2171 static void be_sriov_disable(struct be_adapter *adapter)
2172 {
2173 #ifdef CONFIG_PCI_IOV
2174         if (adapter->sriov_enabled) {
2175                 pci_disable_sriov(adapter->pdev);
2176                 adapter->sriov_enabled = false;
2177         }
2178 #endif
2179 }
2180
2181 static inline int be_msix_vec_get(struct be_adapter *adapter,
2182                                         struct be_eq_obj *eq_obj)
2183 {
2184         return adapter->msix_entries[eq_obj->eq_idx].vector;
2185 }
2186
2187 static int be_request_irq(struct be_adapter *adapter,
2188                 struct be_eq_obj *eq_obj,
2189                 void *handler, char *desc, void *context)
2190 {
2191         struct net_device *netdev = adapter->netdev;
2192         int vec;
2193
2194         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2195         vec = be_msix_vec_get(adapter, eq_obj);
2196         return request_irq(vec, handler, 0, eq_obj->desc, context);
2197 }
2198
2199 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2200                         void *context)
2201 {
2202         int vec = be_msix_vec_get(adapter, eq_obj);
2203         free_irq(vec, context);
2204 }
2205
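/* Request one IRQ for the TX/MCC event queue and one per RX event queue,
 * unwinding all registrations if any request fails. */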
2206 static int be_msix_register(struct be_adapter *adapter)
2207 {
2208         struct be_rx_obj *rxo;
2209         int status, i;
2210         char qname[10];
2211
2212         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2213                                 adapter);
2214         if (status)
2215                 goto err;
2216
2217         for_all_rx_queues(adapter, rxo, i) {
2218                 sprintf(qname, "rxq%d", i);
2219                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2220                                 qname, rxo);
2221                 if (status)
2222                         goto err_msix;
2223         }
2224
2225         return 0;
2226
2227 err_msix:
2228         be_free_irq(adapter, &adapter->tx_eq, adapter);
2229
2230         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2231                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2232
2233 err:
2234         dev_warn(&adapter->pdev->dev,
2235                 "MSIX Request IRQ failed - err %d\n", status);
2236         be_msix_disable(adapter);
2237         return status;
2238 }
2239
2240 static int be_irq_register(struct be_adapter *adapter)
2241 {
2242         struct net_device *netdev = adapter->netdev;
2243         int status;
2244
2245         if (msix_enabled(adapter)) {
2246                 status = be_msix_register(adapter);
2247                 if (status == 0)
2248                         goto done;
2249                 /* INTx is not supported for VF */
2250                 if (!be_physfn(adapter))
2251                         return status;
2252         }
2253
2254         /* INTx */
2255         netdev->irq = adapter->pdev->irq;
2256         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2257                         adapter);
2258         if (status) {
2259                 dev_err(&adapter->pdev->dev,
2260                         "INTx request IRQ failed - err %d\n", status);
2261                 return status;
2262         }
2263 done:
2264         adapter->isr_registered = true;
2265         return 0;
2266 }
2267
2268 static void be_irq_unregister(struct be_adapter *adapter)
2269 {
2270         struct net_device *netdev = adapter->netdev;
2271         struct be_rx_obj *rxo;
2272         int i;
2273
2274         if (!adapter->isr_registered)
2275                 return;
2276
2277         /* INTx */
2278         if (!msix_enabled(adapter)) {
2279                 free_irq(netdev->irq, adapter);
2280                 goto done;
2281         }
2282
2283         /* MSIx */
2284         be_free_irq(adapter, &adapter->tx_eq, adapter);
2285
2286         for_all_rx_queues(adapter, rxo, i)
2287                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2288
2289 done:
2290         adapter->isr_registered = false;
2291 }
2292
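/* ndo_stop: quiesce the interface - disable async MCC processing,
 * interrupts and NAPI, drain in-flight TX completions and unregister
 * the IRQ handlers. */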
2293 static int be_close(struct net_device *netdev)
2294 {
2295         struct be_adapter *adapter = netdev_priv(netdev);
2296         struct be_rx_obj *rxo;
2297         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2298         int vec, i;
2299
2300         be_async_mcc_disable(adapter);
2301
2302         netif_carrier_off(netdev);
2303         adapter->link_up = false;
2304
2305         if (!lancer_chip(adapter))
2306                 be_intr_set(adapter, false);
2307
2308         for_all_rx_queues(adapter, rxo, i)
2309                 napi_disable(&rxo->rx_eq.napi);
2310
2311         napi_disable(&tx_eq->napi);
2312
2313         if (lancer_chip(adapter)) {
2314                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2315                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2316                 for_all_rx_queues(adapter, rxo, i)
2317                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2318         }
2319
2320         if (msix_enabled(adapter)) {
2321                 vec = be_msix_vec_get(adapter, tx_eq);
2322                 synchronize_irq(vec);
2323
2324                 for_all_rx_queues(adapter, rxo, i) {
2325                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2326                         synchronize_irq(vec);
2327                 }
2328         } else {
2329                 synchronize_irq(netdev->irq);
2330         }
2331         be_irq_unregister(adapter);
2332
2333         /* Wait for all pending tx completions to arrive so that
2334          * all tx skbs are freed.
2335          */
2336         be_tx_compl_clean(adapter);
2337
2338         return 0;
2339 }
2340
2341 static int be_open(struct net_device *netdev)
2342 {
2343         struct be_adapter *adapter = netdev_priv(netdev);
2344         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2345         struct be_rx_obj *rxo;
2346         bool link_up;
2347         int status, i;
2348         u8 mac_speed;
2349         u16 link_speed;
2350
2351         for_all_rx_queues(adapter, rxo, i) {
2352                 be_post_rx_frags(rxo, GFP_KERNEL);
2353                 napi_enable(&rxo->rx_eq.napi);
2354         }
2355         napi_enable(&tx_eq->napi);
2356
2357         be_irq_register(adapter);
2358
2359         if (!lancer_chip(adapter))
2360                 be_intr_set(adapter, true);
2361
2362         /* The evt queues are created in an unarmed state; arm them */
2363         for_all_rx_queues(adapter, rxo, i) {
2364                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2365                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2366         }
2367         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2368
2369         /* Now that interrupts are on we can process async mcc */
2370         be_async_mcc_enable(adapter);
2371
2372         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2373                         &link_speed, 0);
2374         if (status)
2375                 goto err;
2376         be_link_status_update(adapter, link_up);
2377
2378         if (be_physfn(adapter)) {
2379                 status = be_vid_config(adapter, false, 0);
2380                 if (status)
2381                         goto err;
2382
2383                 status = be_cmd_set_flow_control(adapter,
2384                                 adapter->tx_fc, adapter->rx_fc);
2385                 if (status)
2386                         goto err;
2387         }
2388
2389         return 0;
2390 err:
2391         be_close(adapter->netdev);
2392         return -EIO;
2393 }
2394
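/* Program the magic-packet wake-on-LAN filter: the netdev MAC address
 * when enabling, an all-zero MAC when disabling; the PCI device's wake
 * capability is flagged to match. */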
2395 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2396 {
2397         struct be_dma_mem cmd;
2398         int status = 0;
2399         u8 mac[ETH_ALEN];
2400
2401         memset(mac, 0, ETH_ALEN);
2402
2403         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2404         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2405                                     GFP_KERNEL);
2406         if (cmd.va == NULL)
2407                 return -1;
2408         memset(cmd.va, 0, cmd.size);
2409
2410         if (enable) {
2411                 status = pci_write_config_dword(adapter->pdev,
2412                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2413                 if (status) {
2414                         dev_err(&adapter->pdev->dev,
2415                                 "Could not enable Wake-on-lan\n");
2416                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2417                                           cmd.dma);
2418                         return status;
2419                 }
2420                 status = be_cmd_enable_magic_wol(adapter,
2421                                 adapter->netdev->dev_addr, &cmd);
2422                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2423                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2424         } else {
2425                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2426                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2427                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2428         }
2429
2430         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2431         return status;
2432 }
2433
2434 /*
2435  * Generate a seed MAC address from the PF MAC Address using jhash.
2436  * MAC addresses for VFs are assigned incrementally starting from the seed.
2437  * These addresses are programmed in the ASIC by the PF and the VF driver
2438  * queries for the MAC address during its probe.
2439  */
2440 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2441 {
2442         u32 vf = 0;
2443         int status = 0;
2444         u8 mac[ETH_ALEN];
2445
2446         be_vf_eth_addr_generate(adapter, mac);
2447
2448         for (vf = 0; vf < num_vfs; vf++) {
2449                 status = be_cmd_pmac_add(adapter, mac,
2450                                         adapter->vf_cfg[vf].vf_if_handle,
2451                                         &adapter->vf_cfg[vf].vf_pmac_id,
2452                                         vf + 1);
2453                 if (status)
2454                         dev_err(&adapter->pdev->dev,
2455                                 "Mac address add failed for VF %d\n", vf);
2456                 else
2457                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2458
2459                 mac[5] += 1;
2460         }
2461         return status;
2462 }
2463
2464 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2465 {
2466         u32 vf;
2467
2468         for (vf = 0; vf < num_vfs; vf++) {
2469                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2470                         be_cmd_pmac_del(adapter,
2471                                         adapter->vf_cfg[vf].vf_if_handle,
2472                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2473         }
2474 }
2475
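/* One-time data path setup: create the interface (plus one interface per
 * VF when SR-IOV is enabled) and then the TX, RX and MCC queue sets,
 * unwinding everything on failure. */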
2476 static int be_setup(struct be_adapter *adapter)
2477 {
2478         struct net_device *netdev = adapter->netdev;
2479         u32 cap_flags, en_flags, vf = 0;
2480         int status;
2481         u8 mac[ETH_ALEN];
2482
2483         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2484                                 BE_IF_FLAGS_BROADCAST |
2485                                 BE_IF_FLAGS_MULTICAST;
2486
2487         if (be_physfn(adapter)) {
2488                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2489                                 BE_IF_FLAGS_PROMISCUOUS |
2490                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2491                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2492
2493                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2494                         cap_flags |= BE_IF_FLAGS_RSS;
2495                         en_flags |= BE_IF_FLAGS_RSS;
2496                 }
2497         }
2498
2499         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2500                         netdev->dev_addr, false/* pmac_invalid */,
2501                         &adapter->if_handle, &adapter->pmac_id, 0);
2502         if (status != 0)
2503                 goto do_none;
2504
2505         if (be_physfn(adapter)) {
2506                 if (adapter->sriov_enabled) {
2507                         while (vf < num_vfs) {
2508                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2509                                                         BE_IF_FLAGS_BROADCAST;
2510                                 status = be_cmd_if_create(adapter, cap_flags,
2511                                         en_flags, mac, true,
2512                                         &adapter->vf_cfg[vf].vf_if_handle,
2513                                         NULL, vf+1);
2514                                 if (status) {
2515                                         dev_err(&adapter->pdev->dev,
2516                                         "Interface Create failed for VF %d\n",
2517                                         vf);
2518                                         goto if_destroy;
2519                                 }
2520                                 adapter->vf_cfg[vf].vf_pmac_id =
2521                                                         BE_INVALID_PMAC_ID;
2522                                 vf++;
2523                         }
2524                 }
2525         } else {
2526                 status = be_cmd_mac_addr_query(adapter, mac,
2527                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2528                 if (!status) {
2529                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2530                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2531                 }
2532         }
2533
2534         status = be_tx_queues_create(adapter);
2535         if (status != 0)
2536                 goto if_destroy;
2537
2538         status = be_rx_queues_create(adapter);
2539         if (status != 0)
2540                 goto tx_qs_destroy;
2541
2542         status = be_mcc_queues_create(adapter);
2543         if (status != 0)
2544                 goto rx_qs_destroy;
2545
2546         adapter->link_speed = -1;
2547
2548         return 0;
2549
2550 rx_qs_destroy:
2551         be_rx_queues_destroy(adapter);
2552 tx_qs_destroy:
2553         be_tx_queues_destroy(adapter);
2554 if_destroy:
2555         if (be_physfn(adapter) && adapter->sriov_enabled)
2556                 for (vf = 0; vf < num_vfs; vf++)
2557                         if (adapter->vf_cfg[vf].vf_if_handle)
2558                                 be_cmd_if_destroy(adapter,
2559                                         adapter->vf_cfg[vf].vf_if_handle,
2560                                         vf + 1);
2561         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2562 do_none:
2563         return status;
2564 }
2565
2566 static int be_clear(struct be_adapter *adapter)
2567 {
2568         int vf;
2569
2570         if (be_physfn(adapter) && adapter->sriov_enabled)
2571                 be_vf_eth_addr_rem(adapter);
2572
2573         be_mcc_queues_destroy(adapter);
2574         be_rx_queues_destroy(adapter);
2575         be_tx_queues_destroy(adapter);
2576         adapter->eq_next_idx = 0;
2577
2578         if (be_physfn(adapter) && adapter->sriov_enabled)
2579                 for (vf = 0; vf < num_vfs; vf++)
2580                         if (adapter->vf_cfg[vf].vf_if_handle)
2581                                 be_cmd_if_destroy(adapter,
2582                                         adapter->vf_cfg[vf].vf_if_handle,
2583                                         vf + 1);
2584
2585         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2586
2587         /* tell fw we're done with firing cmds */
2588         be_cmd_fw_clean(adapter);
2589         return 0;
2590 }
2591
2593 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2594 static bool be_flash_redboot(struct be_adapter *adapter,
2595                         const u8 *p, u32 img_start, int image_size,
2596                         int hdr_size)
2597 {
2598         u32 crc_offset;
2599         u8 flashed_crc[4];
2600         int status;
2601
2602         crc_offset = hdr_size + img_start + image_size - 4;
2603
2604         p += crc_offset;
2605
2606         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2607                         (image_size - 4));
2608         if (status) {
2609                 dev_err(&adapter->pdev->dev,
2610                 "could not get crc from flash, not flashing redboot\n");
2611                 return false;
2612         }
2613
2614         /* update redboot only if the crc does not match */
2615         return memcmp(flashed_crc, p, 4) != 0;
2619 }
2620
2621 static int be_flash_data(struct be_adapter *adapter,
2622                         const struct firmware *fw,
2623                         struct be_dma_mem *flash_cmd, int num_of_images)
2625 {
2626         int status = 0, i, filehdr_size = 0;
2627         u32 total_bytes = 0, flash_op;
2628         int num_bytes;
2629         const u8 *p = fw->data;
2630         struct be_cmd_write_flashrom *req = flash_cmd->va;
2631         const struct flash_comp *pflashcomp;
2632         int num_comp;
2633
2634         static const struct flash_comp gen3_flash_types[9] = {
2635                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2636                         FLASH_IMAGE_MAX_SIZE_g3},
2637                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2638                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2639                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2640                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2641                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2642                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2643                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2644                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2645                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2646                         FLASH_IMAGE_MAX_SIZE_g3},
2647                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2648                         FLASH_IMAGE_MAX_SIZE_g3},
2649                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2650                         FLASH_IMAGE_MAX_SIZE_g3},
2651                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2652                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2653         };
2654         static const struct flash_comp gen2_flash_types[8] = {
2655                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2656                         FLASH_IMAGE_MAX_SIZE_g2},
2657                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2658                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2659                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2660                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2661                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2662                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2663                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2664                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2665                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2666                         FLASH_IMAGE_MAX_SIZE_g2},
2667                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2668                         FLASH_IMAGE_MAX_SIZE_g2},
2669                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2670                          FLASH_IMAGE_MAX_SIZE_g2}
2671         };
2672
2673         if (adapter->generation == BE_GEN3) {
2674                 pflashcomp = gen3_flash_types;
2675                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2676                 num_comp = ARRAY_SIZE(gen3_flash_types);
2677         } else {
2678                 pflashcomp = gen2_flash_types;
2679                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2680                 num_comp = ARRAY_SIZE(gen2_flash_types);
2681         }
2682         for (i = 0; i < num_comp; i++) {
2683                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2684                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2685                         continue;
2686                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2687                         (!be_flash_redboot(adapter, fw->data,
2688                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2689                         (num_of_images * sizeof(struct image_hdr)))))
2690                         continue;
2691                 p = fw->data;
2692                 p += filehdr_size + pflashcomp[i].offset
2693                         + (num_of_images * sizeof(struct image_hdr));
2694                 if (p + pflashcomp[i].size > fw->data + fw->size)
2695                         return -1;
2696                 total_bytes = pflashcomp[i].size;
2697                 while (total_bytes) {
2698                         if (total_bytes > 32*1024)
2699                                 num_bytes = 32*1024;
2700                         else
2701                                 num_bytes = total_bytes;
2702                         total_bytes -= num_bytes;
2703
2704                         if (!total_bytes)
2705                                 flash_op = FLASHROM_OPER_FLASH;
2706                         else
2707                                 flash_op = FLASHROM_OPER_SAVE;
2708                         memcpy(req->params.data_buf, p, num_bytes);
2709                         p += num_bytes;
2710                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2711                                 pflashcomp[i].optype, flash_op, num_bytes);
2712                         if (status) {
2713                                 dev_err(&adapter->pdev->dev,
2714                                         "cmd to write to flash rom failed.\n");
2715                                 return -1;
2716                         }
2717                 }
2718         }
2719         return 0;
2720 }
2721
2722 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2723 {
2724         if (fhdr == NULL)
2725                 return 0;
2726         if (fhdr->build[0] == '3')
2727                 return BE_GEN3;
2728         else if (fhdr->build[0] == '2')
2729                 return BE_GEN2;
2730         else
2731                 return 0;
2732 }
2733
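/* On Lancer, firmware is flashed with the WRITE_OBJECT command: the
 * image is streamed in 32KB chunks to the "/prg" object and then
 * committed with a zero-length write. */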
2734 static int lancer_fw_download(struct be_adapter *adapter,
2735                                 const struct firmware *fw)
2736 {
2737 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2738 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2739         struct be_dma_mem flash_cmd;
2740         const u8 *data_ptr = NULL;
2741         u8 *dest_image_ptr = NULL;
2742         size_t image_size = 0;
2743         u32 chunk_size = 0;
2744         u32 data_written = 0;
2745         u32 offset = 0;
2746         int status = 0;
2747         u8 add_status = 0;
2748
2749         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2750                 dev_err(&adapter->pdev->dev,
2751                         "FW Image not properly aligned. "
2752                         "Length must be 4-byte aligned.\n");
2753                 status = -EINVAL;
2754                 goto lancer_fw_exit;
2755         }
2756
2757         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2758                                 + LANCER_FW_DOWNLOAD_CHUNK;
2759         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2760                                                 &flash_cmd.dma, GFP_KERNEL);
2761         if (!flash_cmd.va) {
2762                 status = -ENOMEM;
2763                 dev_err(&adapter->pdev->dev,
2764                         "Memory allocation failure while flashing\n");
2765                 goto lancer_fw_exit;
2766         }
2767
2768         dest_image_ptr = flash_cmd.va +
2769                                 sizeof(struct lancer_cmd_req_write_object);
2770         image_size = fw->size;
2771         data_ptr = fw->data;
2772
2773         while (image_size) {
2774                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2775
2776                 /* Copy the image chunk content. */
2777                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2778
2779                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2780                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2781                                 &data_written, &add_status);
2782
2783                 if (status)
2784                         break;
2785
2786                 offset += data_written;
2787                 data_ptr += data_written;
2788                 image_size -= data_written;
2789         }
2790
2791         if (!status) {
2792                 /* Commit the FW written */
2793                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2794                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2795                                         &data_written, &add_status);
2796         }
2797
2798         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2799                                 flash_cmd.dma);
2800         if (status) {
2801                 dev_err(&adapter->pdev->dev,
2802                         "Firmware load error. "
2803                         "Status code: 0x%x Additional Status: 0x%x\n",
2804                         status, add_status);
2805                 goto lancer_fw_exit;
2806         }
2807
2808         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2809 lancer_fw_exit:
2810         return status;
2811 }
2812
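/* BE2/BE3 firmware download: verify that the UFI file generation matches
 * the adapter generation and flash each image section via
 * be_flash_data(). */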
2813 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2814 {
2815         struct flash_file_hdr_g2 *fhdr;
2816         struct flash_file_hdr_g3 *fhdr3;
2817         struct image_hdr *img_hdr_ptr = NULL;
2818         struct be_dma_mem flash_cmd;
2819         const u8 *p;
2820         int status = 0, i = 0, num_imgs = 0;
2821
2822         p = fw->data;
2823         fhdr = (struct flash_file_hdr_g2 *) p;
2824
2825         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2826         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2827                                           &flash_cmd.dma, GFP_KERNEL);
2828         if (!flash_cmd.va) {
2829                 status = -ENOMEM;
2830                 dev_err(&adapter->pdev->dev,
2831                         "Memory allocation failure while flashing\n");
2832                 goto be_fw_exit;
2833         }
2834
2835         if ((adapter->generation == BE_GEN3) &&
2836                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2837                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2838                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2839                 for (i = 0; i < num_imgs; i++) {
2840                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2841                                         (sizeof(struct flash_file_hdr_g3) +
2842                                          i * sizeof(struct image_hdr)));
2843                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2844                                 status = be_flash_data(adapter, fw, &flash_cmd,
2845                                                         num_imgs);
2846                 }
2847         } else if ((adapter->generation == BE_GEN2) &&
2848                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2849                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2850         } else {
2851                 dev_err(&adapter->pdev->dev,
2852                         "UFI and Interface are not compatible for flashing\n");
2853                 status = -1;
2854         }
2855
2856         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2857                           flash_cmd.dma);
2858         if (status) {
2859                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2860                 goto be_fw_exit;
2861         }
2862
2863         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2864
2865 be_fw_exit:
2866         return status;
2867 }
2868
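/*
 * Entry point for firmware flashing (e.g. from the ethtool flash path):
 * fetch the image with request_firmware() and hand it to the Lancer or
 * BE2/BE3 specific download routine.
 */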
2869 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2870 {
2871         const struct firmware *fw;
2872         int status;
2873
2874         if (!netif_running(adapter->netdev)) {
2875                 dev_err(&adapter->pdev->dev,
2876                         "Firmware load not allowed (interface is down)\n");
2877                 return -ENETDOWN;
2878         }
2879
2880         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2881         if (status)
2882                 goto fw_exit;
2883
2884         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2885
2886         if (lancer_chip(adapter))
2887                 status = lancer_fw_download(adapter, fw);
2888         else
2889                 status = be_fw_download(adapter, fw);
2890
2891 fw_exit:
2892         release_firmware(fw);
2893         return status;
2894 }
2895
2896 static const struct net_device_ops be_netdev_ops = {
2897         .ndo_open               = be_open,
2898         .ndo_stop               = be_close,
2899         .ndo_start_xmit         = be_xmit,
2900         .ndo_set_rx_mode        = be_set_multicast_list,
2901         .ndo_set_mac_address    = be_mac_addr_set,
2902         .ndo_change_mtu         = be_change_mtu,
2903         .ndo_validate_addr      = eth_validate_addr,
2904         .ndo_vlan_rx_register   = be_vlan_register,
2905         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2906         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2907         .ndo_set_vf_mac         = be_set_vf_mac,
2908         .ndo_set_vf_vlan        = be_set_vf_vlan,
2909         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2910         .ndo_get_vf_config      = be_get_vf_config
2911 };
2912
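/* Publish netdev features (checksum offloads, TSO, VLAN offloads, RX
 * hashing on multi-RXQ setups), set flow-control defaults and register
 * the per-queue NAPI handlers.
 */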
2913 static void be_netdev_init(struct net_device *netdev)
2914 {
2915         struct be_adapter *adapter = netdev_priv(netdev);
2916         struct be_rx_obj *rxo;
2917         int i;
2918
2919         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2920                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2921                 NETIF_F_HW_VLAN_TX;
2922         if (be_multi_rxq(adapter))
2923                 netdev->hw_features |= NETIF_F_RXHASH;
2924
2925         netdev->features |= netdev->hw_features |
2926                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2927
2928         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2929                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2930
2931         netdev->flags |= IFF_MULTICAST;
2932
2933         /* Default settings for Rx and Tx flow control */
2934         adapter->rx_fc = true;
2935         adapter->tx_fc = true;
2936
2937         netif_set_gso_max_size(netdev, 65535);
2938
2939         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2940
2941         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2942
2943         for_all_rx_queues(adapter, rxo, i)
2944                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2945                                 BE_NAPI_WEIGHT);
2946
2947         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2948                 BE_NAPI_WEIGHT);
2949 }
2950
2951 static void be_unmap_pci_bars(struct be_adapter *adapter)
2952 {
2953         if (adapter->csr)
2954                 iounmap(adapter->csr);
2955         if (adapter->db)
2956                 iounmap(adapter->db);
2957         if (adapter->pcicfg && be_physfn(adapter))
2958                 iounmap(adapter->pcicfg);
2959 }
2960
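/*
 * Map the PCI BARs. Lancer needs only its doorbell BAR; on BE2/BE3 the
 * CSR, doorbell and pcicfg BARs depend on the controller generation, and
 * VFs reach pcicfg at a fixed offset inside the doorbell BAR.
 */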
2961 static int be_map_pci_bars(struct be_adapter *adapter)
2962 {
2963         u8 __iomem *addr;
2964         int pcicfg_reg, db_reg;
2965
2966         if (lancer_chip(adapter)) {
2967                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2968                         pci_resource_len(adapter->pdev, 0));
2969                 if (addr == NULL)
2970                         return -ENOMEM;
2971                 adapter->db = addr;
2972                 return 0;
2973         }
2974
2975         if (be_physfn(adapter)) {
2976                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2977                                 pci_resource_len(adapter->pdev, 2));
2978                 if (addr == NULL)
2979                         return -ENOMEM;
2980                 adapter->csr = addr;
2981         }
2982
2983         if (adapter->generation == BE_GEN2) {
2984                 pcicfg_reg = 1;
2985                 db_reg = 4;
2986         } else {
2987                 pcicfg_reg = 0;
2988                 if (be_physfn(adapter))
2989                         db_reg = 4;
2990                 else
2991                         db_reg = 0;
2992         }
2993         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2994                                 pci_resource_len(adapter->pdev, db_reg));
2995         if (addr == NULL)
2996                 goto pci_map_err;
2997         adapter->db = addr;
2998
2999         if (be_physfn(adapter)) {
3000                 addr = ioremap_nocache(
3001                                 pci_resource_start(adapter->pdev, pcicfg_reg),
3002                                 pci_resource_len(adapter->pdev, pcicfg_reg));
3003                 if (addr == NULL)
3004                         goto pci_map_err;
3005                 adapter->pcicfg = addr;
3006         } else {
3007                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
             }
3008
3009         return 0;
3010 pci_map_err:
3011         be_unmap_pci_bars(adapter);
3012         return -ENOMEM;
3013 }
3014
3016 static void be_ctrl_cleanup(struct be_adapter *adapter)
3017 {
3018         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3019
3020         be_unmap_pci_bars(adapter);
3021
3022         if (mem->va)
3023                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3024                                   mem->dma);
3025
3026         mem = &adapter->mc_cmd_mem;
3027         if (mem->va)
3028                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3029                                   mem->dma);
3030 }
3031
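/*
 * Control path init: map the BARs, carve a 16-byte aligned mailbox out
 * of a DMA-coherent allocation, allocate the multicast config command
 * buffer and set up the mailbox/MCC locks.
 */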
3032 static int be_ctrl_init(struct be_adapter *adapter)
3033 {
3034         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3035         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3036         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
3037         int status;
3038
3039         status = be_map_pci_bars(adapter);
3040         if (status)
3041                 goto done;
3042
3043         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3044         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3045                                                 mbox_mem_alloc->size,
3046                                                 &mbox_mem_alloc->dma,
3047                                                 GFP_KERNEL);
3048         if (!mbox_mem_alloc->va) {
3049                 status = -ENOMEM;
3050                 goto unmap_pci_bars;
3051         }
3052
3053         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3054         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3055         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3056         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3057
3058         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
3059         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3060                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
3061                                             GFP_KERNEL);
3062         if (mc_cmd_mem->va == NULL) {
3063                 status = -ENOMEM;
3064                 goto free_mbox;
3065         }
3066         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3067
3068         mutex_init(&adapter->mbox_lock);
3069         spin_lock_init(&adapter->mcc_lock);
3070         spin_lock_init(&adapter->mcc_cq_lock);
3071
3072         init_completion(&adapter->flash_compl);
3073         pci_save_state(adapter->pdev);
3074         return 0;
3075
3076 free_mbox:
3077         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3078                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3079
3080 unmap_pci_bars:
3081         be_unmap_pci_bars(adapter);
3082
3083 done:
3084         return status;
3085 }
3086
3087 static void be_stats_cleanup(struct be_adapter *adapter)
3088 {
3089         struct be_dma_mem *cmd = &adapter->stats_cmd;
3090
3091         if (cmd->va)
3092                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3093                                   cmd->va, cmd->dma);
3094 }
3095
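/* Allocate the DMA buffer for stats commands; the size depends on the
 * stats command version the chip supports.
 */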
3096 static int be_stats_init(struct be_adapter *adapter)
3097 {
3098         struct be_dma_mem *cmd = &adapter->stats_cmd;
3099
3100         if (adapter->generation == BE_GEN2) {
3101                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3102         } else {
3103                 if (lancer_chip(adapter))
3104                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3105                 else
3106                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3107         }
3108         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3109                                      GFP_KERNEL);
3110         if (cmd->va == NULL)
3111                 return -ENOMEM;
3112         memset(cmd->va, 0, cmd->size);
3113         return 0;
3114 }
3115
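/* PCI remove: unwind everything be_probe() set up, in reverse order. */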
3116 static void __devexit be_remove(struct pci_dev *pdev)
3117 {
3118         struct be_adapter *adapter = pci_get_drvdata(pdev);
3119
3120         if (!adapter)
3121                 return;
3122
3123         cancel_delayed_work_sync(&adapter->work);
3124
3125         unregister_netdev(adapter->netdev);
3126
3127         be_clear(adapter);
3128
3129         be_stats_cleanup(adapter);
3130
3131         be_ctrl_cleanup(adapter);
3132
3133         kfree(adapter->vf_cfg);
3134         be_sriov_disable(adapter);
3135
3136         be_msix_disable(adapter);
3137
3138         pci_set_drvdata(pdev, NULL);
3139         pci_release_regions(pdev);
3140         pci_disable_device(pdev);
3141
3142         free_netdev(adapter->netdev);
3143 }
3144
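/*
 * Pull the initial configuration from firmware: FW version, function
 * mode/capabilities, permanent MAC address, VLAN capacity and the
 * controller attributes.
 */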
3145 static int be_get_config(struct be_adapter *adapter)
3146 {
3147         int status;
3148         u8 mac[ETH_ALEN];
3149
3150         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3151         if (status)
3152                 return status;
3153
3154         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3155                         &adapter->function_mode, &adapter->function_caps);
3156         if (status)
3157                 return status;
3158
3159         memset(mac, 0, ETH_ALEN);
3160
3161         /* A default permanent address is given to each VF for Lancer */
3162         if (be_physfn(adapter) || lancer_chip(adapter)) {
3163                 status = be_cmd_mac_addr_query(adapter, mac,
3164                         MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
3165
3166                 if (status)
3167                         return status;
3168
3169                 if (!is_valid_ether_addr(mac))
3170                         return -EADDRNOTAVAIL;
3171
3172                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3173                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3174         }
3175
3176         if (adapter->function_mode & 0x400)     /* multi-channel ("FLEX10") mode */
3177                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3178         else
3179                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3180
3181         status = be_cmd_get_cntl_attributes(adapter);
3182         if (status)
3183                 return status;
3184
3185         be_cmd_check_native_mode(adapter);
3186         return 0;
3187 }
3188
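/*
 * Derive the controller generation from the PCI device id; the newer
 * device ids additionally require a valid SLI_INTF register.
 */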
3189 static int be_dev_family_check(struct be_adapter *adapter)
3190 {
3191         struct pci_dev *pdev = adapter->pdev;
3192         u32 sli_intf = 0, if_type;
3193
3194         switch (pdev->device) {
3195         case BE_DEVICE_ID1:
3196         case OC_DEVICE_ID1:
3197                 adapter->generation = BE_GEN2;
3198                 break;
3199         case BE_DEVICE_ID2:
3200         case OC_DEVICE_ID2:
3201                 adapter->generation = BE_GEN3;
3202                 break;
3203         case OC_DEVICE_ID3:
3204         case OC_DEVICE_ID4:
3205                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3206                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3207                                                 SLI_INTF_IF_TYPE_SHIFT;
3208
3209                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3210                         if_type != 0x02) {
3211                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3212                         return -EINVAL;
3213                 }
3214                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3215                                          SLI_INTF_FAMILY_SHIFT);
3216                 adapter->generation = BE_GEN3;
3217                 break;
3218         default:
3219                 adapter->generation = 0;
3220         }
3221         return 0;
3222 }
3223
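/* Poll SLIPORT_STATUS until the port reports ready; give up after
 * SLIPORT_READY_TIMEOUT polls of 20ms each (~10 seconds).
 */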
3224 static int lancer_wait_ready(struct be_adapter *adapter)
3225 {
3226 #define SLIPORT_READY_TIMEOUT 500
3227         u32 sliport_status;
3228         int status = 0, i;
3229
3230         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3231                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3232                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3233                         break;
3234
3235                 msleep(20);
3236         }
3237
3238         if (i == SLIPORT_READY_TIMEOUT)
3239                 status = -ETIMEDOUT;
3240
3241         return status;
3242 }
3243
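/*
 * If the port is in an error state that firmware marks as recoverable
 * by a reset, request the reset through SLIPORT_CONTROL and verify the
 * port comes back without the error/reset-needed bits set.
 */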
3244 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3245 {
3246         int status;
3247         u32 sliport_status, err, reset_needed;

3248         status = lancer_wait_ready(adapter);
3249         if (!status) {
3250                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3251                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3252                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3253                 if (err && reset_needed) {
3254                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3255                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3256
3257                         /* check that the adapter has corrected the error */
3258                         status = lancer_wait_ready(adapter);
3259                         sliport_status = ioread32(adapter->db +
3260                                                         SLIPORT_STATUS_OFFSET);
3261                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3262                                                 SLIPORT_STATUS_RN_MASK);
3263                         if (status || sliport_status)
3264                                 status = -EIO;
3265                 } else if (err || reset_needed) {
3266                         status = -EIO;
3267                 }
3268         }
3269         return status;
3270 }
3271
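/*
 * PCI probe: enable the device, bring up the control path, sync with the
 * firmware's POST state, reset the function, then create the queues and
 * register the netdev; VF state is provisioned when SR-IOV is enabled.
 */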
3272 static int __devinit be_probe(struct pci_dev *pdev,
3273                         const struct pci_device_id *pdev_id)
3274 {
3275         int status = 0;
3276         struct be_adapter *adapter;
3277         struct net_device *netdev;
3278
3279         status = pci_enable_device(pdev);
3280         if (status)
3281                 goto do_none;
3282
3283         status = pci_request_regions(pdev, DRV_NAME);
3284         if (status)
3285                 goto disable_dev;
3286         pci_set_master(pdev);
3287
3288         netdev = alloc_etherdev(sizeof(struct be_adapter));
3289         if (netdev == NULL) {
3290                 status = -ENOMEM;
3291                 goto rel_reg;
3292         }
3293         adapter = netdev_priv(netdev);
3294         adapter->pdev = pdev;
3295         pci_set_drvdata(pdev, adapter);
3296
3297         status = be_dev_family_check(adapter);
3298         if (status)
3299                 goto free_netdev;
3300
3301         adapter->netdev = netdev;
3302         SET_NETDEV_DEV(netdev, &pdev->dev);
3303
3304         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3305         if (!status) {
3306                 netdev->features |= NETIF_F_HIGHDMA;
3307         } else {
3308                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3309                 if (status) {
3310                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3311                         goto free_netdev;
3312                 }
3313         }
3314
3315         be_sriov_enable(adapter);
3316         if (adapter->sriov_enabled) {
3317                 adapter->vf_cfg = kcalloc(num_vfs,
3318                         sizeof(struct be_vf_cfg), GFP_KERNEL);
3319
3320                 if (!adapter->vf_cfg) {
                             status = -ENOMEM;
3321                         goto free_netdev;
                     }
3322         }
3323
3324         status = be_ctrl_init(adapter);
3325         if (status)
3326                 goto free_vf_cfg;
3327
3328         if (lancer_chip(adapter)) {
3329                 status = lancer_test_and_set_rdy_state(adapter);
3330                 if (status) {
3331                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3332                         goto ctrl_clean;
3333                 }
3334         }
3335
3336         /* sync up with fw's ready state */
3337         if (be_physfn(adapter)) {
3338                 status = be_cmd_POST(adapter);
3339                 if (status)
3340                         goto ctrl_clean;
3341         }
3342
3343         /* tell fw we're ready to fire cmds */
3344         status = be_cmd_fw_init(adapter);
3345         if (status)
3346                 goto ctrl_clean;
3347
3348         status = be_cmd_reset_function(adapter);
3349         if (status)
3350                 goto ctrl_clean;
3351
3352         status = be_stats_init(adapter);
3353         if (status)
3354                 goto ctrl_clean;
3355
3356         status = be_get_config(adapter);
3357         if (status)
3358                 goto stats_clean;
3359
3360         be_msix_enable(adapter);
3361
3362         INIT_DELAYED_WORK(&adapter->work, be_worker);
3363
3364         status = be_setup(adapter);
3365         if (status)
3366                 goto msix_disable;
3367
3368         be_netdev_init(netdev);
3369         status = register_netdev(netdev);
3370         if (status)
3371                 goto unsetup;
3372         netif_carrier_off(netdev);
3373
3374         if (be_physfn(adapter) && adapter->sriov_enabled) {
3375                 u8 mac_speed;
3376                 bool link_up;
3377                 u16 vf, lnk_speed;
3378
3379                 if (!lancer_chip(adapter)) {
3380                         status = be_vf_eth_addr_config(adapter);
3381                         if (status)
3382                                 goto unreg_netdev;
3383                 }
3384
3385                 for (vf = 0; vf < num_vfs; vf++) {
3386                         status = be_cmd_link_status_query(adapter, &link_up,
3387                                         &mac_speed, &lnk_speed, vf + 1);
3388                         if (!status)
3389                                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3390                         else
3391                                 goto unreg_netdev;
3392                 }
3393         }
3394
3395         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3396         /* By default all priorities are enabled.
3397          * This is needed in case GRP5 events are not supported.
3398          */
3399         adapter->vlan_prio_bmap = 0xff;
3400
3401         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3402         return 0;
3403
3404 unreg_netdev:
3405         unregister_netdev(netdev);
3406 unsetup:
3407         be_clear(adapter);
3408 msix_disable:
3409         be_msix_disable(adapter);
3410 stats_clean:
3411         be_stats_cleanup(adapter);
3412 ctrl_clean:
3413         be_ctrl_cleanup(adapter);
3414 free_vf_cfg:
3415         kfree(adapter->vf_cfg);
3416 free_netdev:
3417         be_sriov_disable(adapter);
3418         free_netdev(netdev);
3419         pci_set_drvdata(pdev, NULL);
3420 rel_reg:
3421         pci_release_regions(pdev);
3422 disable_dev:
3423         pci_disable_device(pdev);
3424 do_none:
3425         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3426         return status;
3427 }
3428
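/* PM suspend: quiesce the interface, optionally arm wake-on-LAN, tear
 * down queues/MSI-X and drop the device into the requested power state.
 */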
3429 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3430 {
3431         struct be_adapter *adapter = pci_get_drvdata(pdev);
3432         struct net_device *netdev = adapter->netdev;
3433
3434         cancel_delayed_work_sync(&adapter->work);
3435         if (adapter->wol)
3436                 be_setup_wol(adapter, true);
3437
3438         netif_device_detach(netdev);
3439         if (netif_running(netdev)) {
3440                 rtnl_lock();
3441                 be_close(netdev);
3442                 rtnl_unlock();
3443         }
3444         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3445         be_clear(adapter);
3446
3447         be_msix_disable(adapter);
3448         pci_save_state(pdev);
3449         pci_disable_device(pdev);
3450         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3451         return 0;
3452 }
3453
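/* PM resume: re-enable the device, re-init firmware state and rebuild
 * the queues, then re-open the interface if it was running.
 */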
3454 static int be_resume(struct pci_dev *pdev)
3455 {
3456         int status = 0;
3457         struct be_adapter *adapter = pci_get_drvdata(pdev);
3458         struct net_device *netdev = adapter->netdev;
3459
3460         netif_device_detach(netdev);
3461
3462         status = pci_enable_device(pdev);
3463         if (status)
3464                 return status;
3465
3466         pci_set_power_state(pdev, PCI_D0);
3467         pci_restore_state(pdev);
3468
3469         be_msix_enable(adapter);
3470         /* tell fw we're ready to fire cmds */
3471         status = be_cmd_fw_init(adapter);
3472         if (status)
3473                 return status;
3474
3475         be_setup(adapter);
3476         if (netif_running(netdev)) {
3477                 rtnl_lock();
3478                 be_open(netdev);
3479                 rtnl_unlock();
3480         }
3481         netif_device_attach(netdev);
3482
3483         if (adapter->wol)
3484                 be_setup_wol(adapter, false);
3485
3486         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3487         return 0;
3488 }
3489
3490 /*
3491  * An FLR will stop BE from DMAing any data.
3492  */
3493 static void be_shutdown(struct pci_dev *pdev)
3494 {
3495         struct be_adapter *adapter = pci_get_drvdata(pdev);
3496
3497         if (!adapter)
3498                 return;
3499
3500         cancel_delayed_work_sync(&adapter->work);
3501
3502         netif_device_detach(adapter->netdev);
3503
3504         if (adapter->wol)
3505                 be_setup_wol(adapter, true);
3506
3507         be_cmd_reset_function(adapter);
3508
3509         pci_disable_device(pdev);
3510 }
3511
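/*
 * EEH (PCI error recovery) callbacks: detach and tear down on error
 * detection, re-enable the device and wait for POST on slot reset, then
 * rebuild the control path and re-open the interface on resume.
 */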
3512 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3513                                 pci_channel_state_t state)
3514 {
3515         struct be_adapter *adapter = pci_get_drvdata(pdev);
3516         struct net_device *netdev = adapter->netdev;
3517
3518         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3519
3520         adapter->eeh_err = true;
3521
3522         netif_device_detach(netdev);
3523
3524         if (netif_running(netdev)) {
3525                 rtnl_lock();
3526                 be_close(netdev);
3527                 rtnl_unlock();
3528         }
3529         be_clear(adapter);
3530
3531         if (state == pci_channel_io_perm_failure)
3532                 return PCI_ERS_RESULT_DISCONNECT;
3533
3534         pci_disable_device(pdev);
3535
3536         return PCI_ERS_RESULT_NEED_RESET;
3537 }
3538
3539 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3540 {
3541         struct be_adapter *adapter = pci_get_drvdata(pdev);
3542         int status;
3543
3544         dev_info(&adapter->pdev->dev, "EEH reset\n");
3545         adapter->eeh_err = false;
3546
3547         status = pci_enable_device(pdev);
3548         if (status)
3549                 return PCI_ERS_RESULT_DISCONNECT;
3550
3551         pci_set_master(pdev);
3552         pci_set_power_state(pdev, PCI_D0);
3553         pci_restore_state(pdev);
3554
3555         /* Check if card is ok and fw is ready */
3556         status = be_cmd_POST(adapter);
3557         if (status)
3558                 return PCI_ERS_RESULT_DISCONNECT;
3559
3560         return PCI_ERS_RESULT_RECOVERED;
3561 }
3562
3563 static void be_eeh_resume(struct pci_dev *pdev)
3564 {
3565         int status = 0;
3566         struct be_adapter *adapter = pci_get_drvdata(pdev);
3567         struct net_device *netdev = adapter->netdev;
3568
3569         dev_info(&adapter->pdev->dev, "EEH resume\n");
3570
3571         pci_save_state(pdev);
3572
3573         /* tell fw we're ready to fire cmds */
3574         status = be_cmd_fw_init(adapter);
3575         if (status)
3576                 goto err;
3577
3578         status = be_setup(adapter);
3579         if (status)
3580                 goto err;
3581
3582         if (netif_running(netdev)) {
3583                 status = be_open(netdev);
3584                 if (status)
3585                         goto err;
3586         }
3587         netif_device_attach(netdev);
3588         return;
3589 err:
3590         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3591 }
3592
3593 static struct pci_error_handlers be_eeh_handlers = {
3594         .error_detected = be_eeh_err_detected,
3595         .slot_reset = be_eeh_reset,
3596         .resume = be_eeh_resume,
3597 };
3598
3599 static struct pci_driver be_driver = {
3600         .name = DRV_NAME,
3601         .id_table = be_dev_ids,
3602         .probe = be_probe,
3603         .remove = be_remove,
3604         .suspend = be_suspend,
3605         .resume = be_resume,
3606         .shutdown = be_shutdown,
3607         .err_handler = &be_eeh_handlers
3608 };
3609
3610 static int __init be_init_module(void)
3611 {
3612         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3613             rx_frag_size != 2048) {
3614                 pr_warn(DRV_NAME
3615                         ": module param rx_frag_size must be 2048/4096/8192;"
3616                         " using 2048\n");
3617                 rx_frag_size = 2048;
3618         }
3619
3620         return pci_register_driver(&be_driver);
3621 }
3622 module_init(be_init_module);
3623
3624 static void __exit be_exit_module(void)
3625 {
3626         pci_unregister_driver(&be_driver);
3627 }
3628 module_exit(be_exit_module);