/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

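/* Enable or disable host-side interrupts via the HOSTINTR bit in the
 * membar control register. The register is written only when the
 * requested state differs from the current one, and never after an
 * EEH error has been flagged.
 */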
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        reg = ioread32(addr);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

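/* Doorbell helpers: each tells the hardware how many entries were posted
 * to a ring, or popped from a completion/event queue and whether to
 * re-arm it. The RQ and TXQ variants issue a wmb() first so that the
 * descriptor writes are visible to the device before the doorbell write
 * that publishes them.
 */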
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

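/* ndo_set_mac_address handler. On a PF the currently programmed pmac is
 * deleted and the new address added in its place before netdev->dev_addr
 * is updated; on a VF only the netdev copy is updated, as the MAC is
 * programmed into hardware by the owning PF.
 */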
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per-interface; it is per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);

        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

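/* Fill the header WRB that precedes the data fragments: flags LSO and
 * checksum offload as appropriate for the skb, inserts the VLAN tag
 * (remapping its priority when the OS-chosen priority is not in the
 * available bitmap), and records the total WRB count and payload length.
 */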
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

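/* Map the skb header and frags for DMA and post one WRB per fragment,
 * reserving the first slot for the header WRB, which is filled last once
 * the copied length is known. On a mapping failure the already-mapped
 * fragments are unwound and the queue head is restored, so the caller
 * sees 0 and can drop the skb.
 */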
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

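/* Look up the page_info for a posted rx fragment. The big page backing
 * several fragments is DMA-unmapped only when its last user is seen;
 * the rxq used count is dropped for every fragment consumed.
 */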
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                compl);
                rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                compl);
        }
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                compl);
                rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                compl);
        }
}

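/* Fetch the next valid rx completion, if any. The valid bit is checked
 * before the rmb() that orders reading of the rest of the entry; the
 * entry is then parsed with the v1 layout on BE3-native adapters and
 * the v0 layout otherwise. Returns NULL when the queue tail entry is
 * not yet valid.
 */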
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vid = swab16(rxcp->vid);

                if ((adapter->pvid == rxcp->vid) &&
                        !adapter->vlan_tag[rxcp->vid])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

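/* Reclaim a completed tx: walk the WRBs from the queue tail up to
 * last_index, unmapping each fragment (the header fragment is unmapped
 * only once, via dma_unmap_single), then release the WRB slots and free
 * the sent skb.
 */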
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

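/* Drain and acknowledge all pending entries on an event queue, re-arm
 * it, and kick NAPI if any events were found. The EQ is notified even
 * when no events were present, to clear spurious interrupts.
 */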
static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

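/* Flush an rx queue at teardown: discard any pending completions, then
 * free the posted buffers that were never consumed, walking forward
 * from the computed tail until the used count reaches zero.
 */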
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

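/* Drain tx completions at teardown: poll the tx cq for up to ~200ms,
 * then forcibly reclaim any posted WRBs whose completions never arrived
 * so their skbs are not leaked.
 */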
static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

1477 static void be_tx_queues_destroy(struct be_adapter *adapter)
1478 {
1479         struct be_queue_info *q;
1480
1481         q = &adapter->tx_obj.q;
1482         if (q->created)
1483                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1484         be_queue_free(adapter, q);
1485
1486         q = &adapter->tx_obj.cq;
1487         if (q->created)
1488                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1489         be_queue_free(adapter, q);
1490
1491         /* Clear any residual events */
1492         be_eq_clean(adapter, &adapter->tx_eq);
1493
1494         q = &adapter->tx_eq.q;
1495         if (q->created)
1496                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1497         be_queue_free(adapter, q);
1498 }
1499
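/*
 * The TX event queue runs with a fixed EQ delay (cur_eqd = 96) and
 * adaptive interrupt coalescing (AIC) disabled; the RX event queues
 * enable AIC instead (see be_rx_queues_create below).
 */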
1500 static int be_tx_queues_create(struct be_adapter *adapter)
1501 {
1502         struct be_queue_info *eq, *q, *cq;
1503
1504         adapter->tx_eq.max_eqd = 0;
1505         adapter->tx_eq.min_eqd = 0;
1506         adapter->tx_eq.cur_eqd = 96;
1507         adapter->tx_eq.enable_aic = false;
1508         /* Alloc Tx Event queue */
1509         eq = &adapter->tx_eq.q;
1510         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1511                 return -1;
1512
1513         /* Ask BE to create Tx Event queue */
1514         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1515                 goto tx_eq_free;
1516
1517         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1518
1520         /* Alloc TX eth compl queue */
1521         cq = &adapter->tx_obj.cq;
1522         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1523                         sizeof(struct be_eth_tx_compl)))
1524                 goto tx_eq_destroy;
1525
1526         /* Ask BE to create Tx eth compl queue */
1527         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1528                 goto tx_cq_free;
1529
1530         /* Alloc TX eth queue */
1531         q = &adapter->tx_obj.q;
1532         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1533                 goto tx_cq_destroy;
1534
1535         /* Ask BE to create Tx eth queue */
1536         if (be_cmd_txq_create(adapter, q, cq))
1537                 goto tx_q_free;
1538         return 0;
1539
1540 tx_q_free:
1541         be_queue_free(adapter, q);
1542 tx_cq_destroy:
1543         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1544 tx_cq_free:
1545         be_queue_free(adapter, cq);
1546 tx_eq_destroy:
1547         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1548 tx_eq_free:
1549         be_queue_free(adapter, eq);
1550         return -1;
1551 }
1552
1553 static void be_rx_queues_destroy(struct be_adapter *adapter)
1554 {
1555         struct be_queue_info *q;
1556         struct be_rx_obj *rxo;
1557         int i;
1558
1559         for_all_rx_queues(adapter, rxo, i) {
1560                 q = &rxo->q;
1561                 if (q->created) {
1562                         be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1563                         /* After the rxq is invalidated, wait for a grace time
1564                          * of 1ms for all dma to end and the flush compl to
1565                          * arrive
1566                          */
1567                         mdelay(1);
1568                         be_rx_q_clean(adapter, rxo);
1569                 }
1570                 be_queue_free(adapter, q);
1571
1572                 q = &rxo->cq;
1573                 if (q->created)
1574                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1575                 be_queue_free(adapter, q);
1576
1577                 /* Clear any residual events */
1578                 q = &rxo->rx_eq.q;
1579                 if (q->created) {
1580                         be_eq_clean(adapter, &rxo->rx_eq);
1581                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1582                 }
1583                 be_queue_free(adapter, q);
1584         }
1585 }
1586
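/*
 * Multiple RX queues (one default queue plus MAX_RSS_QS RSS queues) are
 * requested only when the multi_rxq module param is set, firmware reports
 * the RSS capability, SR-IOV is disabled and function_mode bit 0x400 is
 * clear; otherwise a single RX queue is used.
 */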
1587 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1588 {
1589         if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1590                 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1591                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1592         } else {
1593                 dev_warn(&adapter->pdev->dev,
1594                         "No support for multiple RX queues\n");
1595                 return 1;
1596         }
1597 }
1598
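/*
 * Per-RX-object bring-up: each be_rx_obj gets its own EQ -> CQ -> RXQ
 * chain. Queue 0 is the default (non-RSS) queue; queues 1..n are created
 * with RSS enabled and their rss_ids are then programmed into the
 * indirection table via be_cmd_rss_config().
 */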
1599 static int be_rx_queues_create(struct be_adapter *adapter)
1600 {
1601         struct be_queue_info *eq, *q, *cq;
1602         struct be_rx_obj *rxo;
1603         int rc, i;
1604
1605         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1606                                 msix_enabled(adapter) ?
1607                                         adapter->num_msix_vec - 1 : 1);
1608         if (adapter->num_rx_qs != MAX_RX_QS)
1609                 dev_warn(&adapter->pdev->dev,
1610                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1611
1612         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1613         for_all_rx_queues(adapter, rxo, i) {
1614                 rxo->adapter = adapter;
1615                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1616                 rxo->rx_eq.enable_aic = true;
1617
1618                 /* EQ */
1619                 eq = &rxo->rx_eq.q;
1620                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1621                                         sizeof(struct be_eq_entry));
1622                 if (rc)
1623                         goto err;
1624
1625                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1626                 if (rc)
1627                         goto err;
1628
1629                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1630
1631                 /* CQ */
1632                 cq = &rxo->cq;
1633                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1634                                 sizeof(struct be_eth_rx_compl));
1635                 if (rc)
1636                         goto err;
1637
1638                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1639                 if (rc)
1640                         goto err;
1641                 /* Rx Q */
1642                 q = &rxo->q;
1643                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1644                                 sizeof(struct be_eth_rx_d));
1645                 if (rc)
1646                         goto err;
1647
1648                 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1649                         BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1650                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1651                 if (rc)
1652                         goto err;
1653         }
1654
1655         if (be_multi_rxq(adapter)) {
1656                 u8 rsstable[MAX_RSS_QS];
1657
1658                 for_all_rss_queues(adapter, rxo, i)
1659                         rsstable[i] = rxo->rss_id;
1660
1661                 rc = be_cmd_rss_config(adapter, rsstable,
1662                         adapter->num_rx_qs - 1);
1663                 if (rc)
1664                         goto err;
1665         }
1666
1667         return 0;
1668 err:
1669         be_rx_queues_destroy(adapter);
1670         return -1;
1671 }
1672
1673 static bool event_peek(struct be_eq_obj *eq_obj)
1674 {
1675         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1676         if (!eqe->evt)
1677                 return false;
1678         else
1679                 return true;
1680 }
1681
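/*
 * Legacy INTx handler. Lancer has no ISR register, so pending work is
 * detected by peeking at the EQs directly; on other chips the CEV_ISR
 * register is read and every EQ whose eq_idx bit is set gets serviced.
 */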
1682 static irqreturn_t be_intx(int irq, void *dev)
1683 {
1684         struct be_adapter *adapter = dev;
1685         struct be_rx_obj *rxo;
1686         int isr, i, tx = 0, rx = 0;
1687
1688         if (lancer_chip(adapter)) {
1689                 if (event_peek(&adapter->tx_eq))
1690                         tx = event_handle(adapter, &adapter->tx_eq);
1691                 for_all_rx_queues(adapter, rxo, i) {
1692                         if (event_peek(&rxo->rx_eq))
1693                                 rx |= event_handle(adapter, &rxo->rx_eq);
1694                 }
1695
1696                 if (!(tx || rx))
1697                         return IRQ_NONE;
1698
1699         } else {
1700                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1701                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1702                 if (!isr)
1703                         return IRQ_NONE;
1704
1705                 if ((1 << adapter->tx_eq.eq_idx & isr))
1706                         event_handle(adapter, &adapter->tx_eq);
1707
1708                 for_all_rx_queues(adapter, rxo, i) {
1709                         if ((1 << rxo->rx_eq.eq_idx & isr))
1710                                 event_handle(adapter, &rxo->rx_eq);
1711                 }
1712         }
1713
1714         return IRQ_HANDLED;
1715 }
1716
1717 static irqreturn_t be_msix_rx(int irq, void *dev)
1718 {
1719         struct be_rx_obj *rxo = dev;
1720         struct be_adapter *adapter = rxo->adapter;
1721
1722         event_handle(adapter, &rxo->rx_eq);
1723
1724         return IRQ_HANDLED;
1725 }
1726
1727 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1728 {
1729         struct be_adapter *adapter = dev;
1730
1731         event_handle(adapter, &adapter->tx_eq);
1732
1733         return IRQ_HANDLED;
1734 }
1735
1736 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1737 {
1738         return rxcp->tcpf && !rxcp->err;
1739 }
1740
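/*
 * RX NAPI poll: consume up to 'budget' completions, replenish the RX ring
 * once it falls below the refill watermark, and re-arm the CQ only when
 * all pending work was consumed (work_done < budget), per the usual NAPI
 * contract.
 */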
1741 static int be_poll_rx(struct napi_struct *napi, int budget)
1742 {
1743         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1744         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1745         struct be_adapter *adapter = rxo->adapter;
1746         struct be_queue_info *rx_cq = &rxo->cq;
1747         struct be_rx_compl_info *rxcp;
1748         u32 work_done;
1749
1750         rxo->stats.rx_polls++;
1751         for (work_done = 0; work_done < budget; work_done++) {
1752                 rxcp = be_rx_compl_get(rxo);
1753                 if (!rxcp)
1754                         break;
1755
1756                 /* Ignore flush completions */
1757                 if (rxcp->num_rcvd) {
1758                         if (do_gro(rxcp))
1759                                 be_rx_compl_process_gro(adapter, rxo, rxcp);
1760                         else
1761                                 be_rx_compl_process(adapter, rxo, rxcp);
1762                 }
1763                 be_rx_stats_update(rxo, rxcp);
1764         }
1765
1766         /* Refill the queue */
1767         if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1768                 be_post_rx_frags(rxo, GFP_ATOMIC);
1769
1770         /* All consumed */
1771         if (work_done < budget) {
1772                 napi_complete(napi);
1773                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1774         } else {
1775                 /* More to be consumed; continue with interrupts disabled */
1776                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1777         }
1778         return work_done;
1779 }
1780
1781 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1782  * For TX/MCC we don't honour budget; consume everything
1783  */
1784 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1785 {
1786         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1787         struct be_adapter *adapter =
1788                 container_of(tx_eq, struct be_adapter, tx_eq);
1789         struct be_queue_info *txq = &adapter->tx_obj.q;
1790         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1791         struct be_eth_tx_compl *txcp;
1792         int tx_compl = 0, mcc_compl, status = 0;
1793         u16 end_idx;
1794
1795         while ((txcp = be_tx_compl_get(tx_cq))) {
1796                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1797                                 wrb_index, txcp);
1798                 be_tx_compl_process(adapter, end_idx);
1799                 tx_compl++;
1800         }
1801
1802         mcc_compl = be_process_mcc(adapter, &status);
1803
1804         napi_complete(napi);
1805
1806         if (mcc_compl) {
1807                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1808                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1809         }
1810
1811         if (tx_compl) {
1812                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1813
1814                 /* As Tx wrbs have been freed up, wake up netdev queue if
1815                  * it was stopped due to lack of tx wrbs.
1816                  */
1817                 if (netif_queue_stopped(adapter->netdev) &&
1818                         atomic_read(&txq->used) < txq->len / 2) {
1819                         netif_wake_queue(adapter->netdev);
1820                 }
1821
1822                 tx_stats(adapter)->be_tx_events++;
1823                 tx_stats(adapter)->be_tx_compl += tx_compl;
1824         }
1825
1826         return 1;
1827 }
1828
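/*
 * Unrecoverable Error (UE) detection: read the UE status CSRs from PCI
 * config space, mask off the bits the hardware flags as ignorable, and
 * log the name of every asserted bit using the ue_status_{low,hi}_desc
 * tables.
 */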
1829 void be_detect_dump_ue(struct be_adapter *adapter)
1830 {
1831         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1832         u32 i;
1833
1834         pci_read_config_dword(adapter->pdev,
1835                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1836         pci_read_config_dword(adapter->pdev,
1837                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1838         pci_read_config_dword(adapter->pdev,
1839                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1840         pci_read_config_dword(adapter->pdev,
1841                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1842
1843         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1844         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1845
1846         if (ue_status_lo || ue_status_hi) {
1847                 adapter->ue_detected = true;
1848                 adapter->eeh_err = true;
1849                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1850         }
1851
1852         if (ue_status_lo) {
1853                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1854                         if (ue_status_lo & 1)
1855                                 dev_err(&adapter->pdev->dev,
1856                                         "UE: %s bit set\n", ue_status_low_desc[i]);
1857                 }
1858         }
1859         if (ue_status_hi) {
1860                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1861                         if (ue_status_hi & 1)
1862                                 dev_err(&adapter->pdev->dev,
1863                                         "UE: %s bit set\n", ue_status_hi_desc[i]);
1864                 }
1865         }
1866
1867 }
1868
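/*
 * Periodic worker, re-armed every 1000ms: checks for UEs, reaps MCC
 * completions while the interface is down (interrupts may not be enabled
 * yet), fires off a stats cmd, updates tx/rx rates and EQ delays, and
 * replenishes any RX queue that starved while allocations were failing.
 */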
1869 static void be_worker(struct work_struct *work)
1870 {
1871         struct be_adapter *adapter =
1872                 container_of(work, struct be_adapter, work.work);
1873         struct be_rx_obj *rxo;
1874         int i;
1875
1876         if (!adapter->ue_detected && !lancer_chip(adapter))
1877                 be_detect_dump_ue(adapter);
1878
1879         /* when interrupts are not yet enabled, just reap any pending
1880          * mcc completions */
1881         if (!netif_running(adapter->netdev)) {
1882                 int mcc_compl, status = 0;
1883
1884                 mcc_compl = be_process_mcc(adapter, &status);
1885
1886                 if (mcc_compl) {
1887                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1888                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1889                 }
1890
1891                 goto reschedule;
1892         }
1893
1894         if (!adapter->stats_cmd_sent)
1895                 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1896
1897         be_tx_rate_update(adapter);
1898
1899         for_all_rx_queues(adapter, rxo, i) {
1900                 be_rx_rate_update(rxo);
1901                 be_rx_eqd_update(adapter, rxo);
1902
1903                 if (rxo->rx_post_starved) {
1904                         rxo->rx_post_starved = false;
1905                         be_post_rx_frags(rxo, GFP_KERNEL);
1906                 }
1907         }
1908
1909 reschedule:
1910         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1911 }
1912
1913 static void be_msix_disable(struct be_adapter *adapter)
1914 {
1915         if (msix_enabled(adapter)) {
1916                 pci_disable_msix(adapter->pdev);
1917                 adapter->num_msix_vec = 0;
1918         }
1919 }
1920
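/*
 * MSI-X enable with graceful degradation: pci_enable_msix() returns 0 on
 * success, a negative errno on hard failure, or a positive count of the
 * vectors actually available. In that last case the request is retried
 * with the smaller count, as long as it still covers the Rx + Tx minimum.
 * Illustrative shape of the retry:
 *
 *	status = pci_enable_msix(pdev, entries, num_vec);
 *	if (status > 0)
 *		pci_enable_msix(pdev, entries, status);
 */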
1921 static void be_msix_enable(struct be_adapter *adapter)
1922 {
1923 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
1924         int i, status, num_vec;
1925
1926         num_vec = be_num_rxqs_want(adapter) + 1;
1927
1928         for (i = 0; i < num_vec; i++)
1929                 adapter->msix_entries[i].entry = i;
1930
1931         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
1932         if (status == 0) {
1933                 goto done;
1934         } else if (status >= BE_MIN_MSIX_VECTORS) {
1935                 num_vec = status;
1936                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1937                                 num_vec) == 0)
1938                         goto done;
1939         }
1940         return;
1941 done:
1942         adapter->num_msix_vec = num_vec;
1943         return;
1944 }
1945
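/*
 * SR-IOV enable: the requested num_vfs is clamped to the TotalVFs value
 * read from the device's SR-IOV extended capability before calling
 * pci_enable_sriov().
 */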
1946 static void be_sriov_enable(struct be_adapter *adapter)
1947 {
1948         be_check_sriov_fn_type(adapter);
1949 #ifdef CONFIG_PCI_IOV
1950         if (be_physfn(adapter) && num_vfs) {
1951                 int status, pos;
1952                 u16 nvfs;
1953
1954                 pos = pci_find_ext_capability(adapter->pdev,
1955                                                 PCI_EXT_CAP_ID_SRIOV);
1956                 pci_read_config_word(adapter->pdev,
1957                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
1958
1959                 if (num_vfs > nvfs) {
1960                         dev_info(&adapter->pdev->dev,
1961                                         "Device supports %d VFs and not %d\n",
1962                                         nvfs, num_vfs);
1963                         num_vfs = nvfs;
1964                 }
1965
1966                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1967                 adapter->sriov_enabled = !status;
1968         }
1969 #endif
1970 }
1971
1972 static void be_sriov_disable(struct be_adapter *adapter)
1973 {
1974 #ifdef CONFIG_PCI_IOV
1975         if (adapter->sriov_enabled) {
1976                 pci_disable_sriov(adapter->pdev);
1977                 adapter->sriov_enabled = false;
1978         }
1979 #endif
1980 }
1981
1982 static inline int be_msix_vec_get(struct be_adapter *adapter,
1983                                         struct be_eq_obj *eq_obj)
1984 {
1985         return adapter->msix_entries[eq_obj->eq_idx].vector;
1986 }
1987
1988 static int be_request_irq(struct be_adapter *adapter,
1989                 struct be_eq_obj *eq_obj,
1990                 void *handler, char *desc, void *context)
1991 {
1992         struct net_device *netdev = adapter->netdev;
1993         int vec;
1994
1995         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1996         vec = be_msix_vec_get(adapter, eq_obj);
1997         return request_irq(vec, handler, 0, eq_obj->desc, context);
1998 }
1999
2000 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2001                         void *context)
2002 {
2003         int vec = be_msix_vec_get(adapter, eq_obj);
2004         free_irq(vec, context);
2005 }
2006
2007 static int be_msix_register(struct be_adapter *adapter)
2008 {
2009         struct be_rx_obj *rxo;
2010         int status, i;
2011         char qname[10];
2012
2013         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2014                                 adapter);
2015         if (status)
2016                 goto err;
2017
2018         for_all_rx_queues(adapter, rxo, i) {
2019                 sprintf(qname, "rxq%d", i);
2020                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2021                                 qname, rxo);
2022                 if (status)
2023                         goto err_msix;
2024         }
2025
2026         return 0;
2027
2028 err_msix:
2029         be_free_irq(adapter, &adapter->tx_eq, adapter);
2030
2031         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2032                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2033
2034 err:
2035         dev_warn(&adapter->pdev->dev,
2036                 "MSIX Request IRQ failed - err %d\n", status);
2037         be_msix_disable(adapter);
2038         return status;
2039 }
2040
2041 static int be_irq_register(struct be_adapter *adapter)
2042 {
2043         struct net_device *netdev = adapter->netdev;
2044         int status;
2045
2046         if (msix_enabled(adapter)) {
2047                 status = be_msix_register(adapter);
2048                 if (status == 0)
2049                         goto done;
2050                 /* INTx is not supported for VF */
2051                 if (!be_physfn(adapter))
2052                         return status;
2053         }
2054
2055         /* INTx */
2056         netdev->irq = adapter->pdev->irq;
2057         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2058                         adapter);
2059         if (status) {
2060                 dev_err(&adapter->pdev->dev,
2061                         "INTx request IRQ failed - err %d\n", status);
2062                 return status;
2063         }
2064 done:
2065         adapter->isr_registered = true;
2066         return 0;
2067 }
2068
2069 static void be_irq_unregister(struct be_adapter *adapter)
2070 {
2071         struct net_device *netdev = adapter->netdev;
2072         struct be_rx_obj *rxo;
2073         int i;
2074
2075         if (!adapter->isr_registered)
2076                 return;
2077
2078         /* INTx */
2079         if (!msix_enabled(adapter)) {
2080                 free_irq(netdev->irq, adapter);
2081                 goto done;
2082         }
2083
2084         /* MSIx */
2085         be_free_irq(adapter, &adapter->tx_eq, adapter);
2086
2087         for_all_rx_queues(adapter, rxo, i)
2088                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2089
2090 done:
2091         adapter->isr_registered = false;
2092 }
2093
2094 static int be_close(struct net_device *netdev)
2095 {
2096         struct be_adapter *adapter = netdev_priv(netdev);
2097         struct be_rx_obj *rxo;
2098         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2099         int vec, i;
2100
2101         be_async_mcc_disable(adapter);
2102
2103         netif_carrier_off(netdev);
2104         adapter->link_up = false;
2105
2106         if (!lancer_chip(adapter))
2107                 be_intr_set(adapter, false);
2108
2109         for_all_rx_queues(adapter, rxo, i)
2110                 napi_disable(&rxo->rx_eq.napi);
2111
2112         napi_disable(&tx_eq->napi);
2113
2114         if (lancer_chip(adapter)) {
2115                 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2116                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2117                 for_all_rx_queues(adapter, rxo, i)
2118                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2119         }
2120
2121         if (msix_enabled(adapter)) {
2122                 vec = be_msix_vec_get(adapter, tx_eq);
2123                 synchronize_irq(vec);
2124
2125                 for_all_rx_queues(adapter, rxo, i) {
2126                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2127                         synchronize_irq(vec);
2128                 }
2129         } else {
2130                 synchronize_irq(netdev->irq);
2131         }
2132         be_irq_unregister(adapter);
2133
2134         /* Wait for all pending tx completions to arrive so that
2135          * all tx skbs are freed.
2136          */
2137         be_tx_compl_clean(adapter);
2138
2139         return 0;
2140 }
2141
2142 static int be_open(struct net_device *netdev)
2143 {
2144         struct be_adapter *adapter = netdev_priv(netdev);
2145         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2146         struct be_rx_obj *rxo;
2147         bool link_up;
2148         int status, i;
2149         u8 mac_speed;
2150         u16 link_speed;
2151
2152         for_all_rx_queues(adapter, rxo, i) {
2153                 be_post_rx_frags(rxo, GFP_KERNEL);
2154                 napi_enable(&rxo->rx_eq.napi);
2155         }
2156         napi_enable(&tx_eq->napi);
2157
2158         be_irq_register(adapter);
2159
2160         if (!lancer_chip(adapter))
2161                 be_intr_set(adapter, true);
2162
2163         /* The evt queues are created in unarmed state; arm them */
2164         for_all_rx_queues(adapter, rxo, i) {
2165                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2166                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2167         }
2168         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2169
2170         /* Now that interrupts are on we can process async mcc */
2171         be_async_mcc_enable(adapter);
2172
2173         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2174                         &link_speed);
2175         if (status)
2176                 goto err;
2177         be_link_status_update(adapter, link_up);
2178
2179         if (be_physfn(adapter)) {
2180                 status = be_vid_config(adapter, false, 0);
2181                 if (status)
2182                         goto err;
2183
2184                 status = be_cmd_set_flow_control(adapter,
2185                                 adapter->tx_fc, adapter->rx_fc);
2186                 if (status)
2187                         goto err;
2188         }
2189
2190         return 0;
2191 err:
2192         be_close(adapter->netdev);
2193         return -EIO;
2194 }
2195
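/*
 * Wake-on-LAN config: a magic-packet filter is programmed through an MCC
 * cmd using a DMA-coherent buffer. Enabling also sets the PM control bits
 * in PCI config space and arms wake from D3hot/D3cold; disabling programs
 * an all-zero MAC and clears the wake enables.
 */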
2196 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2197 {
2198         struct be_dma_mem cmd;
2199         int status = 0;
2200         u8 mac[ETH_ALEN];
2201
2202         memset(mac, 0, ETH_ALEN);
2203
2204         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2205         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2206                                     GFP_KERNEL);
2207         if (cmd.va == NULL)
2208                 return -1;
2209         memset(cmd.va, 0, cmd.size);
2210
2211         if (enable) {
2212                 status = pci_write_config_dword(adapter->pdev,
2213                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2214                 if (status) {
2215                         dev_err(&adapter->pdev->dev,
2216                                 "Could not enable Wake-on-LAN\n");
2217                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2218                                           cmd.dma);
2219                         return status;
2220                 }
2221                 status = be_cmd_enable_magic_wol(adapter,
2222                                 adapter->netdev->dev_addr, &cmd);
2223                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2224                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2225         } else {
2226                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2227                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2228                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2229         }
2230
2231         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2232         return status;
2233 }
2234
2235 /*
2236  * Generate a seed MAC address from the PF MAC Address using jhash.
2237  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2238  * These addresses are programmed in the ASIC by the PF and the VF driver
2239  * queries for the MAC address during its probe.
2240  */
2241 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2242 {
2243         u32 vf = 0;
2244         int status = 0;
2245         u8 mac[ETH_ALEN];
2246
2247         be_vf_eth_addr_generate(adapter, mac);
2248
2249         for (vf = 0; vf < num_vfs; vf++) {
2250                 status = be_cmd_pmac_add(adapter, mac,
2251                                         adapter->vf_cfg[vf].vf_if_handle,
2252                                         &adapter->vf_cfg[vf].vf_pmac_id,
2253                                         vf + 1);
2254                 if (status)
2255                         dev_err(&adapter->pdev->dev,
2256                                 "MAC address add failed for VF %d\n", vf);
2257                 else
2258                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2259
2260                 mac[5] += 1;
2261         }
2262         return status;
2263 }
2264
2265 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2266 {
2267         u32 vf;
2268
2269         for (vf = 0; vf < num_vfs; vf++) {
2270                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2271                         be_cmd_pmac_del(adapter,
2272                                         adapter->vf_cfg[vf].vf_if_handle,
2273                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2274         }
2275 }
2276
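/*
 * be_setup() brings the function up in a fixed order: create the
 * interface (plus one per VF when SR-IOV is enabled), then TX queues, RX
 * queues and finally the MCC queues, unwinding in reverse on any failure.
 */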
2277 static int be_setup(struct be_adapter *adapter)
2278 {
2279         struct net_device *netdev = adapter->netdev;
2280         u32 cap_flags, en_flags, vf = 0;
2281         int status;
2282         u8 mac[ETH_ALEN];
2283
2284         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2285                                 BE_IF_FLAGS_BROADCAST |
2286                                 BE_IF_FLAGS_MULTICAST;
2287
2288         if (be_physfn(adapter)) {
2289                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2290                                 BE_IF_FLAGS_PROMISCUOUS |
2291                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2292                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2293
2294                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2295                         cap_flags |= BE_IF_FLAGS_RSS;
2296                         en_flags |= BE_IF_FLAGS_RSS;
2297                 }
2298         }
2299
2300         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2301                         netdev->dev_addr, false /* pmac_invalid */,
2302                         &adapter->if_handle, &adapter->pmac_id, 0);
2303         if (status != 0)
2304                 goto do_none;
2305
2306         if (be_physfn(adapter)) {
2307                 if (adapter->sriov_enabled) {
2308                         while (vf < num_vfs) {
2309                                 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2310                                                         BE_IF_FLAGS_BROADCAST;
2311                                 status = be_cmd_if_create(adapter, cap_flags,
2312                                         en_flags, mac, true,
2313                                         &adapter->vf_cfg[vf].vf_if_handle,
2314                                         NULL, vf+1);
2315                                 if (status) {
2316                                         dev_err(&adapter->pdev->dev,
2317                                         "Interface Create failed for VF %d\n",
2318                                         vf);
2319                                         goto if_destroy;
2320                                 }
2321                                 adapter->vf_cfg[vf].vf_pmac_id =
2322                                                         BE_INVALID_PMAC_ID;
2323                                 vf++;
2324                         }
2325                 }
2326         } else {
2327                 status = be_cmd_mac_addr_query(adapter, mac,
2328                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2329                 if (!status) {
2330                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2331                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2332                 }
2333         }
2334
2335         status = be_tx_queues_create(adapter);
2336         if (status != 0)
2337                 goto if_destroy;
2338
2339         status = be_rx_queues_create(adapter);
2340         if (status != 0)
2341                 goto tx_qs_destroy;
2342
2343         status = be_mcc_queues_create(adapter);
2344         if (status != 0)
2345                 goto rx_qs_destroy;
2346
2347         adapter->link_speed = -1;
2348
2349         return 0;
2350
2351 rx_qs_destroy:
2352         be_rx_queues_destroy(adapter);
2353 tx_qs_destroy:
2354         be_tx_queues_destroy(adapter);
2355 if_destroy:
2356         if (be_physfn(adapter) && adapter->sriov_enabled)
2357                 for (vf = 0; vf < num_vfs; vf++)
2358                         if (adapter->vf_cfg[vf].vf_if_handle)
2359                                 be_cmd_if_destroy(adapter,
2360                                         adapter->vf_cfg[vf].vf_if_handle,
2361                                         vf + 1);
2362         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2363 do_none:
2364         return status;
2365 }
2366
2367 static int be_clear(struct be_adapter *adapter)
2368 {
2369         int vf;
2370
2371         if (be_physfn(adapter) && adapter->sriov_enabled)
2372                 be_vf_eth_addr_rem(adapter);
2373
2374         be_mcc_queues_destroy(adapter);
2375         be_rx_queues_destroy(adapter);
2376         be_tx_queues_destroy(adapter);
2377         adapter->eq_next_idx = 0;
2378
2379         if (be_physfn(adapter) && adapter->sriov_enabled)
2380                 for (vf = 0; vf < num_vfs; vf++)
2381                         if (adapter->vf_cfg[vf].vf_if_handle)
2382                                 be_cmd_if_destroy(adapter,
2383                                         adapter->vf_cfg[vf].vf_if_handle,
2384                                         vf + 1);
2385
2386         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2387
2388         /* tell fw we're done with firing cmds */
2389         be_cmd_fw_clean(adapter);
2390         return 0;
2391 }
2392
2394 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2395 static bool be_flash_redboot(struct be_adapter *adapter,
2396                         const u8 *p, u32 img_start, int image_size,
2397                         int hdr_size)
2398 {
2399         u32 crc_offset;
2400         u8 flashed_crc[4];
2401         int status;
2402
2403         crc_offset = hdr_size + img_start + image_size - 4;
2404
2405         p += crc_offset;
2406
2407         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2408                         (image_size - 4));
2409         if (status) {
2410                 dev_err(&adapter->pdev->dev,
2411                         "could not get crc from flash, not flashing redboot\n");
2412                 return false;
2413         }
2414
2415         /* update redboot only if crc does not match */
2416         if (!memcmp(flashed_crc, p, 4))
2417                 return false;
2418         else
2419                 return true;
2420 }
2421
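/*
 * Flash programming: each firmware component is looked up in the
 * generation-specific table below and written in 32KB chunks.
 * Intermediate chunks are staged with FLASHROM_OPER_SAVE; the final chunk
 * uses FLASHROM_OPER_FLASH to commit. Redboot is rewritten only when its
 * CRC differs from what is already in flash.
 */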
2422 static int be_flash_data(struct be_adapter *adapter,
2423                         const struct firmware *fw,
2424                         struct be_dma_mem *flash_cmd, int num_of_images)
2426 {
2427         int status = 0, i, filehdr_size = 0;
2428         u32 total_bytes = 0, flash_op;
2429         int num_bytes;
2430         const u8 *p = fw->data;
2431         struct be_cmd_write_flashrom *req = flash_cmd->va;
2432         const struct flash_comp *pflashcomp;
2433         int num_comp;
2434
2435         static const struct flash_comp gen3_flash_types[9] = {
2436                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2437                         FLASH_IMAGE_MAX_SIZE_g3},
2438                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2439                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2440                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2441                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2442                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2443                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2444                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2445                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2446                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2447                         FLASH_IMAGE_MAX_SIZE_g3},
2448                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2449                         FLASH_IMAGE_MAX_SIZE_g3},
2450                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2451                         FLASH_IMAGE_MAX_SIZE_g3},
2452                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2453                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2454         };
2455         static const struct flash_comp gen2_flash_types[8] = {
2456                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2457                         FLASH_IMAGE_MAX_SIZE_g2},
2458                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2459                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2460                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2461                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2462                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2463                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2464                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2465                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2466                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2467                         FLASH_IMAGE_MAX_SIZE_g2},
2468                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2469                         FLASH_IMAGE_MAX_SIZE_g2},
2470                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2471                          FLASH_IMAGE_MAX_SIZE_g2}
2472         };
2473
2474         if (adapter->generation == BE_GEN3) {
2475                 pflashcomp = gen3_flash_types;
2476                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2477                 num_comp = ARRAY_SIZE(gen3_flash_types);
2478         } else {
2479                 pflashcomp = gen2_flash_types;
2480                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2481                 num_comp = ARRAY_SIZE(gen2_flash_types);
2482         }
2483         for (i = 0; i < num_comp; i++) {
2484                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2485                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2486                         continue;
2487                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2488                         (!be_flash_redboot(adapter, fw->data,
2489                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2490                         (num_of_images * sizeof(struct image_hdr)))))
2491                         continue;
2492                 p = fw->data;
2493                 p += filehdr_size + pflashcomp[i].offset
2494                         + (num_of_images * sizeof(struct image_hdr));
2495                 if (p + pflashcomp[i].size > fw->data + fw->size)
2496                         return -1;
2497                 total_bytes = pflashcomp[i].size;
2498                 while (total_bytes) {
2499                         if (total_bytes > 32*1024)
2500                                 num_bytes = 32*1024;
2501                         else
2502                                 num_bytes = total_bytes;
2503                         total_bytes -= num_bytes;
2504
2505                         if (!total_bytes)
2506                                 flash_op = FLASHROM_OPER_FLASH;
2507                         else
2508                                 flash_op = FLASHROM_OPER_SAVE;
2509                         memcpy(req->params.data_buf, p, num_bytes);
2510                         p += num_bytes;
2511                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2512                                 pflashcomp[i].optype, flash_op, num_bytes);
2513                         if (status) {
2514                                 dev_err(&adapter->pdev->dev,
2515                                         "cmd to write to flash rom failed.\n");
2516                                 return -1;
2517                         }
2518                         yield();
2519                 }
2520         }
2521         return 0;
2522 }
2523
2524 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2525 {
2526         if (fhdr == NULL)
2527                 return 0;
2528         if (fhdr->build[0] == '3')
2529                 return BE_GEN3;
2530         else if (fhdr->build[0] == '2')
2531                 return BE_GEN2;
2532         else
2533                 return 0;
2534 }
2535
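/*
 * Firmware load entry point (reached via ethtool flashing): the UFI
 * generation encoded in the file header must match the adapter
 * generation. Gen3 UFIs carry multiple image headers; only images with
 * imageid == 1 are flashed.
 */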
2536 int be_load_fw(struct be_adapter *adapter, u8 *func)
2537 {
2538         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2539         const struct firmware *fw;
2540         struct flash_file_hdr_g2 *fhdr;
2541         struct flash_file_hdr_g3 *fhdr3;
2542         struct image_hdr *img_hdr_ptr = NULL;
2543         struct be_dma_mem flash_cmd;
2544         int status, i = 0, num_imgs = 0;
2545         const u8 *p;
2546
2547         if (!netif_running(adapter->netdev)) {
2548                 dev_err(&adapter->pdev->dev,
2549                         "Firmware load not allowed (interface is down)\n");
2550                 return -EPERM;
2551         }
2552
2553         strcpy(fw_file, func);
2554
2555         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2556         if (status)
2557                 goto fw_exit;
2558
2559         p = fw->data;
2560         fhdr = (struct flash_file_hdr_g2 *) p;
2561         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2562
2563         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2564         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2565                                           &flash_cmd.dma, GFP_KERNEL);
2566         if (!flash_cmd.va) {
2567                 status = -ENOMEM;
2568                 dev_err(&adapter->pdev->dev,
2569                         "Memory allocation failure while flashing\n");
2570                 goto fw_exit;
2571         }
2572
2573         if ((adapter->generation == BE_GEN3) &&
2574                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2575                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2576                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2577                 for (i = 0; i < num_imgs; i++) {
2578                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2579                                         (sizeof(struct flash_file_hdr_g3) +
2580                                          i * sizeof(struct image_hdr)));
2581                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2582                                 status = be_flash_data(adapter, fw, &flash_cmd,
2583                                                         num_imgs);
2584                 }
2585         } else if ((adapter->generation == BE_GEN2) &&
2586                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2587                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2588         } else {
2589                 dev_err(&adapter->pdev->dev,
2590                         "UFI and Interface are not compatible for flashing\n");
2591                 status = -1;
2592         }
2593
2594         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2595                           flash_cmd.dma);
2596         if (status) {
2597                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2598                 goto fw_exit;
2599         }
2600
2601         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2602
2603 fw_exit:
2604         release_firmware(fw);
2605         return status;
2606 }
2607
2608 static struct net_device_ops be_netdev_ops = {
2609         .ndo_open               = be_open,
2610         .ndo_stop               = be_close,
2611         .ndo_start_xmit         = be_xmit,
2612         .ndo_set_rx_mode        = be_set_multicast_list,
2613         .ndo_set_mac_address    = be_mac_addr_set,
2614         .ndo_change_mtu         = be_change_mtu,
2615         .ndo_validate_addr      = eth_validate_addr,
2616         .ndo_vlan_rx_register   = be_vlan_register,
2617         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2618         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2619         .ndo_set_vf_mac         = be_set_vf_mac,
2620         .ndo_set_vf_vlan        = be_set_vf_vlan,
2621         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2622         .ndo_get_vf_config      = be_get_vf_config
2623 };
2624
2625 static void be_netdev_init(struct net_device *netdev)
2626 {
2627         struct be_adapter *adapter = netdev_priv(netdev);
2628         struct be_rx_obj *rxo;
2629         int i;
2630
2631         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2632                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2633                 NETIF_F_HW_VLAN_TX;
2634         if (be_multi_rxq(adapter))
2635                 netdev->hw_features |= NETIF_F_RXHASH;
2636
2637         netdev->features |= netdev->hw_features |
2638                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2639
2640         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2641                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2642
2643         if (lancer_chip(adapter))
2644                 netdev->vlan_features |= NETIF_F_TSO6;
2645
2646         netdev->flags |= IFF_MULTICAST;
2647
2648         /* Default settings for Rx and Tx flow control */
2649         adapter->rx_fc = true;
2650         adapter->tx_fc = true;
2651
2652         netif_set_gso_max_size(netdev, 65535);
2653
2654         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2655
2656         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2657
2658         for_all_rx_queues(adapter, rxo, i)
2659                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2660                                 BE_NAPI_WEIGHT);
2661
2662         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2663                 BE_NAPI_WEIGHT);
2664 }
2665
2666 static void be_unmap_pci_bars(struct be_adapter *adapter)
2667 {
2668         if (adapter->csr)
2669                 iounmap(adapter->csr);
2670         if (adapter->db)
2671                 iounmap(adapter->db);
2672         if (adapter->pcicfg && be_physfn(adapter))
2673                 iounmap(adapter->pcicfg);
2674 }
2675
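/*
 * BAR layout differs by chip: Lancer exposes only a doorbell BAR (BAR 0).
 * Otherwise the PF maps CSR space from BAR 2. BE2 keeps pcicfg in BAR 1
 * and doorbells in BAR 4; later chips move pcicfg to BAR 0 and keep PF
 * doorbells on BAR 4, while VF doorbells sit on BAR 0 and VF pcicfg is
 * derived from a fixed offset within the doorbell mapping.
 */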
2676 static int be_map_pci_bars(struct be_adapter *adapter)
2677 {
2678         u8 __iomem *addr;
2679         int pcicfg_reg, db_reg;
2680
2681         if (lancer_chip(adapter)) {
2682                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2683                         pci_resource_len(adapter->pdev, 0));
2684                 if (addr == NULL)
2685                         return -ENOMEM;
2686                 adapter->db = addr;
2687                 return 0;
2688         }
2689
2690         if (be_physfn(adapter)) {
2691                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2692                                 pci_resource_len(adapter->pdev, 2));
2693                 if (addr == NULL)
2694                         return -ENOMEM;
2695                 adapter->csr = addr;
2696         }
2697
2698         if (adapter->generation == BE_GEN2) {
2699                 pcicfg_reg = 1;
2700                 db_reg = 4;
2701         } else {
2702                 pcicfg_reg = 0;
2703                 if (be_physfn(adapter))
2704                         db_reg = 4;
2705                 else
2706                         db_reg = 0;
2707         }
2708         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2709                                 pci_resource_len(adapter->pdev, db_reg));
2710         if (addr == NULL)
2711                 goto pci_map_err;
2712         adapter->db = addr;
2713
2714         if (be_physfn(adapter)) {
2715                 addr = ioremap_nocache(
2716                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2717                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2718                 if (addr == NULL)
2719                         goto pci_map_err;
2720                 adapter->pcicfg = addr;
2721         } else
2722                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2723
2724         return 0;
2725 pci_map_err:
2726         be_unmap_pci_bars(adapter);
2727         return -ENOMEM;
2728 }
2729
2731 static void be_ctrl_cleanup(struct be_adapter *adapter)
2732 {
2733         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2734
2735         be_unmap_pci_bars(adapter);
2736
2737         if (mem->va)
2738                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2739                                   mem->dma);
2740
2741         mem = &adapter->mc_cmd_mem;
2742         if (mem->va)
2743                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2744                                   mem->dma);
2745 }
2746
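/*
 * The mailbox must be 16-byte aligned for the hardware, so 16 extra bytes
 * are allocated and both the VA and the DMA address are rounded up with
 * PTR_ALIGN before use.
 */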
2747 static int be_ctrl_init(struct be_adapter *adapter)
2748 {
2749         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2750         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2751         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2752         int status;
2753
2754         status = be_map_pci_bars(adapter);
2755         if (status)
2756                 goto done;
2757
2758         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2759         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2760                                                 mbox_mem_alloc->size,
2761                                                 &mbox_mem_alloc->dma,
2762                                                 GFP_KERNEL);
2763         if (!mbox_mem_alloc->va) {
2764                 status = -ENOMEM;
2765                 goto unmap_pci_bars;
2766         }
2767
2768         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2769         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2770         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2771         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2772
2773         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2774         mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2775                                             mc_cmd_mem->size, &mc_cmd_mem->dma,
2776                                             GFP_KERNEL);
2777         if (mc_cmd_mem->va == NULL) {
2778                 status = -ENOMEM;
2779                 goto free_mbox;
2780         }
2781         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2782
2783         mutex_init(&adapter->mbox_lock);
2784         spin_lock_init(&adapter->mcc_lock);
2785         spin_lock_init(&adapter->mcc_cq_lock);
2786
2787         init_completion(&adapter->flash_compl);
2788         pci_save_state(adapter->pdev);
2789         return 0;
2790
2791 free_mbox:
2792         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2793                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
2794
2795 unmap_pci_bars:
2796         be_unmap_pci_bars(adapter);
2797
2798 done:
2799         return status;
2800 }
2801
2802 static void be_stats_cleanup(struct be_adapter *adapter)
2803 {
2804         struct be_dma_mem *cmd = &adapter->stats_cmd;
2805
2806         if (cmd->va)
2807                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2808                                   cmd->va, cmd->dma);
2809 }
2810
2811 static int be_stats_init(struct be_adapter *adapter)
2812 {
2813         struct be_dma_mem *cmd = &adapter->stats_cmd;
2814
2815         cmd->size = sizeof(struct be_cmd_req_get_stats);
2816         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2817                                      GFP_KERNEL);
2818         if (cmd->va == NULL)
2819                 return -1;
2820         memset(cmd->va, 0, cmd->size);
2821         return 0;
2822 }
2823
2824 static void __devexit be_remove(struct pci_dev *pdev)
2825 {
2826         struct be_adapter *adapter = pci_get_drvdata(pdev);
2827
2828         if (!adapter)
2829                 return;
2830
2831         cancel_delayed_work_sync(&adapter->work);
2832
2833         unregister_netdev(adapter->netdev);
2834
2835         be_clear(adapter);
2836
2837         be_stats_cleanup(adapter);
2838
2839         be_ctrl_cleanup(adapter);
2840
2841         kfree(adapter->vf_cfg);
2842         be_sriov_disable(adapter);
2843
2844         be_msix_disable(adapter);
2845
2846         pci_set_drvdata(pdev, NULL);
2847         pci_release_regions(pdev);
2848         pci_disable_device(pdev);
2849
2850         free_netdev(adapter->netdev);
2851 }
2852
2853 static int be_get_config(struct be_adapter *adapter)
2854 {
2855         int status;
2856         u8 mac[ETH_ALEN];
2857
2858         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2859         if (status)
2860                 return status;
2861
2862         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2863                         &adapter->function_mode, &adapter->function_caps);
2864         if (status)
2865                 return status;
2866
2867         memset(mac, 0, ETH_ALEN);
2868
2869         if (be_physfn(adapter)) {
2870                 status = be_cmd_mac_addr_query(adapter, mac,
2871                         MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
2872
2873                 if (status)
2874                         return status;
2875
2876                 if (!is_valid_ether_addr(mac))
2877                         return -EADDRNOTAVAIL;
2878
2879                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2880                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2881         }
2882
2883         if (adapter->function_mode & 0x400)
2884                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2885         else
2886                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2887
2888         status = be_cmd_get_cntl_attributes(adapter);
2889         if (status)
2890                 return status;
2891
2892         be_cmd_check_native_mode(adapter);
2893         return 0;
2894 }
2895
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

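/* Poll the SLIPORT status register until firmware reports ready, for
 * up to SLIPORT_READY_TIMEOUT iterations of 20ms each (~10 seconds).
 */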
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -ETIMEDOUT;

	return status;
}

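/* If the port reports an error that firmware flags as recoverable,
 * initiate a port reset via SLIPORT_CONTROL and wait for the port to
 * come back ready; fail if the error persists or is not recoverable.
 */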
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -EIO;
		} else if (err || reset_needed) {
			status = -EIO;
		}
	}
	return status;
}

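/* Bring up a newly discovered function: enable the PCI device, set the
 * DMA mask, optionally enable SR-IOV, initialize mailbox/doorbell
 * resources, sync with firmware POST, configure the function and
 * register the net device. The labels below undo each step in reverse
 * order on failure.
 */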
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg) {
			status = -ENOMEM;
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

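/* Legacy PM suspend: stop the worker, arm wake-on-lan if configured,
 * close the interface, save the current flow-control settings so they
 * can be restored after resume, and power the device down.
 */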
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

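/* Legacy PM resume: re-enable the device, restore PCI state, re-init
 * the firmware command path, rebuild the queues via be_setup() and
 * reopen the interface if it was running when we suspended.
 */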
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR (function-level reset) stops BE from DMAing any data; reset
 * the function on shutdown so the hardware is quiesced before reboot.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

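/* PCI-EH (EEH) callbacks: error_detected() quiesces the function,
 * slot_reset() re-enables it and waits for firmware POST, and resume()
 * re-creates the queues and restarts the interface.
 */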
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

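/* PCI driver glue: probe/remove, legacy power management entry points
 * and the EEH error handlers above.
 */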
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers,
};

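/* Validate module parameters before registering the driver: the RX
 * fragment size must be one of the buffer sizes the RX rings support.
 * Typical usage (module name per DRV_NAME, normally "be2net"):
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 */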
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);