/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
                 " Range[false:0|true:1]");

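/*
 * Example usage (assuming the driver is built as the "bna" module):
 *   modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0
 * falls back to INT-X interrupts and turns IOC auto recovery off.
 */
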
/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)                                \
        (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?                  \
         ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
         ((_bnad)->pcidev->irq))

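/*
 * Fill a res_info entry with the KVA memory requirement for an unmap
 * queue: one struct bnad_unmap_q plus room to grow its trailing unmap
 * array (one entry is already part of the struct) to (_depth) entries.
 */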
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)       \
do {                                                            \
        (_res_info)->res_type = BNA_RES_T_MEM;                  \
        (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
        (_res_info)->res_u.mem_info.num = (_num);               \
        (_res_info)->res_u.mem_info.len =                       \
        sizeof(struct bnad_unmap_q) +                           \
        (sizeof(struct bnad_skb_unmap) * ((_depth) - 1));       \
} while (0)

#define BNAD_TXRX_SYNC_MDELAY   250     /* 250 msecs */

static void
bnad_add_to_list(struct bnad *bnad)
{
        mutex_lock(&bnad_list_mutex);
        list_add_tail(&bnad->list_entry, &bnad_list);
        bnad->id = bna_id++;
        mutex_unlock(&bnad_list_mutex);
}

static void
bnad_remove_from_list(struct bnad *bnad)
{
        mutex_lock(&bnad_list_mutex);
        list_del(&bnad->list_entry);
        mutex_unlock(&bnad_list_mutex);
}

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
        struct bna_cq_entry *cmpl, *next_cmpl;
        unsigned int wi_range, wis = 0, ccb_prod = 0;
        int i;

        BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
                            wi_range);

        for (i = 0; i < ccb->q_depth; i++) {
                wis++;
                if (likely(--wi_range))
                        next_cmpl = cmpl + 1;
                else {
                        BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
                        wis = 0;
                        BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
                                                next_cmpl, wi_range);
                }
                cmpl->valid = 0;
                cmpl = next_cmpl;
        }
}

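/*
 * Unmap the skb head and all of its page fragments from @array,
 * starting at @index; returns the updated (wrapped) unmap queue index.
 */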
static u32
bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
        u32 index, u32 depth, struct sk_buff *skb, u32 frag)
{
        int j;
        array[index].skb = NULL;

        dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
                        skb_headlen(skb), DMA_TO_DEVICE);
        dma_unmap_addr_set(&array[index], dma_addr, 0);
        BNA_QE_INDX_ADD(index, 1, depth);

        for (j = 0; j < frag; j++) {
                dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
                          skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE);
                dma_unmap_addr_set(&array[index], dma_addr, 0);
                BNA_QE_INDX_ADD(index, 1, depth);
        }

        return index;
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
                 struct bna_tcb *tcb)
{
        u32             unmap_cons;
        struct bnad_unmap_q *unmap_q = tcb->unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff          *skb = NULL;
        int                     q;

        unmap_array = unmap_q->unmap_array;

        for (q = 0; q < unmap_q->q_depth; q++) {
                skb = unmap_array[q].skb;
                if (!skb)
                        continue;

                unmap_cons = q;
                unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
                                unmap_cons, unmap_q->q_depth, skb,
                                skb_shinfo(skb)->nr_frags);

                dev_kfree_skb_any(skb);
        }
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *                  b) Sending context
 *                  c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
                 struct bna_tcb *tcb)
{
        u32             unmap_cons, sent_packets = 0, sent_bytes = 0;
        u16             wis, updated_hw_cons;
        struct bnad_unmap_q *unmap_q = tcb->unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff          *skb;

        /*
         * Just return if TX is stopped. This check is useful
         * when bnad_free_txbufs() runs from a tasklet that was
         * scheduled before bnad_cb_tx_cleanup() cleared the
         * BNAD_TXQ_TX_STARTED bit, but actually executes after
         * the cleanup has run.
         */
        if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
                return 0;

        updated_hw_cons = *(tcb->hw_consumer_index);

        wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
                                  updated_hw_cons, tcb->q_depth);

        BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

        unmap_array = unmap_q->unmap_array;
        unmap_cons = unmap_q->consumer_index;

        prefetch(&unmap_array[unmap_cons + 1]);
        while (wis) {
                skb = unmap_array[unmap_cons].skb;

                sent_packets++;
                sent_bytes += skb->len;
                wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

                unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
                                unmap_cons, unmap_q->q_depth, skb,
                                skb_shinfo(skb)->nr_frags);

                dev_kfree_skb_any(skb);
        }

        /* Update consumer pointers. */
        tcb->consumer_index = updated_hw_cons;
        unmap_q->consumer_index = unmap_cons;

        tcb->txq->tx_packets += sent_packets;
        tcb->txq->tx_bytes += sent_bytes;

        return sent_packets;
}

/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
        struct bnad *bnad = (struct bnad *)bnad_ptr;
        struct bna_tcb *tcb;
        u32             acked = 0;
        int                     i, j;

        for (i = 0; i < bnad->num_tx; i++) {
                for (j = 0; j < bnad->num_txq_per_tx; j++) {
                        tcb = bnad->tx_info[i].tcb[j];
                        if (!tcb)
                                continue;
                        if (((u16) (*tcb->hw_consumer_index) !=
                                tcb->consumer_index) &&
                                (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
                                                  &tcb->flags))) {
                                acked = bnad_free_txbufs(bnad, tcb);
                                if (likely(test_bit(BNAD_TXQ_TX_STARTED,
                                        &tcb->flags)))
                                        bna_ib_ack(tcb->i_dbell, acked);
                                smp_mb__before_clear_bit();
                                clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
                        }
                        if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
                                                &tcb->flags)))
                                continue;
                        if (netif_queue_stopped(bnad->netdev)) {
                                if (acked && netif_carrier_ok(bnad->netdev) &&
                                        BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
                                                BNAD_NETIF_WAKE_THRESHOLD) {
                                        netif_wake_queue(bnad->netdev);
                                        /* TODO */
                                        /* Counters for individual TxQs? */
                                        BNAD_UPDATE_CTR(bnad,
                                                netif_queue_wakeup);
                                }
                        }
                }
        }
}

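/*
 * Reclaim completed Tx buffers for one TxQ, wake the netdev queue if
 * enough entries were freed, and ack the IB doorbell with the number
 * of completions processed.
 */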
static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
        struct net_device *netdev = bnad->netdev;
        u32 sent = 0;

        if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
                return 0;

        sent = bnad_free_txbufs(bnad, tcb);
        if (sent) {
                if (netif_queue_stopped(netdev) &&
                    netif_carrier_ok(netdev) &&
                    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
                                    BNAD_NETIF_WAKE_THRESHOLD) {
                        if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
                                netif_wake_queue(netdev);
                                BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
                        }
                }
        }

        if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
                bna_ib_ack(tcb->i_dbell, sent);

        smp_mb__before_clear_bit();
        clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

        return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
        struct bna_tcb *tcb = (struct bna_tcb *)data;
        struct bnad *bnad = tcb->bnad;

        bnad_tx(bnad, tcb);

        return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
        struct bnad_unmap_q *unmap_q = rcb->unmap_q;

        rcb->producer_index = 0;
        rcb->consumer_index = 0;

        unmap_q->producer_index = 0;
        unmap_q->consumer_index = 0;
}

static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
        struct bnad_unmap_q *unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff *skb;
        int unmap_cons;

        unmap_q = rcb->unmap_q;
        unmap_array = unmap_q->unmap_array;
        for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
                skb = unmap_array[unmap_cons].skb;
                if (!skb)
                        continue;
                unmap_array[unmap_cons].skb = NULL;
                dma_unmap_single(&bnad->pcidev->dev,
                                 dma_unmap_addr(&unmap_array[unmap_cons],
                                                dma_addr),
                                 rcb->rxq->buffer_size,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
        }
        bnad_reset_rcb(bnad, rcb);
}

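/*
 * Allocate skbs for all free entries in the unmap queue, DMA-map them,
 * write their addresses into the RxQ entries, and ring the producer
 * doorbell if posting is currently allowed.
 */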
static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
        u16 to_alloc, alloced, unmap_prod, wi_range;
        struct bnad_unmap_q *unmap_q = rcb->unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct bna_rxq_entry *rxent;
        struct sk_buff *skb;
        dma_addr_t dma_addr;

        alloced = 0;
        to_alloc =
                BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

        unmap_array = unmap_q->unmap_array;
        unmap_prod = unmap_q->producer_index;

        BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

        while (to_alloc--) {
                if (!wi_range)
                        BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
                                             wi_range);
                skb = netdev_alloc_skb_ip_align(bnad->netdev,
                                                rcb->rxq->buffer_size);
                if (unlikely(!skb)) {
                        BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
                        rcb->rxq->rxbuf_alloc_failed++;
                        goto finishing;
                }
                unmap_array[unmap_prod].skb = skb;
                dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
                                          rcb->rxq->buffer_size,
                                          DMA_FROM_DEVICE);
                dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
                                   dma_addr);
                BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
                BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

                rxent++;
                wi_range--;
                alloced++;
        }

finishing:
        if (likely(alloced)) {
                unmap_q->producer_index = unmap_prod;
                rcb->producer_index = unmap_prod;
                smp_mb();
                if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
                        bna_rxq_prod_indx_doorbell(rcb);
        }
}

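/*
 * Refill the RxQ when the number of free entries crosses the refill
 * threshold; the BNAD_RXQ_REFILL bit keeps concurrent refills out.
 */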
static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
        struct bnad_unmap_q *unmap_q = rcb->unmap_q;

        if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
                if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
                         >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
                        bnad_alloc_n_post_rxbufs(bnad, rcb);
                smp_mb__before_clear_bit();
                clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
        }
}

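/*
 * NAPI poll core: walk the completion queue and deliver up to @budget
 * received frames (via GRO when the checksum was verified), then ack
 * the IB and trigger RxQ refills. Returns the number of packets
 * processed.
 */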
static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
        struct bna_cq_entry *cmpl, *next_cmpl;
        struct bna_rcb *rcb = NULL;
        unsigned int wi_range, packets = 0, wis = 0;
        struct bnad_unmap_q *unmap_q;
        struct bnad_skb_unmap *unmap_array;
        struct sk_buff *skb;
        u32 flags, unmap_cons;
        struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
        struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);

        set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

        if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
                clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
                return 0;
        }

        prefetch(bnad->netdev);
        BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
                            wi_range);
        BUG_ON(!(wi_range <= ccb->q_depth));
        while (cmpl->valid && packets < budget) {
                packets++;
                BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

                if (bna_is_small_rxq(cmpl->rxq_id))
                        rcb = ccb->rcb[1];
                else
                        rcb = ccb->rcb[0];

                unmap_q = rcb->unmap_q;
                unmap_array = unmap_q->unmap_array;
                unmap_cons = unmap_q->consumer_index;

                skb = unmap_array[unmap_cons].skb;
                BUG_ON(!(skb));
                unmap_array[unmap_cons].skb = NULL;
                dma_unmap_single(&bnad->pcidev->dev,
                                 dma_unmap_addr(&unmap_array[unmap_cons],
                                                dma_addr),
                                 rcb->rxq->buffer_size,
                                 DMA_FROM_DEVICE);
                BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

                /* Should be more efficient ? Performance ? */
                BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

                wis++;
                if (likely(--wi_range))
                        next_cmpl = cmpl + 1;
                else {
                        BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
                        wis = 0;
                        BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
                                                next_cmpl, wi_range);
                        BUG_ON(!(wi_range <= ccb->q_depth));
                }
                prefetch(next_cmpl);

                flags = ntohl(cmpl->flags);
                if (unlikely
                    (flags &
                     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
                      BNA_CQ_EF_TOO_LONG))) {
                        dev_kfree_skb_any(skb);
                        rcb->rxq->rx_packets_with_error++;
                        goto next;
                }

                skb_put(skb, ntohs(cmpl->length));
                if (likely
                    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
                     (((flags & BNA_CQ_EF_IPV4) &&
                      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
                      (flags & BNA_CQ_EF_IPV6)) &&
                      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
                      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                rcb->rxq->rx_packets++;
                rcb->rxq->rx_bytes += skb->len;
                skb->protocol = eth_type_trans(skb, bnad->netdev);

                if (flags & BNA_CQ_EF_VLAN)
                        __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));

                if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                        napi_gro_receive(&rx_ctrl->napi, skb);
                else {
                        netif_receive_skb(skb);
                }

next:
                cmpl->valid = 0;
                cmpl = next_cmpl;
        }

        BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

        if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
                bna_ib_ack_disable_irq(ccb->i_dbell, packets);

        bnad_refill_rxq(bnad, ccb->rcb[0]);
        if (ccb->rcb[1])
                bnad_refill_rxq(bnad, ccb->rcb[1]);

        clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

        return packets;
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
        struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
        struct napi_struct *napi = &rx_ctrl->napi;

        if (likely(napi_schedule_prep(napi))) {
                __napi_schedule(napi);
                rx_ctrl->rx_schedule++;
        }
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
        struct bna_ccb *ccb = (struct bna_ccb *)data;

        if (ccb) {
                ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
                bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
        }

        return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
        u32 intr_status;
        unsigned long flags;
        struct bnad *bnad = (struct bnad *)data;

        spin_lock_irqsave(&bnad->bna_lock, flags);
        if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
                return IRQ_HANDLED;
        }

        bna_intr_status_get(&bnad->bna, intr_status);

        if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
                bna_mbox_handler(&bnad->bna, intr_status);

        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        return IRQ_HANDLED;
}

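/*
 * Legacy (INT-X) interrupt handler: services mailbox events first,
 * then walks all Tx and Rx objects to process data interrupts.
 */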
static irqreturn_t
bnad_isr(int irq, void *data)
{
        int i, j;
        u32 intr_status;
        unsigned long flags;
        struct bnad *bnad = (struct bnad *)data;
        struct bnad_rx_info *rx_info;
        struct bnad_rx_ctrl *rx_ctrl;
        struct bna_tcb *tcb = NULL;

        spin_lock_irqsave(&bnad->bna_lock, flags);
        if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
                return IRQ_NONE;
        }

        bna_intr_status_get(&bnad->bna, intr_status);

        if (unlikely(!intr_status)) {
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
                return IRQ_NONE;
        }

        if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
                bna_mbox_handler(&bnad->bna, intr_status);

        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        if (!BNA_IS_INTX_DATA_INTR(intr_status))
                return IRQ_HANDLED;

        /* Process data interrupts */
        /* Tx processing */
        for (i = 0; i < bnad->num_tx; i++) {
                for (j = 0; j < bnad->num_txq_per_tx; j++) {
                        tcb = bnad->tx_info[i].tcb[j];
                        if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
                                bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
                }
        }
        /* Rx processing */
        for (i = 0; i < bnad->num_rx; i++) {
                rx_info = &bnad->rx_info[i];
                if (!rx_info->rx)
                        continue;
                for (j = 0; j < bnad->num_rxp_per_rx; j++) {
                        rx_ctrl = &rx_info->rx_ctrl[j];
                        if (rx_ctrl->ccb)
                                bnad_netif_rx_schedule_poll(bnad,
                                                            rx_ctrl->ccb);
                }
        }
        return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
        clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

        BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
        set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

        BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
        struct net_device *netdev = bnad->netdev;

        memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
        if (is_zero_ether_addr(netdev->dev_addr))
                memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
        bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
        bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
        bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
        complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
        bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
        complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
        bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
        complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
        struct bnad *bnad = (struct bnad *)arg;

        netif_carrier_off(bnad->netdev);
        complete(&bnad->bnad_completions.enet_comp);
}

void
bnad_cb_ethport_link_status(struct bnad *bnad,
                        enum bna_link_status link_status)
{
        bool link_up = false;

        link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

        if (link_status == BNA_CEE_UP) {
                if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
                        BNAD_UPDATE_CTR(bnad, cee_toggle);
                set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
        } else {
                if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
                        BNAD_UPDATE_CTR(bnad, cee_toggle);
                clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
        }

        if (link_up) {
                if (!netif_carrier_ok(bnad->netdev)) {
                        uint tx_id, tcb_id;
                        printk(KERN_WARNING "bna: %s link up\n",
                                bnad->netdev->name);
                        netif_carrier_on(bnad->netdev);
                        BNAD_UPDATE_CTR(bnad, link_toggle);
                        for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
                                for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
                                      tcb_id++) {
                                        struct bna_tcb *tcb =
                                        bnad->tx_info[tx_id].tcb[tcb_id];
                                        u32 txq_id;
                                        if (!tcb)
                                                continue;

                                        txq_id = tcb->id;

                                        if (test_bit(BNAD_TXQ_TX_STARTED,
                                                     &tcb->flags)) {
                                                /*
                                                 * Force an immediate
                                                 * Transmit Schedule */
                                                printk(KERN_INFO "bna: %s %d "
                                                      "TXQ_STARTED\n",
                                                       bnad->netdev->name,
                                                       txq_id);
                                                netif_wake_subqueue(
                                                                bnad->netdev,
                                                                txq_id);
                                                BNAD_UPDATE_CTR(bnad,
                                                        netif_queue_wakeup);
                                        } else {
                                                netif_stop_subqueue(
                                                                bnad->netdev,
                                                                txq_id);
                                                BNAD_UPDATE_CTR(bnad,
                                                        netif_queue_stop);
                                        }
                                }
                        }
                }
        } else {
                if (netif_carrier_ok(bnad->netdev)) {
                        printk(KERN_WARNING "bna: %s link down\n",
                                bnad->netdev->name);
                        netif_carrier_off(bnad->netdev);
                        BNAD_UPDATE_CTR(bnad, link_toggle);
                }
        }
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
        struct bnad *bnad = (struct bnad *)arg;

        complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
        struct bnad_tx_info *tx_info =
                        (struct bnad_tx_info *)tcb->txq->tx->priv;
        struct bnad_unmap_q *unmap_q = tcb->unmap_q;

        tx_info->tcb[tcb->id] = tcb;
        unmap_q->producer_index = 0;
        unmap_q->consumer_index = 0;
        unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
        struct bnad_tx_info *tx_info =
                        (struct bnad_tx_info *)tcb->txq->tx->priv;
        struct bnad_unmap_q *unmap_q = tcb->unmap_q;

        while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
                cpu_relax();

        bnad_free_all_txbufs(bnad, tcb);

        unmap_q->producer_index = 0;
        unmap_q->consumer_index = 0;

        smp_mb__before_clear_bit();
        clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

        tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
        struct bnad_unmap_q *unmap_q = rcb->unmap_q;

        unmap_q->producer_index = 0;
        unmap_q->consumer_index = 0;
        unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
        bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
        struct bnad_rx_info *rx_info =
                        (struct bnad_rx_info *)ccb->cq->rx->priv;

        rx_info->rx_ctrl[ccb->id].ccb = ccb;
        ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
        struct bnad_rx_info *rx_info =
                        (struct bnad_rx_info *)ccb->cq->rx->priv;

        rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
        struct bnad_tx_info *tx_info =
                        (struct bnad_tx_info *)tx->priv;
        struct bna_tcb *tcb;
        u32 txq_id;
        int i;

        for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
                tcb = tx_info->tcb[i];
                if (!tcb)
                        continue;
                txq_id = tcb->id;
                clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
                netif_stop_subqueue(bnad->netdev, txq_id);
                printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
                        bnad->netdev->name, txq_id);
        }
}

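/*
 * Drain and re-initialize each TxQ, mark it started, and wake the
 * corresponding netdev subqueue if the carrier is up.
 */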
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
        struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
        struct bna_tcb *tcb;
        struct bnad_unmap_q *unmap_q;
        u32 txq_id;
        int i;

        for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
                tcb = tx_info->tcb[i];
                if (!tcb)
                        continue;
                txq_id = tcb->id;

                unmap_q = tcb->unmap_q;

                if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
                        continue;

                while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
                        cpu_relax();

                bnad_free_all_txbufs(bnad, tcb);

                unmap_q->producer_index = 0;
                unmap_q->consumer_index = 0;

                smp_mb__before_clear_bit();
                clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

                set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

                if (netif_carrier_ok(bnad->netdev)) {
                        printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
                                bnad->netdev->name, txq_id);
                        netif_wake_subqueue(bnad->netdev, txq_id);
                        BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
                }
        }

        /*
         * Workaround: if the first ioceth enable failed, we may have
         * been left with a zero MAC address. Try to fetch the MAC
         * address again here.
         */
        if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
                bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
                bnad_set_netdev_perm_addr(bnad);
        }
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
        struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
        struct bna_tcb *tcb;
        int i;

        for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
                tcb = tx_info->tcb[i];
                if (!tcb)
                        continue;
        }

        mdelay(BNAD_TXRX_SYNC_MDELAY);
        bna_tx_cleanup_complete(tx);
}

static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
        struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
        struct bna_ccb *ccb;
        struct bnad_rx_ctrl *rx_ctrl;
        int i;

        for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
                rx_ctrl = &rx_info->rx_ctrl[i];
                ccb = rx_ctrl->ccb;
                if (!ccb)
                        continue;

                clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

                if (ccb->rcb[1])
                        clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
        }
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
        struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
        struct bna_ccb *ccb;
        struct bnad_rx_ctrl *rx_ctrl;
        int i;

        mdelay(BNAD_TXRX_SYNC_MDELAY);

        for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
                rx_ctrl = &rx_info->rx_ctrl[i];
                ccb = rx_ctrl->ccb;
                if (!ccb)
                        continue;

                clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

                if (ccb->rcb[1])
                        clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

                while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
                        cpu_relax();
        }

        bna_rx_cleanup_complete(rx);
}

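/*
 * Re-initialize the completion queue and, for every RCB, free any
 * stale buffers before allocating and posting a fresh set.
 */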
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
        struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
        struct bna_ccb *ccb;
        struct bna_rcb *rcb;
        struct bnad_rx_ctrl *rx_ctrl;
        struct bnad_unmap_q *unmap_q;
        int i;
        int j;

        for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
                rx_ctrl = &rx_info->rx_ctrl[i];
                ccb = rx_ctrl->ccb;
                if (!ccb)
                        continue;

                bnad_cq_cmpl_init(bnad, ccb);

                for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
                        rcb = ccb->rcb[j];
                        if (!rcb)
                                continue;
                        bnad_free_all_rxbufs(bnad, rcb);

                        set_bit(BNAD_RXQ_STARTED, &rcb->flags);
                        set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
                        unmap_q = rcb->unmap_q;

                        /* Now allocate & post buffers for this RCB */
                        /* !!Allocation in callback context */
                        if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
                                if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
                                        >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
                                        bnad_alloc_n_post_rxbufs(bnad, rcb);
                                smp_mb__before_clear_bit();
                                clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
                        }
                }
        }
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
        struct bnad *bnad = (struct bnad *)arg;

        complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
        bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
        complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
                       struct bna_stats *stats)
{
        if (status == BNA_CB_SUCCESS)
                BNAD_UPDATE_CTR(bnad, hw_stats_updates);

        if (!netif_running(bnad->netdev) ||
                !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
                return;

        mod_timer(&bnad->stats_timer,
                  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
        bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
        complete(&bnad->bnad_completions.mtu_comp);
}

void
bnad_cb_completion(void *arg, enum bfa_status status)
{
        struct bnad_iocmd_comp *iocmd_comp =
                        (struct bnad_iocmd_comp *)arg;

        iocmd_comp->comp_status = (u32) status;
        complete(&iocmd_comp->comp);
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
              struct bna_mem_info *mem_info)
{
        int i;
        dma_addr_t dma_pa;

        if (mem_info->mdl == NULL)
                return;

        for (i = 0; i < mem_info->num; i++) {
                if (mem_info->mdl[i].kva != NULL) {
                        if (mem_info->mem_type == BNA_MEM_T_DMA) {
                                BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
                                                dma_pa);
                                dma_free_coherent(&bnad->pcidev->dev,
                                                  mem_info->mdl[i].len,
                                                  mem_info->mdl[i].kva, dma_pa);
                        } else
                                kfree(mem_info->mdl[i].kva);
                }
        }
        kfree(mem_info->mdl);
        mem_info->mdl = NULL;
}

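/*
 * Allocate @num blocks of @len bytes each, either DMA-coherent or
 * plain kernel memory depending on mem_type; on failure everything
 * allocated so far is freed.
 */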
static int
bnad_mem_alloc(struct bnad *bnad,
               struct bna_mem_info *mem_info)
{
        int i;
        dma_addr_t dma_pa;

        if ((mem_info->num == 0) || (mem_info->len == 0)) {
                mem_info->mdl = NULL;
                return 0;
        }

        mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
                                GFP_KERNEL);
        if (mem_info->mdl == NULL)
                return -ENOMEM;

        if (mem_info->mem_type == BNA_MEM_T_DMA) {
                for (i = 0; i < mem_info->num; i++) {
                        mem_info->mdl[i].len = mem_info->len;
                        mem_info->mdl[i].kva =
                                dma_alloc_coherent(&bnad->pcidev->dev,
                                                mem_info->len, &dma_pa,
                                                GFP_KERNEL);

                        if (mem_info->mdl[i].kva == NULL)
                                goto err_return;

                        BNA_SET_DMA_ADDR(dma_pa,
                                         &(mem_info->mdl[i].dma));
                }
        } else {
                for (i = 0; i < mem_info->num; i++) {
                        mem_info->mdl[i].len = mem_info->len;
                        mem_info->mdl[i].kva = kzalloc(mem_info->len,
                                                        GFP_KERNEL);
                        if (mem_info->mdl[i].kva == NULL)
                                goto err_return;
                }
        }

        return 0;

err_return:
        bnad_mem_free(bnad, mem_info);
        return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
        int irq;
        unsigned long flags;

        spin_lock_irqsave(&bnad->bna_lock, flags);
        bnad_disable_mbox_irq(bnad);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        irq = BNAD_GET_MBOX_IRQ(bnad);
        free_irq(irq, bnad);
}

/*
 * Allocates the IRQ for the mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback
 * from bna.
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
        int             err = 0;
        unsigned long   irq_flags, flags;
        u32     irq;
        irq_handler_t   irq_handler;

        spin_lock_irqsave(&bnad->bna_lock, flags);
        if (bnad->cfg_flags & BNAD_CF_MSIX) {
                irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
                irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
                irq_flags = 0;
        } else {
                irq_handler = (irq_handler_t)bnad_isr;
                irq = bnad->pcidev->irq;
                irq_flags = IRQF_SHARED;
        }

        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

        /*
         * Set the Mbox IRQ disable flag, so that the IRQ handler
         * called from request_irq() for SHARED IRQs does not execute
         */
        set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

        BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

        err = request_irq(irq, irq_handler, irq_flags,
                          bnad->mbox_irq_name, bnad);

        return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
        kfree(intr_info->idl);
        intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
                    u32 txrx_id, struct bna_intr_info *intr_info)
{
        int i, vector_start = 0;
        u32 cfg_flags;
        unsigned long flags;

        spin_lock_irqsave(&bnad->bna_lock, flags);
        cfg_flags = bnad->cfg_flags;
        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        if (cfg_flags & BNAD_CF_MSIX) {
                intr_info->intr_type = BNA_INTR_T_MSIX;
                intr_info->idl = kcalloc(intr_info->num,
                                        sizeof(struct bna_intr_descr),
                                        GFP_KERNEL);
                if (!intr_info->idl)
                        return -ENOMEM;

                switch (src) {
                case BNAD_INTR_TX:
                        vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
                        break;

                case BNAD_INTR_RX:
                        vector_start = BNAD_MAILBOX_MSIX_VECTORS +
                                        (bnad->num_tx * bnad->num_txq_per_tx) +
                                        txrx_id;
                        break;

                default:
                        BUG();
                }

                for (i = 0; i < intr_info->num; i++)
                        intr_info->idl[i].vector = vector_start + i;
        } else {
                intr_info->intr_type = BNA_INTR_T_INTX;
                intr_info->num = 1;
                intr_info->idl = kcalloc(intr_info->num,
                                        sizeof(struct bna_intr_descr),
                                        GFP_KERNEL);
                if (!intr_info->idl)
                        return -ENOMEM;

                switch (src) {
                case BNAD_INTR_TX:
                        intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
                        break;

                case BNAD_INTR_RX:
                        intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
                        break;
                }
        }
        return 0;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
                        int num_txqs)
{
        int i;
        int vector_num;

        for (i = 0; i < num_txqs; i++) {
                if (tx_info->tcb[i] == NULL)
                        continue;

                vector_num = tx_info->tcb[i]->intr_vector;
                free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
        }
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
                        u32 tx_id, int num_txqs)
{
        int i;
        int err;
        int vector_num;

        for (i = 0; i < num_txqs; i++) {
                vector_num = tx_info->tcb[i]->intr_vector;
                sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
                                tx_id + tx_info->tcb[i]->id);
                err = request_irq(bnad->msix_table[vector_num].vector,
                                  (irq_handler_t)bnad_msix_tx, 0,
                                  tx_info->tcb[i]->name,
                                  tx_info->tcb[i]);
                if (err)
                        goto err_return;
        }

        return 0;

err_return:
        if (i > 0)
                /* unregister only the vectors that were successfully
                 * registered: indices 0 .. i-1 */
                bnad_tx_msix_unregister(bnad, tx_info, i);
        return -1;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
                        int num_rxps)
{
        int i;
        int vector_num;

        for (i = 0; i < num_rxps; i++) {
                if (rx_info->rx_ctrl[i].ccb == NULL)
                        continue;

                vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
                free_irq(bnad->msix_table[vector_num].vector,
                         rx_info->rx_ctrl[i].ccb);
        }
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
                        u32 rx_id, int num_rxps)
{
        int i;
        int err;
        int vector_num;

        for (i = 0; i < num_rxps; i++) {
                vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
                sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
                        bnad->netdev->name,
                        rx_id + rx_info->rx_ctrl[i].ccb->id);
                err = request_irq(bnad->msix_table[vector_num].vector,
                                  (irq_handler_t)bnad_msix_rx, 0,
                                  rx_info->rx_ctrl[i].ccb->name,
                                  rx_info->rx_ctrl[i].ccb);
                if (err)
                        goto err_return;
        }

        return 0;

err_return:
        if (i > 0)
                /* unregister only the vectors that were successfully
                 * registered: indices 0 .. i-1 */
                bnad_rx_msix_unregister(bnad, rx_info, i);
        return -1;
1424 }
1425
1426 /* Free Tx object Resources */
1427 static void
1428 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1429 {
1430         int i;
1431
1432         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1433                 if (res_info[i].res_type == BNA_RES_T_MEM)
1434                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1435                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1436                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1437         }
1438 }
1439
1440 /* Allocates memory and interrupt resources for Tx object */
1441 static int
1442 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1443                   u32 tx_id)
1444 {
1445         int i, err = 0;
1446
1447         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1448                 if (res_info[i].res_type == BNA_RES_T_MEM)
1449                         err = bnad_mem_alloc(bnad,
1450                                         &res_info[i].res_u.mem_info);
1451                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1452                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1453                                         &res_info[i].res_u.intr_info);
1454                 if (err)
1455                         goto err_return;
1456         }
1457         return 0;
1458
1459 err_return:
1460         bnad_tx_res_free(bnad, res_info);
1461         return err;
1462 }
1463
1464 /* Free Rx object Resources */
1465 static void
1466 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1467 {
1468         int i;
1469
1470         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1471                 if (res_info[i].res_type == BNA_RES_T_MEM)
1472                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1473                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1474                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1475         }
1476 }
1477
1478 /* Allocates memory and interrupt resources for Rx object */
1479 static int
1480 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1481                   uint rx_id)
1482 {
1483         int i, err = 0;
1484
1485         /* All memory needs to be allocated before setup_ccbs */
1486         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1487                 if (res_info[i].res_type == BNA_RES_T_MEM)
1488                         err = bnad_mem_alloc(bnad,
1489                                         &res_info[i].res_u.mem_info);
1490                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1491                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1492                                         &res_info[i].res_u.intr_info);
1493                 if (err)
1494                         goto err_return;
1495         }
1496         return 0;
1497
1498 err_return:
1499         bnad_rx_res_free(bnad, res_info);
1500         return err;
1501 }
1502
1503 /* Timer callbacks */
1504 /* a) IOC timer */
1505 static void
1506 bnad_ioc_timeout(unsigned long data)
1507 {
1508         struct bnad *bnad = (struct bnad *)data;
1509         unsigned long flags;
1510
1511         spin_lock_irqsave(&bnad->bna_lock, flags);
1512         bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1513         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1514 }
1515
1516 static void
1517 bnad_ioc_hb_check(unsigned long data)
1518 {
1519         struct bnad *bnad = (struct bnad *)data;
1520         unsigned long flags;
1521
1522         spin_lock_irqsave(&bnad->bna_lock, flags);
1523         bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1524         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1525 }
1526
1527 static void
1528 bnad_iocpf_timeout(unsigned long data)
1529 {
1530         struct bnad *bnad = (struct bnad *)data;
1531         unsigned long flags;
1532
1533         spin_lock_irqsave(&bnad->bna_lock, flags);
1534         bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1535         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1536 }
1537
1538 static void
1539 bnad_iocpf_sem_timeout(unsigned long data)
1540 {
1541         struct bnad *bnad = (struct bnad *)data;
1542         unsigned long flags;
1543
1544         spin_lock_irqsave(&bnad->bna_lock, flags);
1545         bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1546         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1547 }
1548
1549 /*
1550  * All timer routines use bnad->bna_lock to protect against
1551  * the following race, which could occur without locking:
1552  *      Time    CPU m   CPU n
1553  *      0       1 = test_bit
1554  *      1                       clear_bit
1555  *      2                       del_timer_sync
1556  *      3       mod_timer
1557  */
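
     /*
      * A hedged sketch of the stop-side pattern this implies (cf.
      * bnad_stats_timer_stop() below): clear the RUNNING bit under
      * bna_lock, then call del_timer_sync() outside the lock:
      *
      *	spin_lock_irqsave(&bnad->bna_lock, flags);
      *	if (test_and_clear_bit(<RUNNING bit>, &bnad->run_flags))
      *		to_del = 1;
      *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
      *	if (to_del)
      *		del_timer_sync(&timer);
      */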
1558
1559 /* b) Dynamic Interrupt Moderation Timer */
1560 static void
1561 bnad_dim_timeout(unsigned long data)
1562 {
1563         struct bnad *bnad = (struct bnad *)data;
1564         struct bnad_rx_info *rx_info;
1565         struct bnad_rx_ctrl *rx_ctrl;
1566         int i, j;
1567         unsigned long flags;
1568
1569         if (!netif_carrier_ok(bnad->netdev))
1570                 return;
1571
1572         spin_lock_irqsave(&bnad->bna_lock, flags);
1573         for (i = 0; i < bnad->num_rx; i++) {
1574                 rx_info = &bnad->rx_info[i];
1575                 if (!rx_info->rx)
1576                         continue;
1577                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1578                         rx_ctrl = &rx_info->rx_ctrl[j];
1579                         if (!rx_ctrl->ccb)
1580                                 continue;
1581                         bna_rx_dim_update(rx_ctrl->ccb);
1582                 }
1583         }
1584
1585         /* Re-check BNAD_RF_DIM_TIMER_RUNNING; narrows but does not eliminate the race */
1586         if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1587                 mod_timer(&bnad->dim_timer,
1588                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1589         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1590 }
1591
1592 /* c)  Statistics Timer */
1593 static void
1594 bnad_stats_timeout(unsigned long data)
1595 {
1596         struct bnad *bnad = (struct bnad *)data;
1597         unsigned long flags;
1598
1599         if (!netif_running(bnad->netdev) ||
1600                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1601                 return;
1602
1603         spin_lock_irqsave(&bnad->bna_lock, flags);
1604         bna_hw_stats_get(&bnad->bna);
1605         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1606 }
1607
1608 /*
1609  * Set up timer for DIM
1610  * Called with bnad->bna_lock held
1611  */
1612 void
1613 bnad_dim_timer_start(struct bnad *bnad)
1614 {
1615         if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1616             !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1617                 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1618                             (unsigned long)bnad);
1619                 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1620                 mod_timer(&bnad->dim_timer,
1621                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1622         }
1623 }
1624
1625 /*
1626  * Set up timer for statistics
1627  * Called with mutex_lock(&bnad->conf_mutex) held
1628  */
1629 static void
1630 bnad_stats_timer_start(struct bnad *bnad)
1631 {
1632         unsigned long flags;
1633
1634         spin_lock_irqsave(&bnad->bna_lock, flags);
1635         if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1636                 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1637                             (unsigned long)bnad);
1638                 mod_timer(&bnad->stats_timer,
1639                           jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1640         }
1641         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1642 }
1643
1644 /*
1645  * Stops the stats timer
1646  * Called with mutex_lock(&bnad->conf_mutex) held
1647  */
1648 static void
1649 bnad_stats_timer_stop(struct bnad *bnad)
1650 {
1651         int to_del = 0;
1652         unsigned long flags;
1653
1654         spin_lock_irqsave(&bnad->bna_lock, flags);
1655         if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1656                 to_del = 1;
1657         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1658         if (to_del)
1659                 del_timer_sync(&bnad->stats_timer);
1660 }
1661
1662 /* Utilities */
1663
1664 static void
1665 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1666 {
1667         int i = 1; /* Index 0 has broadcast address */
1668         struct netdev_hw_addr *mc_addr;
1669
1670         netdev_for_each_mc_addr(mc_addr, netdev) {
1671                 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1672                                                         ETH_ALEN);
1673                 i++;
1674         }
1675 }
1676
1677 static int
1678 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1679 {
1680         struct bnad_rx_ctrl *rx_ctrl =
1681                 container_of(napi, struct bnad_rx_ctrl, napi);
1682         struct bnad *bnad = rx_ctrl->bnad;
1683         int rcvd = 0;
1684
1685         rx_ctrl->rx_poll_ctr++;
1686
1687         if (!netif_carrier_ok(bnad->netdev))
1688                 goto poll_exit;
1689
1690         rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
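             /*
              * Budget exhausted: return without napi_complete() so the
              * core keeps polling us; the Rx IRQ stays masked.
              */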
1691         if (rcvd >= budget)
1692                 return rcvd;
1693
1694 poll_exit:
1695         napi_complete(napi);
1696
1697         rx_ctrl->rx_complete++;
1698
1699         if (rx_ctrl->ccb)
1700                 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1701
1702         return rcvd;
1703 }
1704
1705 #define BNAD_NAPI_POLL_QUOTA            64
1706 static void
1707 bnad_napi_init(struct bnad *bnad, u32 rx_id)
1708 {
1709         struct bnad_rx_ctrl *rx_ctrl;
1710         int i;
1711
1712         /* Initialize NAPI */
1713         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1714                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1715                 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1716                                bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1717         }
1718 }
1719
1720 static void
1721 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1722 {
1723         struct bnad_rx_ctrl *rx_ctrl;
1724         int i;
1725
1726         /* Enable NAPI */
1727         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1728                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1729
1730                 napi_enable(&rx_ctrl->napi);
1731         }
1732 }
1733
1734 static void
1735 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1736 {
1737         int i;
1738
1739         /* First disable and then clean up */
1740         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1741                 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1742                 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1743         }
1744 }
1745
1746 /* Should be called with conf_lock held */
1747 void
1748 bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1749 {
1750         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1751         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1752         unsigned long flags;
1753
1754         if (!tx_info->tx)
1755                 return;
1756
1757         init_completion(&bnad->bnad_completions.tx_comp);
1758         spin_lock_irqsave(&bnad->bna_lock, flags);
1759         bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1760         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1761         wait_for_completion(&bnad->bnad_completions.tx_comp);
1762
1763         if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1764                 bnad_tx_msix_unregister(bnad, tx_info,
1765                         bnad->num_txq_per_tx);
1766
1767         if (tx_id == 0)
1768                 tasklet_kill(&bnad->tx_free_tasklet);
1769
1770         spin_lock_irqsave(&bnad->bna_lock, flags);
1771         bna_tx_destroy(tx_info->tx);
1772         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1773
1774         tx_info->tx = NULL;
1775         tx_info->tx_id = 0;
1776
1777         bnad_tx_res_free(bnad, res_info);
1778 }
1779
1780 /* Should be called with conf_lock held */
1781 int
1782 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1783 {
1784         int err;
1785         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1786         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1787         struct bna_intr_info *intr_info =
1788                         &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1789         struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1790         static const struct bna_tx_event_cbfn tx_cbfn = {
1791                 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1792                 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1793                 .tx_stall_cbfn = bnad_cb_tx_stall,
1794                 .tx_resume_cbfn = bnad_cb_tx_resume,
1795                 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1796         };
1797
1798         struct bna_tx *tx;
1799         unsigned long flags;
1800
1801         tx_info->tx_id = tx_id;
1802
1803         /* Initialize the Tx object configuration */
1804         tx_config->num_txq = bnad->num_txq_per_tx;
1805         tx_config->txq_depth = bnad->txq_depth;
1806         tx_config->tx_type = BNA_TX_T_REGULAR;
1807         tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1808
1809         /* Get BNA's resource requirement for one tx object */
1810         spin_lock_irqsave(&bnad->bna_lock, flags);
1811         bna_tx_res_req(bnad->num_txq_per_tx,
1812                 bnad->txq_depth, res_info);
1813         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1814
1815         /* Fill Unmap Q memory requirements */
1816         BNAD_FILL_UNMAPQ_MEM_REQ(
1817                         &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1818                         bnad->num_txq_per_tx,
1819                         BNAD_TX_UNMAPQ_DEPTH);
1820
1821         /* Allocate resources */
1822         err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1823         if (err)
1824                 return err;
1825
1826         /* Ask BNA to create one Tx object, supplying required resources */
1827         spin_lock_irqsave(&bnad->bna_lock, flags);
1828         tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1829                         tx_info);
1830         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1831         if (!tx)
1832                 goto err_return;
1833         tx_info->tx = tx;
1834
1835         /* Register ISR for the Tx object */
1836         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1837                 err = bnad_tx_msix_register(bnad, tx_info,
1838                         tx_id, bnad->num_txq_per_tx);
1839                 if (err)
1840                         goto err_return;
1841         }
1842
1843         spin_lock_irqsave(&bnad->bna_lock, flags);
1844         bna_tx_enable(tx);
1845         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1846
1847         return 0;
1848
1849 err_return:
1850         bnad_tx_res_free(bnad, res_info);
1851         return err;
1852 }
1853
1854 /* Set up the Rx config for bna_rx_create */
1855 /* bnad decides the configuration */
1856 static void
1857 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1858 {
1859         rx_config->rx_type = BNA_RX_T_REGULAR;
1860         rx_config->num_paths = bnad->num_rxp_per_rx;
1861         rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1862
1863         if (bnad->num_rxp_per_rx > 1) {
1864                 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1865                 rx_config->rss_config.hash_type =
1866                                 (BFI_ENET_RSS_IPV6 |
1867                                  BFI_ENET_RSS_IPV6_TCP |
1868                                  BFI_ENET_RSS_IPV4 |
1869                                  BFI_ENET_RSS_IPV4_TCP);
1870                 rx_config->rss_config.hash_mask =
1871                                 bnad->num_rxp_per_rx - 1;
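                     /*
                      * A fresh random Toeplitz key per Rx setup, so the
                      * flow-to-RxP distribution varies from setup to setup.
                      */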
1872                 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1873                         sizeof(rx_config->rss_config.toeplitz_hash_key));
1874         } else {
1875                 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1876                 memset(&rx_config->rss_config, 0,
1877                        sizeof(rx_config->rss_config));
1878         }
1879         rx_config->rxp_type = BNA_RXP_SLR;
1880         rx_config->q_depth = bnad->rxq_depth;
1881
1882         rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1883
1884         rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1885 }
1886
1887 static void
1888 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1889 {
1890         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1891         int i;
1892
1893         for (i = 0; i < bnad->num_rxp_per_rx; i++)
1894                 rx_info->rx_ctrl[i].bnad = bnad;
1895 }
1896
1897 /* Called with mutex_lock(&bnad->conf_mutex) held */
1898 void
1899 bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1900 {
1901         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1902         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1903         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1904         unsigned long flags;
1905         int to_del = 0;
1906
1907         if (!rx_info->rx)
1908                 return;
1909
1910         if (rx_id == 0) {
1911                 spin_lock_irqsave(&bnad->bna_lock, flags);
1912                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1913                     test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1914                         clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1915                         to_del = 1;
1916                 }
1917                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1918                 if (to_del)
1919                         del_timer_sync(&bnad->dim_timer);
1920         }
1921
1922         init_completion(&bnad->bnad_completions.rx_comp);
1923         spin_lock_irqsave(&bnad->bna_lock, flags);
1924         bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1925         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1926         wait_for_completion(&bnad->bnad_completions.rx_comp);
1927
1928         if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1929                 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1930
1931         bnad_napi_disable(bnad, rx_id);
1932
1933         spin_lock_irqsave(&bnad->bna_lock, flags);
1934         bna_rx_destroy(rx_info->rx);
1935
1936         rx_info->rx = NULL;
1937         rx_info->rx_id = 0;
1938         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1939
1940         bnad_rx_res_free(bnad, res_info);
1941 }
1942
1943 /* Called with mutex_lock(&bnad->conf_mutex) held */
1944 int
1945 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1946 {
1947         int err;
1948         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1949         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1950         struct bna_intr_info *intr_info =
1951                         &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1952         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1953         static const struct bna_rx_event_cbfn rx_cbfn = {
1954                 .rcb_setup_cbfn = bnad_cb_rcb_setup,
1955                 .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
1956                 .ccb_setup_cbfn = bnad_cb_ccb_setup,
1957                 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
1958                 .rx_stall_cbfn = bnad_cb_rx_stall,
1959                 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
1960                 .rx_post_cbfn = bnad_cb_rx_post,
1961         };
1962         struct bna_rx *rx;
1963         unsigned long flags;
1964
1965         rx_info->rx_id = rx_id;
1966
1967         /* Initialize the Rx object configuration */
1968         bnad_init_rx_config(bnad, rx_config);
1969
1970         /* Get BNA's resource requirement for one Rx object */
1971         spin_lock_irqsave(&bnad->bna_lock, flags);
1972         bna_rx_res_req(rx_config, res_info);
1973         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1974
1975         /* Fill Unmap Q memory requirements */
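             /*
              * Non-SINGLE rxp types drive two RxQs per path (a large- and
              * a small-buffer queue, cf. small_buff_size above), hence
              * num_paths is counted twice for them.
              */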
1976         BNAD_FILL_UNMAPQ_MEM_REQ(
1977                         &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1978                         rx_config->num_paths +
1979                         ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1980                                 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1981
1982         /* Allocate resource */
1983         err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1984         if (err)
1985                 return err;
1986
1987         bnad_rx_ctrl_init(bnad, rx_id);
1988
1989         /* Ask BNA to create one Rx object, supplying required resources */
1990         spin_lock_irqsave(&bnad->bna_lock, flags);
1991         rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1992                         rx_info);
1993         if (!rx) {
1994                 err = -ENOMEM;
1995                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1996                 goto err_return;
1997         }
1998         rx_info->rx = rx;
1999         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2000
2001         /*
2002          * Init NAPI: the state starts out as NAPI_STATE_SCHED, so the
2003          * IRQ handler cannot schedule NAPI at this point.
2004          */
2005         bnad_napi_init(bnad, rx_id);
2006
2007         /* Register ISR for the Rx object */
2008         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2009                 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2010                                                 rx_config->num_paths);
2011                 if (err)
2012                         goto err_return;
2013         }
2014
2015         spin_lock_irqsave(&bnad->bna_lock, flags);
2016         if (rx_id == 0) {
2017                 /* Set up Dynamic Interrupt Moderation Vector */
2018                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2019                         bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2020
2021                 /* Enable VLAN filtering only on the default Rx */
2022                 bna_rx_vlanfilter_enable(rx);
2023
2024                 /* Start the DIM timer */
2025                 bnad_dim_timer_start(bnad);
2026         }
2027
2028         bna_rx_enable(rx);
2029         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2030
2031         /* Enable scheduling of NAPI */
2032         bnad_napi_enable(bnad, rx_id);
2033
2034         return 0;
2035
2036 err_return:
2037         bnad_cleanup_rx(bnad, rx_id);
2038         return err;
2039 }
2040
2041 /* Called with conf_lock & bnad->bna_lock held */
2042 void
2043 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2044 {
2045         struct bnad_tx_info *tx_info;
2046
2047         tx_info = &bnad->tx_info[0];
2048         if (!tx_info->tx)
2049                 return;
2050
2051         bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2052 }
2053
2054 /* Called with conf_lock & bnad->bna_lock held */
2055 void
2056 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2057 {
2058         struct bnad_rx_info *rx_info;
2059         int     i;
2060
2061         for (i = 0; i < bnad->num_rx; i++) {
2062                 rx_info = &bnad->rx_info[i];
2063                 if (!rx_info->rx)
2064                         continue;
2065                 bna_rx_coalescing_timeo_set(rx_info->rx,
2066                                 bnad->rx_coalescing_timeo);
2067         }
2068 }
2069
2070 /*
2071  * Called with bnad->bna_lock held
2072  */
2073 int
2074 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2075 {
2076         int ret;
2077
2078         if (!is_valid_ether_addr(mac_addr))
2079                 return -EADDRNOTAVAIL;
2080
2081         /* If datapath is down, pretend everything went through */
2082         if (!bnad->rx_info[0].rx)
2083                 return 0;
2084
2085         ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2086         if (ret != BNA_CB_SUCCESS)
2087                 return -EADDRNOTAVAIL;
2088
2089         return 0;
2090 }
2091
2092 /* Should be called with conf_lock held */
2093 int
2094 bnad_enable_default_bcast(struct bnad *bnad)
2095 {
2096         struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2097         int ret;
2098         unsigned long flags;
2099
2100         init_completion(&bnad->bnad_completions.mcast_comp);
2101
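             /*
              * bna_rx_mcast_add() runs asynchronously; the
              * bnad_cb_rx_mcast_add callback signals mcast_comp when the
              * operation finishes, hence the wait below.
              */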
2102         spin_lock_irqsave(&bnad->bna_lock, flags);
2103         ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2104                                 bnad_cb_rx_mcast_add);
2105         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2106
2107         if (ret == BNA_CB_SUCCESS)
2108                 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2109         else
2110                 return -ENODEV;
2111
2112         if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2113                 return -ENODEV;
2114
2115         return 0;
2116 }
2117
2118 /* Called with mutex_lock(&bnad->conf_mutex) held */
2119 void
2120 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2121 {
2122         u16 vid;
2123         unsigned long flags;
2124
2125         for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2126                 spin_lock_irqsave(&bnad->bna_lock, flags);
2127                 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2128                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2129         }
2130 }
2131
2132 /* Statistics utilities */
2133 void
2134 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2135 {
2136         int i, j;
2137
2138         for (i = 0; i < bnad->num_rx; i++) {
2139                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2140                         if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2141                                 stats->rx_packets += bnad->rx_info[i].
2142                                 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2143                                 stats->rx_bytes += bnad->rx_info[i].
2144                                         rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2145                                 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2146                                         bnad->rx_info[i].rx_ctrl[j].ccb->
2147                                         rcb[1]->rxq) {
2148                                         stats->rx_packets +=
2149                                                 bnad->rx_info[i].rx_ctrl[j].
2150                                                 ccb->rcb[1]->rxq->rx_packets;
2151                                         stats->rx_bytes +=
2152                                                 bnad->rx_info[i].rx_ctrl[j].
2153                                                 ccb->rcb[1]->rxq->rx_bytes;
2154                                 }
2155                         }
2156                 }
2157         }
2158         for (i = 0; i < bnad->num_tx; i++) {
2159                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2160                         if (bnad->tx_info[i].tcb[j]) {
2161                                 stats->tx_packets +=
2162                                 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2163                                 stats->tx_bytes +=
2164                                         bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2165                         }
2166                 }
2167         }
2168 }
2169
2170 /*
2171  * Must be called with the bna_lock held.
2172  */
2173 void
2174 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2175 {
2176         struct bfi_enet_stats_mac *mac_stats;
2177         u32 bmap;
2178         int i;
2179
2180         mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2181         stats->rx_errors =
2182                 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2183                 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2184                 mac_stats->rx_undersize;
2185         stats->tx_errors = mac_stats->tx_fcs_error +
2186                                         mac_stats->tx_undersize;
2187         stats->rx_dropped = mac_stats->rx_drop;
2188         stats->tx_dropped = mac_stats->tx_drop;
2189         stats->multicast = mac_stats->rx_multicast;
2190         stats->collisions = mac_stats->tx_total_collision;
2191
2192         stats->rx_length_errors = mac_stats->rx_frame_length_error;
2193
2194         /* receive ring buffer overflow: no dedicated counter available */
2195
2196         stats->rx_crc_errors = mac_stats->rx_fcs_error;
2197         stats->rx_frame_errors = mac_stats->rx_alignment_error;
2198         /* receiver FIFO overrun */
2199         bmap = bna_rx_rid_mask(&bnad->bna);
2200         for (i = 0; bmap; i++) {
2201                 if (bmap & 1) {
2202                         stats->rx_fifo_errors +=
2203                                 bnad->stats.bna_stats->
2204                                         hw_stats.rxf_stats[i].frame_drops;
2205                         break;
2206                 }
2207                 bmap >>= 1;
2208         }
2209 }
2210
2211 static void
2212 bnad_mbox_irq_sync(struct bnad *bnad)
2213 {
2214         u32 irq;
2215         unsigned long flags;
2216
2217         spin_lock_irqsave(&bnad->bna_lock, flags);
2218         if (bnad->cfg_flags & BNAD_CF_MSIX)
2219                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2220         else
2221                 irq = bnad->pcidev->irq;
2222         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2223
2224         synchronize_irq(irq);
2225 }
2226
2227 /* Utility used by bnad_start_xmit for TSO preparation */
2228 static int
2229 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2230 {
2231         int err;
2232
2233         if (skb_header_cloned(skb)) {
2234                 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2235                 if (err) {
2236                         BNAD_UPDATE_CTR(bnad, tso_err);
2237                         return err;
2238                 }
2239         }
2240
2241         /*
2242          * For TSO, the TCP checksum field is seeded with pseudo-header sum
2243          * excluding the length field.
2244          */
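             /*
              * The length is left out because the hardware recomputes and
              * folds in the per-segment length when it slices the frame;
              * hence the 0 length argument in the calls below.
              */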
2245         if (skb->protocol == htons(ETH_P_IP)) {
2246                 struct iphdr *iph = ip_hdr(skb);
2247
2248                 /* tot_len and check are recomputed per segment by the hardware */
2249                 iph->tot_len = 0;
2250                 iph->check = 0;
2251
2252                 tcp_hdr(skb)->check =
2253                         ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2254                                            IPPROTO_TCP, 0);
2255                 BNAD_UPDATE_CTR(bnad, tso4);
2256         } else {
2257                 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2258
2259                 ipv6h->payload_len = 0;
2260                 tcp_hdr(skb)->check =
2261                         ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2262                                          IPPROTO_TCP, 0);
2263                 BNAD_UPDATE_CTR(bnad, tso6);
2264         }
2265
2266         return 0;
2267 }
2268
2269 /*
2270  * Initialize Q numbers depending on Rx Paths
2271  * Called with bnad->bna_lock held, because of cfg_flags
2272  * access.
2273  */
2274 static void
2275 bnad_q_num_init(struct bnad *bnad)
2276 {
2277         int rxps;
2278
2279         rxps = min((uint)num_online_cpus(),
2280                         (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2281
2282         if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2283                 rxps = 1;       /* INTx */
2284
2285         bnad->num_rx = 1;
2286         bnad->num_tx = 1;
2287         bnad->num_rxp_per_rx = rxps;
2288         bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2289 }
2290
2291 /*
2292  * Adjusts the Q numbers, given a number of MSI-X vectors.
2293  * Gives preference to RSS over Tx priority queues; in that case
2294  * only one Tx Q is used.
2295  * Called with bnad->bna_lock held because of cfg_flags access.
2296  */
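     /*
      * A hedged example, assuming BNAD_MAILBOX_MSIX_VECTORS is 1: with
      * msix_vectors = 5 and num_tx = num_txq_per_tx = 1, the condition
      * 5 >= 1 + bnad_rxqs_per_cq + 1 holds and num_rxp_per_rx becomes
      * 5 - 1 - 1 = 3.
      */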
2297 static void
2298 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2299 {
2300         bnad->num_txq_per_tx = 1;
2301         if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2302              bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2303             (bnad->cfg_flags & BNAD_CF_MSIX)) {
2304                 bnad->num_rxp_per_rx = msix_vectors -
2305                         (bnad->num_tx * bnad->num_txq_per_tx) -
2306                         BNAD_MAILBOX_MSIX_VECTORS;
2307         } else
2308                 bnad->num_rxp_per_rx = 1;
2309 }
2310
2311 /* Enable / disable ioceth */
2312 static int
2313 bnad_ioceth_disable(struct bnad *bnad)
2314 {
2315         unsigned long flags;
2316         int err = 0;
2317
2318         spin_lock_irqsave(&bnad->bna_lock, flags);
2319         init_completion(&bnad->bnad_completions.ioc_comp);
2320         bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2321         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2322
2323         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2324                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2325
2326         err = bnad->bnad_completions.ioc_comp_status;
2327         return err;
2328 }
2329
2330 static int
2331 bnad_ioceth_enable(struct bnad *bnad)
2332 {
2333         int err = 0;
2334         unsigned long flags;
2335
2336         spin_lock_irqsave(&bnad->bna_lock, flags);
2337         init_completion(&bnad->bnad_completions.ioc_comp);
2338         bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2339         bna_ioceth_enable(&bnad->bna.ioceth);
2340         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2341
2342         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2343                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2344
2345         err = bnad->bnad_completions.ioc_comp_status;
2346
2347         return err;
2348 }
2349
2350 /* Free BNA resources */
2351 static void
2352 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2353                 u32 res_val_max)
2354 {
2355         int i;
2356
2357         for (i = 0; i < res_val_max; i++)
2358                 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2359 }
2360
2361 /* Allocates memory and interrupt resources for BNA */
2362 static int
2363 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2364                 u32 res_val_max)
2365 {
2366         int i, err;
2367
2368         for (i = 0; i < res_val_max; i++) {
2369                 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2370                 if (err)
2371                         goto err_return;
2372         }
2373         return 0;
2374
2375 err_return:
2376         bnad_res_free(bnad, res_info, res_val_max);
2377         return err;
2378 }
2379
2380 /* Interrupt enable / disable */
2381 static void
2382 bnad_enable_msix(struct bnad *bnad)
2383 {
2384         int i, ret;
2385         unsigned long flags;
2386
2387         spin_lock_irqsave(&bnad->bna_lock, flags);
2388         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2389                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2390                 return;
2391         }
2392         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2393
2394         if (bnad->msix_table)
2395                 return;
2396
2397         bnad->msix_table =
2398                 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2399
2400         if (!bnad->msix_table)
2401                 goto intx_mode;
2402
2403         for (i = 0; i < bnad->msix_num; i++)
2404                 bnad->msix_table[i].entry = i;
2405
2406         ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2407         if (ret > 0) {
2408                 /* Not enough MSI-X vectors. */
2409                 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2410                         ret, bnad->msix_num);
2411
2412                 spin_lock_irqsave(&bnad->bna_lock, flags);
2413                 /* ret = number of vectors actually allocated */
2414                 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2415                         (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2416                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2417
2418                 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2419                          BNAD_MAILBOX_MSIX_VECTORS;
2420
2421                 if (bnad->msix_num > ret)
2422                         goto intx_mode;
2423
2424                 /* Try once more with adjusted numbers */
2425                 /* If this fails, fall back to INTx */
2426                 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2427                                       bnad->msix_num);
2428                 if (ret)
2429                         goto intx_mode;
2430
2431         } else if (ret < 0)
2432                 goto intx_mode;
2433
2434         pci_intx(bnad->pcidev, 0);
2435
2436         return;
2437
2438 intx_mode:
2439         pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2440
2441         kfree(bnad->msix_table);
2442         bnad->msix_table = NULL;
2443         bnad->msix_num = 0;
2444         spin_lock_irqsave(&bnad->bna_lock, flags);
2445         bnad->cfg_flags &= ~BNAD_CF_MSIX;
2446         bnad_q_num_init(bnad);
2447         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2448 }
2449
2450 static void
2451 bnad_disable_msix(struct bnad *bnad)
2452 {
2453         u32 cfg_flags;
2454         unsigned long flags;
2455
2456         spin_lock_irqsave(&bnad->bna_lock, flags);
2457         cfg_flags = bnad->cfg_flags;
2458         if (bnad->cfg_flags & BNAD_CF_MSIX)
2459                 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2460         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2461
2462         if (cfg_flags & BNAD_CF_MSIX) {
2463                 pci_disable_msix(bnad->pcidev);
2464                 kfree(bnad->msix_table);
2465                 bnad->msix_table = NULL;
2466         }
2467 }
2468
2469 /* Netdev entry points */
2470 static int
2471 bnad_open(struct net_device *netdev)
2472 {
2473         int err;
2474         struct bnad *bnad = netdev_priv(netdev);
2475         struct bna_pause_config pause_config;
2476         int mtu;
2477         unsigned long flags;
2478
2479         mutex_lock(&bnad->conf_mutex);
2480
2481         /* Tx */
2482         err = bnad_setup_tx(bnad, 0);
2483         if (err)
2484                 goto err_return;
2485
2486         /* Rx */
2487         err = bnad_setup_rx(bnad, 0);
2488         if (err)
2489                 goto cleanup_tx;
2490
2491         /* Port */
2492         pause_config.tx_pause = 0;
2493         pause_config.rx_pause = 0;
2494
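             /*
              * The enet MTU is the maximum on-wire frame size: Ethernet
              * header + one VLAN tag + L3 MTU + FCS.
              */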
2495         mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2496
2497         spin_lock_irqsave(&bnad->bna_lock, flags);
2498         bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2499         bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2500         bna_enet_enable(&bnad->bna.enet);
2501         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2502
2503         /* Enable broadcast */
2504         bnad_enable_default_bcast(bnad);
2505
2506         /* Restore VLANs, if any */
2507         bnad_restore_vlans(bnad, 0);
2508
2509         /* Set the UCAST address */
2510         spin_lock_irqsave(&bnad->bna_lock, flags);
2511         bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2512         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2513
2514         /* Start the stats timer */
2515         bnad_stats_timer_start(bnad);
2516
2517         mutex_unlock(&bnad->conf_mutex);
2518
2519         return 0;
2520
2521 cleanup_tx:
2522         bnad_cleanup_tx(bnad, 0);
2523
2524 err_return:
2525         mutex_unlock(&bnad->conf_mutex);
2526         return err;
2527 }
2528
2529 static int
2530 bnad_stop(struct net_device *netdev)
2531 {
2532         struct bnad *bnad = netdev_priv(netdev);
2533         unsigned long flags;
2534
2535         mutex_lock(&bnad->conf_mutex);
2536
2537         /* Stop the stats timer */
2538         bnad_stats_timer_stop(bnad);
2539
2540         init_completion(&bnad->bnad_completions.enet_comp);
2541
2542         spin_lock_irqsave(&bnad->bna_lock, flags);
2543         bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2544                         bnad_cb_enet_disabled);
2545         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2546
2547         wait_for_completion(&bnad->bnad_completions.enet_comp);
2548
2549         bnad_cleanup_tx(bnad, 0);
2550         bnad_cleanup_rx(bnad, 0);
2551
2552         /* Synchronize mailbox IRQ */
2553         bnad_mbox_irq_sync(bnad);
2554
2555         mutex_unlock(&bnad->conf_mutex);
2556
2557         return 0;
2558 }
2559
2560 /* TX */
2561 /*
2562  * bnad_start_xmit : Netdev entry point for Transmit
2563  *                   Called under lock held by net_device
2564  */
2565 static netdev_tx_t
2566 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2567 {
2568         struct bnad *bnad = netdev_priv(netdev);
2569         u32 txq_id = 0;
2570         struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
2571
2572         u16             txq_prod, vlan_tag = 0;
2573         u32             unmap_prod, wis, wis_used, wi_range;
2574         u32             vectors, vect_id, i, acked;
2575         int                     err;
2576         unsigned int            len;
2577         u32                             gso_size;
2578
2579         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
2580         dma_addr_t              dma_addr;
2581         struct bna_txq_entry *txqent;
2582         u16     flags;
2583
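             /*
              * Early validation: each drop below bumps a distinct counter
              * via BNAD_UPDATE_CTR() so the cause shows up in the driver
              * statistics.
              */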
2584         if (unlikely(skb->len <= ETH_HLEN)) {
2585                 dev_kfree_skb(skb);
2586                 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2587                 return NETDEV_TX_OK;
2588         }
2589         if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2590                 dev_kfree_skb(skb);
2591                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2592                 return NETDEV_TX_OK;
2593         }
2594         if (unlikely(skb_headlen(skb) == 0)) {
2595                 dev_kfree_skb(skb);
2596                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2597                 return NETDEV_TX_OK;
2598         }
2599
2600         /*
2601          * Catches a Tx scheduled between the BNAD_TXQ_TX_STARTED flag
2602          * being cleared and the netif_tx_stop_all_queues() call.
2603          */
2604         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2605                 dev_kfree_skb(skb);
2606                 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2607                 return NETDEV_TX_OK;
2608         }
2609
2610         vectors = 1 + skb_shinfo(skb)->nr_frags;
2611         if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2612                 dev_kfree_skb(skb);
2613                 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2614                 return NETDEV_TX_OK;
2615         }
2616         wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
2617         acked = 0;
2618         if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2619                         vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2620                 if ((u16) (*tcb->hw_consumer_index) !=
2621                     tcb->consumer_index &&
2622                     !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2623                         acked = bnad_free_txbufs(bnad, tcb);
2624                         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2625                                 bna_ib_ack(tcb->i_dbell, acked);
2626                         smp_mb__before_clear_bit();
2627                         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2628                 } else {
2629                         netif_stop_queue(netdev);
2630                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2631                 }
2632
2633                 smp_mb();
2634                 /*
2635                  * Check again to handle the race between
2636                  * netif_stop_queue() here and netif_wake_queue() in the
2637                  * interrupt handler, which runs outside the netif tx lock.
2638                  */
2639                 if (likely
2640                     (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2641                      vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2642                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2643                         return NETDEV_TX_BUSY;
2644                 } else {
2645                         netif_wake_queue(netdev);
2646                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2647                 }
2648         }
2649
2650         unmap_prod = unmap_q->producer_index;
2651         flags = 0;
2652
2653         txq_prod = tcb->producer_index;
2654         BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2655         txqent->hdr.wi.reserved = 0;
2656         txqent->hdr.wi.num_vectors = vectors;
2657
2658         if (vlan_tx_tag_present(skb)) {
2659                 vlan_tag = (u16) vlan_tx_tag_get(skb);
2660                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2661         }
2662         if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2663                 vlan_tag =
2664                         (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2665                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2666         }
2667
2668         txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2669
2670         if (skb_is_gso(skb)) {
2671                 gso_size = skb_shinfo(skb)->gso_size;
2672
2673                 if (unlikely(gso_size > netdev->mtu)) {
2674                         dev_kfree_skb(skb);
2675                         BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2676                         return NETDEV_TX_OK;
2677                 }
2678                 if (unlikely((gso_size + skb_transport_offset(skb) +
2679                         tcp_hdrlen(skb)) >= skb->len)) {
2680                         txqent->hdr.wi.opcode =
2681                                 __constant_htons(BNA_TXQ_WI_SEND);
2682                         txqent->hdr.wi.lso_mss = 0;
2683                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2684                 } else {
2685                         txqent->hdr.wi.opcode =
2686                                 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2687                         txqent->hdr.wi.lso_mss = htons(gso_size);
2688                 }
2689
2690                 err = bnad_tso_prepare(bnad, skb);
2691                 if (unlikely(err)) {
2692                         dev_kfree_skb(skb);
2693                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2694                         return NETDEV_TX_OK;
2695                 }
2696                 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2697                 txqent->hdr.wi.l4_hdr_size_n_offset =
2698                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2699                               (tcp_hdrlen(skb) >> 2,
2700                                skb_transport_offset(skb)));
2701         } else {
2702                 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2703                 txqent->hdr.wi.lso_mss = 0;
2704
2705                 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2706                         dev_kfree_skb(skb);
2707                         BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2708                         return NETDEV_TX_OK;
2709                 }
2710
2711                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2712                         u8 proto = 0;
2713
2714                         if (skb->protocol == __constant_htons(ETH_P_IP))
2715                                 proto = ip_hdr(skb)->protocol;
2716                         else if (skb->protocol ==
2717                                  __constant_htons(ETH_P_IPV6)) {
2718                                 /* nexthdr may not be TCP immediately. */
2719                                 proto = ipv6_hdr(skb)->nexthdr;
2720                         }
2721                         if (proto == IPPROTO_TCP) {
2722                                 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2723                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2724                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2725                                               (0, skb_transport_offset(skb)));
2726
2727                                 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2728
2729                                 if (unlikely(skb_headlen(skb) <
2730                                 skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2731                                         dev_kfree_skb(skb);
2732                                         BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2733                                         return NETDEV_TX_OK;
2734                                 }
2735
2736                         } else if (proto == IPPROTO_UDP) {
2737                                 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2738                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2739                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2740                                               (0, skb_transport_offset(skb)));
2741
2742                                 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2743                                 if (unlikely(skb_headlen(skb) <
2744                                     skb_transport_offset(skb) +
2745                                     sizeof(struct udphdr))) {
2746                                         dev_kfree_skb(skb);
2747                                         BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2748                                         return NETDEV_TX_OK;
2749                                 }
2750                         } else {
2751                                 dev_kfree_skb(skb);
2752                                 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2753                                 return NETDEV_TX_OK;
2754                         }
2755                 } else {
2756                         txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2757                 }
2758         }
2759
2760         txqent->hdr.wi.flags = htons(flags);
2761
2762         txqent->hdr.wi.frame_length = htonl(skb->len);
2763
2764         unmap_q->unmap_array[unmap_prod].skb = skb;
2765         len = skb_headlen(skb);
2766         txqent->vector[0].length = htons(len);
2767         dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2768                                   skb_headlen(skb), DMA_TO_DEVICE);
2769         dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2770                            dma_addr);
2771
2772         BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
2773         BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2774
2775         vect_id = 0;
2776         wis_used = 1;
2777
2778         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2779                 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2780                 u16             size = skb_frag_size(frag);
2781
2782                 if (unlikely(size == 0)) {
2783                         unmap_prod = unmap_q->producer_index;
2784
2785                         unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2786                                            unmap_q->unmap_array,
2787                                            unmap_prod, unmap_q->q_depth, skb,
2788                                            i);
2789                         dev_kfree_skb(skb);
2790                         BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2791                         return NETDEV_TX_OK;
2792                 }
2793
2794                 len += size;
2795
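                     /*
                      * Current work item is full: advance to the next WI,
                      * refetching the queue-page pointer once wi_range is
                      * exhausted.
                      */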
2796                 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2797                         vect_id = 0;
2798                         if (--wi_range)
2799                                 txqent++;
2800                         else {
2801                                 BNA_QE_INDX_ADD(txq_prod, wis_used,
2802                                                 tcb->q_depth);
2803                                 wis_used = 0;
2804                                 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2805                                                      txqent, wi_range);
2806                         }
2807                         wis_used++;
2808                         txqent->hdr.wi_ext.opcode =
2809                                 __constant_htons(BNA_TXQ_WI_EXTENSION);
2810                 }
2811
2812                 BUG_ON(size > BFI_TX_MAX_DATA_PER_VECTOR);
2813                 txqent->vector[vect_id].length = htons(size);
2814                 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2815                                             0, size, DMA_TO_DEVICE);
2816                 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2817                                    dma_addr);
2818                 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2819                 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2820         }
2821
2822         if (unlikely(len != skb->len)) {
2823                 unmap_prod = unmap_q->producer_index;
2824
2825                 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2826                                 unmap_q->unmap_array, unmap_prod,
2827                                 unmap_q->q_depth, skb,
2828                                 skb_shinfo(skb)->nr_frags);
2829                 dev_kfree_skb(skb);
2830                 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2831                 return NETDEV_TX_OK;
2832         }
2833
2834         unmap_q->producer_index = unmap_prod;
2835         BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2836         tcb->producer_index = txq_prod;
2837
2838         smp_mb();
2839
2840         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2841                 return NETDEV_TX_OK;
2842
2843         bna_txq_prod_indx_doorbell(tcb);
2844         smp_mb();
2845
2846         if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2847                 tasklet_schedule(&bnad->tx_free_tasklet);
2848
2849         return NETDEV_TX_OK;
2850 }
2851
2852 /*
2853  * Uses bna_lock to synchronize reading of the stats structures,
2854  * which are written by BNA under the same lock.
2855  */
2856 static struct rtnl_link_stats64 *
2857 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2858 {
2859         struct bnad *bnad = netdev_priv(netdev);
2860         unsigned long flags;
2861
2862         spin_lock_irqsave(&bnad->bna_lock, flags);
2863
2864         bnad_netdev_qstats_fill(bnad, stats);
2865         bnad_netdev_hwstats_fill(bnad, stats);
2866
2867         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2868
2869         return stats;
2870 }
2871
2872 void
2873 bnad_set_rx_mode(struct net_device *netdev)
2874 {
2875         struct bnad *bnad = netdev_priv(netdev);
2876         u32     new_mask, valid_mask;
2877         unsigned long flags;
2878
2879         spin_lock_irqsave(&bnad->bna_lock, flags);
2880
2881         new_mask = valid_mask = 0;
2882
2883         if (netdev->flags & IFF_PROMISC) {
2884                 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2885                         new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2886                         valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2887                         bnad->cfg_flags |= BNAD_CF_PROMISC;
2888                 }
2889         } else {
2890                 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2891                         new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2892                         valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2893                         bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2894                 }
2895         }
2896
2897         if (netdev->flags & IFF_ALLMULTI) {
2898                 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2899                         new_mask |= BNA_RXMODE_ALLMULTI;
2900                         valid_mask |= BNA_RXMODE_ALLMULTI;
2901                         bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2902                 }
2903         } else {
2904                 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2905                         new_mask &= ~BNA_RXMODE_ALLMULTI;
2906                         valid_mask |= BNA_RXMODE_ALLMULTI;
2907                         bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2908                 }
2909         }
2910
2911         if (bnad->rx_info[0].rx == NULL)
2912                 goto unlock;
2913
2914         bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2915
2916         if (!netdev_mc_empty(netdev)) {
2917                 u8 *mcaddr_list;
2918                 int mc_count = netdev_mc_count(netdev);
2919
2920                 /* Index 0 holds the broadcast address */
2921                 mcaddr_list =
2922                         kzalloc((mc_count + 1) * ETH_ALEN,
2923                                 GFP_ATOMIC);
2924                 if (!mcaddr_list)
2925                         goto unlock;
2926
2927                 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2928
2929                 /* Copy rest of the MC addresses */
2930                 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2931
2932                 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2933                                         mcaddr_list, NULL);
2934
2935                 /* Should BNAD_CF_ALLMULTI be enabled if bna_rx_mcast_listset() fails? */
2936                 kfree(mcaddr_list);
2937         }
2938 unlock:
2939         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2940 }
2941
2942 /*
2943  * bna_lock is used to sync writes to netdev->dev_addr;
2944  * conf_lock cannot be used since this call may be made
2945  * in a non-blocking context.
2946  */
2947 static int
2948 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2949 {
2950         int err;
2951         struct bnad *bnad = netdev_priv(netdev);
2952         struct sockaddr *sa = (struct sockaddr *)mac_addr;
2953         unsigned long flags;
2954
2955         spin_lock_irqsave(&bnad->bna_lock, flags);
2956
2957         err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2958
2959         if (!err)
2960                 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2961
2962         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2963
2964         return err;
2965 }
2966
2967 static int
2968 bnad_mtu_set(struct bnad *bnad, int mtu)
2969 {
2970         unsigned long flags;
2971
2972         init_completion(&bnad->bnad_completions.mtu_comp);
2973
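             /*
              * bna_enet_mtu_set() completes asynchronously;
              * bnad_cb_enet_mtu_set signals mtu_comp when done.
              */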
2974         spin_lock_irqsave(&bnad->bna_lock, flags);
2975         bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2976         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2977
2978         wait_for_completion(&bnad->bnad_completions.mtu_comp);
2979
2980         return bnad->bnad_completions.mtu_comp_status;
2981 }
2982
2983 static int
2984 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2985 {
2986         int err, mtu;
2987         struct bnad *bnad = netdev_priv(netdev);
2988
2989         if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2990                 return -EINVAL;
2991
2992         mutex_lock(&bnad->conf_mutex);
2993
2994         netdev->mtu = new_mtu;
2995
2996         mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2997         err = bnad_mtu_set(bnad, mtu);
2998         if (err)
2999                 err = -EBUSY;
3000
3001         mutex_unlock(&bnad->conf_mutex);
3002         return err;
3003 }
3004
3005 static int
3006 bnad_vlan_rx_add_vid(struct net_device *netdev,
3007                                  unsigned short vid)
3008 {
3009         struct bnad *bnad = netdev_priv(netdev);
3010         unsigned long flags;
3011
3012         if (!bnad->rx_info[0].rx)
3013                 return 0;
3014
3015         mutex_lock(&bnad->conf_mutex);
3016
3017         spin_lock_irqsave(&bnad->bna_lock, flags);
3018         bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3019         set_bit(vid, bnad->active_vlans);
3020         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3021
3022         mutex_unlock(&bnad->conf_mutex);
3023
3024         return 0;
3025 }
3026
3027 static int
3028 bnad_vlan_rx_kill_vid(struct net_device *netdev,
3029                                   unsigned short vid)
3030 {
3031         struct bnad *bnad = netdev_priv(netdev);
3032         unsigned long flags;
3033
3034         if (!bnad->rx_info[0].rx)
3035                 return 0;
3036
3037         mutex_lock(&bnad->conf_mutex);
3038
3039         spin_lock_irqsave(&bnad->bna_lock, flags);
3040         clear_bit(vid, bnad->active_vlans);
3041         bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3042         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3043
3044         mutex_unlock(&bnad->conf_mutex);
3045
3046         return 0;
3047 }
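
/*
 * Sketch (assumption, mirroring how the bitmap above is meant to be
 * consumed): active_vlans lets the hardware VLAN filter be replayed
 * after a device reset, along the lines of:
 */
static void __maybe_unused
bnad_example_replay_vlans(struct bnad *bnad)
{
        unsigned long flags;
        u16 vid;

        for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
                spin_lock_irqsave(&bnad->bna_lock, flags);
                bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
        }
}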
3048
3049 #ifdef CONFIG_NET_POLL_CONTROLLER
3050 static void
3051 bnad_netpoll(struct net_device *netdev)
3052 {
3053         struct bnad *bnad = netdev_priv(netdev);
3054         struct bnad_rx_info *rx_info;
3055         struct bnad_rx_ctrl *rx_ctrl;
3056         u32 curr_mask;
3057         int i, j;
3058
3059         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
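                /*
                 * bna_intx_disable() is a macro that saves the current
                 * INTx mask in curr_mask before masking interrupts, so
                 * the same mask can be restored by bna_intx_enable().
                 */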
3060                 bna_intx_disable(&bnad->bna, curr_mask);
3061                 bnad_isr(bnad->pcidev->irq, netdev);
3062                 bna_intx_enable(&bnad->bna, curr_mask);
3063         } else {
3064                 /*
3065                  * Tx processing may happen in sending context, so no need
3066                  * to explicitly process completions here
3067                  */
3068
3069                 /* Rx processing */
3070                 for (i = 0; i < bnad->num_rx; i++) {
3071                         rx_info = &bnad->rx_info[i];
3072                         if (!rx_info->rx)
3073                                 continue;
3074                         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3075                                 rx_ctrl = &rx_info->rx_ctrl[j];
3076                                 if (rx_ctrl->ccb)
3077                                         bnad_netif_rx_schedule_poll(bnad,
3078                                                             rx_ctrl->ccb);
3079                         }
3080                 }
3081         }
3082 }
3083 #endif
3084
3085 static const struct net_device_ops bnad_netdev_ops = {
3086         .ndo_open               = bnad_open,
3087         .ndo_stop               = bnad_stop,
3088         .ndo_start_xmit         = bnad_start_xmit,
3089         .ndo_get_stats64        = bnad_get_stats64,
3090         .ndo_set_rx_mode        = bnad_set_rx_mode,
3091         .ndo_validate_addr      = eth_validate_addr,
3092         .ndo_set_mac_address    = bnad_set_mac_address,
3093         .ndo_change_mtu         = bnad_change_mtu,
3094         .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3095         .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3096 #ifdef CONFIG_NET_POLL_CONTROLLER
3097         .ndo_poll_controller    = bnad_netpoll
3098 #endif
3099 };
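
/*
 * The core reaches these handlers through the net_device: for example
 * dev_set_mtu() lands in bnad_change_mtu(), and dev_set_mac_address()
 * in bnad_set_mac_address().
 */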
3100
3101 static void
3102 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3103 {
3104         struct net_device *netdev = bnad->netdev;
3105
3106         netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3107                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3108                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
3109
3110         netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3111                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3112                 NETIF_F_TSO | NETIF_F_TSO6;
3113
3114         netdev->features |= netdev->hw_features |
3115                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3116
3117         if (using_dac)
3118                 netdev->features |= NETIF_F_HIGHDMA;
3119
3120         netdev->mem_start = bnad->mmio_start;
3121         netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3122
3123         netdev->netdev_ops = &bnad_netdev_ops;
3124         bnad_set_ethtool_ops(netdev);
3125 }
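
/*
 * Note: hw_features is the set a user may toggle with "ethtool -K",
 * while features is what is currently enabled.  VLAN RX and VLAN
 * filtering are added to features only, so on this driver they are
 * always on and cannot be switched off from userspace.
 */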
3126
3127 /*
3128  * 1. Initialize the bnad structure
3129  * 2. Set up netdev pointer in pci_dev
3130  * 3. Initialize Tx free tasklet
3131  * 4. Initialize no. of TxQ & CQs & MSIX vectors
3132  */
3133 static int
3134 bnad_init(struct bnad *bnad,
3135           struct pci_dev *pdev, struct net_device *netdev)
3136 {
3137         unsigned long flags;
3138
3139         SET_NETDEV_DEV(netdev, &pdev->dev);
3140         pci_set_drvdata(pdev, netdev);
3141
3142         bnad->netdev = netdev;
3143         bnad->pcidev = pdev;
3144         bnad->mmio_start = pci_resource_start(pdev, 0);
3145         bnad->mmio_len = pci_resource_len(pdev, 0);
3146         bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3147         if (!bnad->bar0) {
3148                 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3149                 pci_set_drvdata(pdev, NULL);
3150                 return -ENOMEM;
3151         }
3152         pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3153                (unsigned long long) bnad->mmio_len);
3154
3155         spin_lock_irqsave(&bnad->bna_lock, flags);
3156         if (!bnad_msix_disable)
3157                 bnad->cfg_flags = BNAD_CF_MSIX;
3158
3159         bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3160
3161         bnad_q_num_init(bnad);
3162         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3163
3164         bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3165                 (bnad->num_rx * bnad->num_rxp_per_rx) +
3166                          BNAD_MAILBOX_MSIX_VECTORS;
3167
3168         bnad->txq_depth = BNAD_TXQ_DEPTH;
3169         bnad->rxq_depth = BNAD_RXQ_DEPTH;
3170
3171         bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3172         bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3173
3174         tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
3175                      (unsigned long)bnad);
3176
3177         return 0;
3178 }
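
/*
 * Worked example with hypothetical counts of one Tx object carrying
 * one TxQ and one Rx object carrying two Rx paths:
 *
 *	msix_num = (1 * 1) + (1 * 2) + BNAD_MAILBOX_MSIX_VECTORS
 *
 * i.e. one vector per TxQ, one per Rx path, plus the mailbox vector(s).
 */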
3179
3180 /*
3181  * Must be called after bnad_pci_uninit()
3182  * so that iounmap() and pci_set_drvdata(NULL)
3183  * happens only after PCI uninitialization.
3184  */
3185 static void
3186 bnad_uninit(struct bnad *bnad)
3187 {
3188         if (bnad->bar0)
3189                 iounmap(bnad->bar0);
3190         pci_set_drvdata(bnad->pcidev, NULL);
3191 }
3192
3193 /*
3194  * Initialize locks
3195  *      a) Per-ioceth mutex used for serializing configuration
3196  *         changes from the OS interface
3197  *      b) Spinlock used to protect the bna state machine
3198  */
3199 static void
3200 bnad_lock_init(struct bnad *bnad)
3201 {
3202         spin_lock_init(&bnad->bna_lock);
3203         mutex_init(&bnad->conf_mutex);
3204         mutex_init(&bnad_list_mutex);
3205 }
3206
3207 static void
3208 bnad_lock_uninit(struct bnad *bnad)
3209 {
3210         mutex_destroy(&bnad->conf_mutex);
3211         mutex_destroy(&bnad_list_mutex);
3212 }
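
/*
 * Note: bnad_list_mutex is driver-global, yet it is initialized and
 * destroyed here once per device; this is safe only as long as probe
 * and remove of multiple adapters never run concurrently.
 */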
3213
3214 /* PCI Initialization */
3215 static int
3216 bnad_pci_init(struct bnad *bnad,
3217               struct pci_dev *pdev, bool *using_dac)
3218 {
3219         int err;
3220
3221         err = pci_enable_device(pdev);
3222         if (err)
3223                 return err;
3224         err = pci_request_regions(pdev, BNAD_NAME);
3225         if (err)
3226                 goto disable_device;
3227         if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3228             !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3229                 *using_dac = true;
3230         } else {
3231                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3232                 if (err) {
3233                         err = dma_set_coherent_mask(&pdev->dev,
3234                                                     DMA_BIT_MASK(32));
3235                         if (err)
3236                                 goto release_regions;
3237                 }
3238                 *using_dac = false;
3239         }
3240         pci_set_master(pdev);
3241         return 0;
3242
3243 release_regions:
3244         pci_release_regions(pdev);
3245 disable_device:
3246         pci_disable_device(pdev);
3247
3248         return err;
3249 }
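
/*
 * On later kernels the 64-then-32 bit fallback above collapses into a
 * single helper per mask (sketch only, not used by this version):
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */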
3250
3251 static void
3252 bnad_pci_uninit(struct pci_dev *pdev)
3253 {
3254         pci_release_regions(pdev);
3255         pci_disable_device(pdev);
3256 }
3257
3258 static int __devinit
3259 bnad_pci_probe(struct pci_dev *pdev,
3260                 const struct pci_device_id *pcidev_id)
3261 {
3262         bool    using_dac;
3263         int     err;
3264         struct bnad *bnad;
3265         struct bna *bna;
3266         struct net_device *netdev;
3267         struct bfa_pcidev pcidev_info;
3268         unsigned long flags;
3269
3270         pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3271                pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3272
3273         mutex_lock(&bnad_fwimg_mutex);
3274         if (!cna_get_firmware_buf(pdev)) {
3275                 mutex_unlock(&bnad_fwimg_mutex);
3276                 pr_warn("Failed to load Firmware Image!\n");
3277                 return -ENODEV;
3278         }
3279         mutex_unlock(&bnad_fwimg_mutex);
3280
3281         /*
3282          * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3283          * bnad = netdev_priv(netdev)
3284          */
3285         netdev = alloc_etherdev(sizeof(struct bnad));
3286         if (!netdev) {
3287                 err = -ENOMEM;
3288                 return err;
3289         }
3290         bnad = netdev_priv(netdev);
3291         bnad_lock_init(bnad);
3292         bnad_add_to_list(bnad);
3293
3294         mutex_lock(&bnad->conf_mutex);
3295         /*
3296          * PCI initialization
3297          *      Output : using_dac = 1 for 64 bit DMA
3298          *                         = 0 for 32 bit DMA
3299          */
3300         err = bnad_pci_init(bnad, pdev, &using_dac);
3301         if (err)
3302                 goto unlock_mutex;
3303
3304         /*
3305          * Initialize bnad structure
3306          * Setup relation between pci_dev & netdev
3307          * Init Tx free tasklet
3308          */
3309         err = bnad_init(bnad, pdev, netdev);
3310         if (err)
3311                 goto pci_uninit;
3312
3313         /* Initialize netdev structure, set up ethtool ops */
3314         bnad_netdev_init(bnad, using_dac);
3315
3316         /* Set link to down state */
3317         netif_carrier_off(netdev);
3318
3319         /* Set up the debugfs node for this bnad */
3320         if (bna_debugfs_enable)
3321                 bnad_debugfs_init(bnad);
3322
3323         /* Get resource requirements from bna */
3324         spin_lock_irqsave(&bnad->bna_lock, flags);
3325         bna_res_req(&bnad->res_info[0]);
3326         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3327
3328         /* Allocate resources from bna */
3329         err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3330         if (err)
3331                 goto drv_uninit;
3332
3333         bna = &bnad->bna;
3334
3335         /* Setup pcidev_info for bna_init() */
3336         pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3337         pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3338         pcidev_info.device_id = bnad->pcidev->device;
3339         pcidev_info.pci_bar_kva = bnad->bar0;
3340
3341         spin_lock_irqsave(&bnad->bna_lock, flags);
3342         bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3343         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3344
3345         bnad->stats.bna_stats = &bna->stats;
3346
3347         bnad_enable_msix(bnad);
3348         err = bnad_mbox_irq_alloc(bnad);
3349         if (err)
3350                 goto res_free;
3351
3353         /* Set up timers */
3354         setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3355                                 ((unsigned long)bnad));
3356         setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3357                                 ((unsigned long)bnad));
3358         setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3359                                 ((unsigned long)bnad));
3360         setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3361                                 ((unsigned long)bnad));
3362
3363         /* Now start the timer before calling IOC */
3364         mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3365                   jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3366
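        /*
         * Each handler above follows the pre-4.15 timer convention of
         * recovering the bnad from the opaque data argument, roughly
         *
         *	static void bnad_ioc_timeout(unsigned long data)
         *	{
         *		struct bnad *bnad = (struct bnad *)data;
         *		...
         *	}
         */
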
3367         /*
3368          * Start the chip
3369          * If the IOC fails to come up we skip the rest of the setup
3370          * but return success so IOC recovery can still be attempted.
3371          */
3372         err = bnad_ioceth_enable(bnad);
3373         if (err) {
3374                 pr_err("BNA: Initialization failed err=%d\n",
3375                        err);
3376                 goto probe_success;
3377         }
3378
3379         spin_lock_irqsave(&bnad->bna_lock, flags);
3380         if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3381                 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3382                 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3383                         bna_attr(bna)->num_rxp - 1);
3384                 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3385                         bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3386                         err = -EIO;
3387         }
3388         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3389         if (err)
3390                 goto disable_ioceth;
3391
3392         spin_lock_irqsave(&bnad->bna_lock, flags);
3393         bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3394         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3395
3396         err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3397         if (err) {
3398                 err = -EIO;
3399                 goto disable_ioceth;
3400         }
3401
3402         spin_lock_irqsave(&bnad->bna_lock, flags);
3403         bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3404         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3405
3406         /* Get the burnt-in mac */
3407         spin_lock_irqsave(&bnad->bna_lock, flags);
3408         bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3409         bnad_set_netdev_perm_addr(bnad);
3410         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3411
3412         mutex_unlock(&bnad->conf_mutex);
3413
3414         /* Finally, register with the net_device layer */
3415         err = register_netdev(netdev);
3416         if (err) {
3417                 pr_err("BNA : Registering with netdev failed\n");
3418                 goto probe_uninit;
3419         }
3420         set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3421
3422         return 0;
3423
3424 probe_success:
3425         mutex_unlock(&bnad->conf_mutex);
3426         return 0;
3427
3428 probe_uninit:
3429         mutex_lock(&bnad->conf_mutex);
3430         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3431 disable_ioceth:
3432         bnad_ioceth_disable(bnad);
3433         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3434         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3435         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3436         spin_lock_irqsave(&bnad->bna_lock, flags);
3437         bna_uninit(bna);
3438         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3439         bnad_mbox_irq_free(bnad);
3440         bnad_disable_msix(bnad);
3441 res_free:
3442         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3443 drv_uninit:
3444         /* Remove the debugfs node for this bnad */
3445         kfree(bnad->regdata);
3446         bnad_debugfs_uninit(bnad);
3447         bnad_uninit(bnad);
3448 pci_uninit:
3449         bnad_pci_uninit(pdev);
3450 unlock_mutex:
3451         mutex_unlock(&bnad->conf_mutex);
3452         bnad_remove_from_list(bnad);
3453         bnad_lock_uninit(bnad);
3454         free_netdev(netdev);
3455         return err;
3456 }
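
/*
 * The label ladder above unwinds in reverse order of setup: a failure
 * at a given step jumps to the label that tears down everything set up
 * before it.  A bnad_res_alloc() failure (drv_uninit), for example,
 * still unmaps bar0 and releases the PCI regions, but never touches
 * MSI-X or the IOC timers, which do not exist yet at that point.
 */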
3457
3458 static void __devexit
3459 bnad_pci_remove(struct pci_dev *pdev)
3460 {
3461         struct net_device *netdev = pci_get_drvdata(pdev);
3462         struct bnad *bnad;
3463         struct bna *bna;
3464         unsigned long flags;
3465
3466         if (!netdev)
3467                 return;
3468
3469         pr_info("%s bnad_pci_remove\n", netdev->name);
3470         bnad = netdev_priv(netdev);
3471         bna = &bnad->bna;
3472
3473         if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3474                 unregister_netdev(netdev);
3475
3476         mutex_lock(&bnad->conf_mutex);
3477         bnad_ioceth_disable(bnad);
3478         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3479         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3480         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3481         spin_lock_irqsave(&bnad->bna_lock, flags);
3482         bna_uninit(bna);
3483         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3484
3485         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3486         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3487         bnad_mbox_irq_free(bnad);
3488         bnad_disable_msix(bnad);
3489         bnad_pci_uninit(pdev);
3490         mutex_unlock(&bnad->conf_mutex);
3491         bnad_remove_from_list(bnad);
3492         bnad_lock_uninit(bnad);
3493         /* Remove the debugfs node for this bnad */
3494         kfree(bnad->regdata);
3495         bnad_debugfs_uninit(bnad);
3496         bnad_uninit(bnad);
3497         free_netdev(netdev);
3498 }
3499
3500 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3501         {
3502                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3503                         PCI_DEVICE_ID_BROCADE_CT),
3504                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3505                 .class_mask =  0xffff00
3506         },
3507         {
3508                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3509                         BFA_PCI_DEVICE_ID_CT2),
3510                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3511                 .class_mask =  0xffff00
3512         },
3513         {0,  },
3514 };
3515
3516 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3517
3518 static struct pci_driver bnad_pci_driver = {
3519         .name = BNAD_NAME,
3520         .id_table = bnad_pci_id_table,
3521         .probe = bnad_pci_probe,
3522         .remove = __devexit_p(bnad_pci_remove),
3523 };
3524
3525 static int __init
3526 bnad_module_init(void)
3527 {
3528         int err;
3529
3530         pr_info("Brocade 10G Ethernet driver - version: %s\n",
3531                         BNAD_VERSION);
3532
3533         bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3534
3535         err = pci_register_driver(&bnad_pci_driver);
3536         if (err < 0) {
3537                 pr_err("bna : PCI registration failed in module init "
3538                        "(%d)\n", err);
3539                 return err;
3540         }
3541
3542         return 0;
3543 }
3544
3545 static void __exit
3546 bnad_module_exit(void)
3547 {
3548         pci_unregister_driver(&bnad_pci_driver);
3549
3550         if (bfi_fw)
3551                 release_firmware(bfi_fw);
3552 }
3553
3554 module_init(bnad_module_init);
3555 module_exit(bnad_module_exit);
3556
3557 MODULE_AUTHOR("Brocade");
3558 MODULE_LICENSE("GPL");
3559 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3560 MODULE_VERSION(BNAD_VERSION);
3561 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3562 MODULE_FIRMWARE(CNA_FW_FILE_CT2);