/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/prefetch.h>
#include <linux/module.h>
static DEFINE_MUTEX(bnad_fwimg_mutex);

static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
         " Range[false:0|true:1]");

static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] __aligned(2) =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
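/*
 * Resolve the IRQ that carries mailbox interrupts: the dedicated
 * MSI-X vector when MSI-X is enabled, otherwise the PCI device's
 * INTx line.
 */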
#define BNAD_GET_MBOX_IRQ(_bnad)                \
    (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?          \
     ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
     ((_bnad)->pcidev->irq))
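/*
 * Fill one bna_res_info entry with the kernel-VA memory requirement
 * for a driver-private unmap queue; used by bnad_setup_tx() and
 * bnad_setup_rx() before the resources are actually allocated.
 */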
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)    \
do {                                \
    (_res_info)->res_type = BNA_RES_T_MEM;          \
    (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
    (_res_info)->res_u.mem_info.num = (_num);       \
    (_res_info)->res_u.mem_info.len = (_size);      \
} while (0)
static void
bnad_add_to_list(struct bnad *bnad)
{
    mutex_lock(&bnad_list_mutex);
    list_add_tail(&bnad->list_entry, &bnad_list);
    bnad->id = bna_id++;
    mutex_unlock(&bnad_list_mutex);
}

static void
bnad_remove_from_list(struct bnad *bnad)
{
    mutex_lock(&bnad_list_mutex);
    list_del(&bnad->list_entry);
    mutex_unlock(&bnad_list_mutex);
}
/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
    struct bna_cq_entry *cmpl;
    int i;

    for (i = 0; i < ccb->q_depth; i++) {
        cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
        cmpl->valid = 0;
    }
}
/* Tx Datapath functions */

/* Caller should ensure that the entry at unmap_q[index] is valid */
static u32
bnad_tx_buff_unmap(struct bnad *bnad,
           struct bnad_tx_unmap *unmap_q,
           u32 q_depth, u32 index)
{
    struct bnad_tx_unmap *unmap;
    struct sk_buff *skb;
    int vector, nvecs;

    unmap = &unmap_q[index];
    nvecs = unmap->nvecs;

    skb = unmap->skb;
    unmap->skb = NULL;
    unmap->nvecs = 0;
    dma_unmap_single(&bnad->pcidev->dev,
        dma_unmap_addr(&unmap->vectors[0], dma_addr),
        skb_headlen(skb), DMA_TO_DEVICE);
    dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
    nvecs--;

    vector = 0;
    while (nvecs) {
        vector++;
        if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
            vector = 0;
            BNA_QE_INDX_INC(index, q_depth);
            unmap = &unmap_q[index];
        }

        dma_unmap_page(&bnad->pcidev->dev,
            dma_unmap_addr(&unmap->vectors[vector], dma_addr),
            dma_unmap_len(&unmap->vectors[vector], dma_len),
            DMA_TO_DEVICE);
        dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
        nvecs--;
    }

    BNA_QE_INDX_INC(index, q_depth);

    return index;
}
/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
    struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
    struct sk_buff *skb;
    int i;

    for (i = 0; i < tcb->q_depth; i++) {
        skb = unmap_q[i].skb;
        if (!skb)
            continue;

        bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
        dev_kfree_skb_any(skb);
    }
}
/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 * b) Sending context
 */
static u32
bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
    u32 sent_packets = 0, sent_bytes = 0;
    u32 wis, unmap_wis, hw_cons, cons, q_depth;
    struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
    struct bnad_tx_unmap *unmap;
    struct sk_buff *skb;

    /* Just return if TX is stopped */
    if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
        return 0;

    hw_cons = *(tcb->hw_consumer_index);
    cons = tcb->consumer_index;
    q_depth = tcb->q_depth;
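    /*
     * wis: number of work items the hardware has completed since the
     * last pass (consumer-index delta, modulo q_depth). One packet
     * may span several work items.
     */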
    wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
    BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

    while (wis) {
        unmap = &unmap_q[cons];
        skb = unmap->skb;

        sent_packets++;
        sent_bytes += skb->len;

        unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
        wis -= unmap_wis;

        cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
        dev_kfree_skb_any(skb);
    }

    /* Update consumer pointers. */
    tcb->consumer_index = hw_cons;

    tcb->txq->tx_packets += sent_packets;
    tcb->txq->tx_bytes += sent_bytes;

    return sent_packets;
}
static u32
bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
{
    struct net_device *netdev = bnad->netdev;
    u32 sent = 0;

    if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
        return 0;

    sent = bnad_txcmpl_process(bnad, tcb);
    if (sent) {
        if (netif_queue_stopped(netdev) &&
            netif_carrier_ok(netdev) &&
            BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
            BNAD_NETIF_WAKE_THRESHOLD) {
            if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
                netif_wake_queue(netdev);
                BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
            }
        }
    }

    if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
        bna_ib_ack(tcb->i_dbell, sent);

    smp_mb__before_atomic();
    clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

    return sent;
}
/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
    struct bna_tcb *tcb = (struct bna_tcb *)data;
    struct bnad *bnad = tcb->bnad;

    bnad_tx_complete(bnad, tcb);

    return IRQ_HANDLED;
}
static void
bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
{
    struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;

    unmap_q->reuse_pi = -1;
    unmap_q->alloc_order = -1;
    unmap_q->map_size = 0;
    unmap_q->type = BNAD_RXBUF_NONE;
}
/* Default is page-based allocation. Multi-buffer support - TBD */
static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
    struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
    int order;

    bnad_rxq_alloc_uninit(bnad, rcb);

    order = get_order(rcb->rxq->buffer_size);

    unmap_q->type = BNAD_RXBUF_PAGE;

    if (bna_is_small_rxq(rcb->id)) {
        unmap_q->alloc_order = 0;
        unmap_q->map_size = rcb->rxq->buffer_size;
    } else {
        if (rcb->rxq->multi_buffer) {
            unmap_q->alloc_order = 0;
            unmap_q->map_size = rcb->rxq->buffer_size;
            unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
        } else {
            unmap_q->alloc_order = order;
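            /*
             * Buffers larger than 2048 bytes each get a whole
             * (PAGE_SIZE << order) mapping; smaller ones are
             * packed at 2048-byte granularity so one page can
             * serve several Rx buffers.
             */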
            unmap_q->map_size =
                (rcb->rxq->buffer_size > 2048) ?
                PAGE_SIZE << order : 2048;
        }
    }

    BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);

    return 0;
}
static void
bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
    if (!unmap->page)
        return;

    dma_unmap_page(&bnad->pcidev->dev,
            dma_unmap_addr(&unmap->vector, dma_addr),
            unmap->vector.len, DMA_FROM_DEVICE);
    put_page(unmap->page);
    unmap->page = NULL;
    dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
    unmap->vector.len = 0;
}
static void
bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
    if (!unmap->skb)
        return;

    dma_unmap_single(&bnad->pcidev->dev,
            dma_unmap_addr(&unmap->vector, dma_addr),
            unmap->vector.len, DMA_FROM_DEVICE);
    dev_kfree_skb_any(unmap->skb);
    unmap->skb = NULL;
    dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
    unmap->vector.len = 0;
}
static void
bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
{
    struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
    int i;

    for (i = 0; i < rcb->q_depth; i++) {
        struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];

        if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
            bnad_rxq_cleanup_skb(bnad, unmap);
        else
            bnad_rxq_cleanup_page(bnad, unmap);
    }
    bnad_rxq_alloc_uninit(bnad, rcb);
}
static u32
bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
    u32 alloced, prod, q_depth;
    struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
    struct bnad_rx_unmap *unmap, *prev;
    struct bna_rxq_entry *rxent;
    struct page *page;
    u32 page_offset, alloc_size;
    dma_addr_t dma_addr;

    prod = rcb->producer_index;
    q_depth = rcb->q_depth;

    alloc_size = PAGE_SIZE << unmap_q->alloc_order;
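    /*
     * Each (PAGE_SIZE << alloc_order) page is handed out in map_size
     * slices; reuse_pi remembers the producer index whose page still
     * has room, so the next buffer can share that page.
     */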
    alloced = 0;

    while (nalloc--) {
        unmap = &unmap_q->unmap[prod];

        if (unmap_q->reuse_pi < 0) {
            page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
                    unmap_q->alloc_order);
            page_offset = 0;
        } else {
            prev = &unmap_q->unmap[unmap_q->reuse_pi];
            page = prev->page;
            page_offset = prev->page_offset + unmap_q->map_size;
            get_page(page);
        }

        if (unlikely(!page)) {
            BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
            rcb->rxq->rxbuf_alloc_failed++;
            goto finishing;
        }

        dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
                    unmap_q->map_size, DMA_FROM_DEVICE);
        if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
            put_page(page);
            BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
            rcb->rxq->rxbuf_map_failed++;
            goto finishing;
        }

        unmap->page = page;
        unmap->page_offset = page_offset;
        dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
        unmap->vector.len = unmap_q->map_size;
        page_offset += unmap_q->map_size;

        if (page_offset < alloc_size)
            unmap_q->reuse_pi = prod;
        else
            unmap_q->reuse_pi = -1;

        rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
        BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
        BNA_QE_INDX_INC(prod, q_depth);
        alloced++;
    }

finishing:
    if (likely(alloced)) {
        rcb->producer_index = prod;
        smp_mb();
        if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
            bna_rxq_prod_indx_doorbell(rcb);
    }

    return alloced;
}
static u32
bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
    u32 alloced, prod, q_depth, buff_sz;
    struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
    struct bnad_rx_unmap *unmap;
    struct bna_rxq_entry *rxent;
    struct sk_buff *skb;
    dma_addr_t dma_addr;

    buff_sz = rcb->rxq->buffer_size;
    prod = rcb->producer_index;
    q_depth = rcb->q_depth;

    alloced = 0;
    while (nalloc--) {
        unmap = &unmap_q->unmap[prod];

        skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);

        if (unlikely(!skb)) {
            BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
            rcb->rxq->rxbuf_alloc_failed++;
            goto finishing;
        }

        dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
                      buff_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
            dev_kfree_skb_any(skb);
            BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
            rcb->rxq->rxbuf_map_failed++;
            goto finishing;
        }

        unmap->skb = skb;
        dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
        unmap->vector.len = buff_sz;

        rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
        BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
        BNA_QE_INDX_INC(prod, q_depth);
        alloced++;
    }

finishing:
    if (likely(alloced)) {
        rcb->producer_index = prod;
        smp_mb();
        if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
            bna_rxq_prod_indx_doorbell(rcb);
    }

    return alloced;
}
static void
bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
{
    struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
    u32 to_alloc;

    to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
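    /*
     * Refill in batches: do nothing until at least
     * (1 << BNAD_RXQ_REFILL_THRESHOLD_SHIFT) entries are free, to
     * amortize the mapping and doorbell work.
     */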
    if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
        return;

    if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
        bnad_rxq_refill_skb(bnad, rcb, to_alloc);
    else
        bnad_rxq_refill_page(bnad, rcb, to_alloc);
}
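/*
 * Rx checksum validation: completion flags are masked down to the
 * protocol and checksum bits and compared against the exact
 * "known good" TCP/UDP over IPv4/IPv6 patterns defined below.
 */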
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
                    BNA_CQ_EF_IPV6 | \
                    BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
                    BNA_CQ_EF_L4_CKSUM_OK)

#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
                    BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
                    BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
                    BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
                    BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
static void
bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
            u32 sop_ci, u32 nvecs)
{
    struct bnad_rx_unmap_q *unmap_q;
    struct bnad_rx_unmap *unmap;
    u32 ci, vec;

    unmap_q = rcb->unmap_q;
    for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
        unmap = &unmap_q->unmap[ci];
        BNA_QE_INDX_INC(ci, rcb->q_depth);

        if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
            bnad_rxq_cleanup_skb(bnad, unmap);
        else
            bnad_rxq_cleanup_page(bnad, unmap);
    }
}
static void
bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
{
    struct bna_rcb *rcb;
    struct bnad *bnad;
    struct bnad_rx_unmap_q *unmap_q;
    struct bna_cq_entry *cq, *cmpl;
    u32 ci, pi, totlen = 0;

    cq = ccb->sw_q;
    pi = ccb->producer_index;
    cmpl = &cq[pi];

    rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
    unmap_q = rcb->unmap_q;
    bnad = rcb->bnad;
    ci = rcb->consumer_index;

    /* prefetch header */
    prefetch(page_address(unmap_q->unmap[ci].page) +
         unmap_q->unmap[ci].page_offset);

    while (nvecs--) {
        struct bnad_rx_unmap *unmap;
        u32 len;

        unmap = &unmap_q->unmap[ci];
        BNA_QE_INDX_INC(ci, rcb->q_depth);

        dma_unmap_page(&bnad->pcidev->dev,
                dma_unmap_addr(&unmap->vector, dma_addr),
                unmap->vector.len, DMA_FROM_DEVICE);

        len = ntohs(cmpl->length);
        skb->truesize += unmap->vector.len;
        totlen += len;

        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                   unmap->page, unmap->page_offset, len);

        unmap->page = NULL;
        unmap->vector.len = 0;

        BNA_QE_INDX_INC(pi, ccb->q_depth);
        cmpl = &cq[pi];
    }

    skb->len += totlen;
    skb->data_len += totlen;
}
static void
bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
          struct bnad_rx_unmap *unmap, u32 len)
{
    prefetch(skb->data);

    dma_unmap_single(&bnad->pcidev->dev,
            dma_unmap_addr(&unmap->vector, dma_addr),
            unmap->vector.len, DMA_FROM_DEVICE);

    skb_put(skb, len);
    skb->protocol = eth_type_trans(skb, bnad->netdev);

    unmap->skb = NULL;
    unmap->vector.len = 0;
}
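/*
 * Poll one CQ for up to 'budget' packets: validate each completion,
 * build the skb (single-buffer or fragment-based), feed it to the
 * stack, and recycle the consumed queue entries.
 */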
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
    struct bna_cq_entry *cq, *cmpl, *next_cmpl;
    struct bna_rcb *rcb = NULL;
    struct bnad_rx_unmap_q *unmap_q;
    struct bnad_rx_unmap *unmap = NULL;
    struct sk_buff *skb = NULL;
    struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
    struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
    u32 packets = 0, len = 0, totlen = 0;
    u32 pi, vec, sop_ci = 0, nvecs = 0;
    u32 flags, masked_flags;

    prefetch(bnad->netdev);

    cq = ccb->sw_q;

    while (packets < budget) {
        cmpl = &cq[ccb->producer_index];
        if (!cmpl->valid)
            break;
        /* The 'valid' field is set by the adapter, only after writing
         * the other fields of completion entry. Hence, do not load
         * other fields of completion entry *before* the 'valid' is
         * loaded. Adding the rmb() here prevents the compiler and/or
         * CPU from reordering the reads which would potentially result
         * in reading stale values in completion entry.
         */
        rmb();

        BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

        if (bna_is_small_rxq(cmpl->rxq_id))
            rcb = ccb->rcb[1];
        else
            rcb = ccb->rcb[0];

        unmap_q = rcb->unmap_q;

        /* start of packet ci */
        sop_ci = rcb->consumer_index;

        if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
            unmap = &unmap_q->unmap[sop_ci];
            skb = unmap->skb;
        } else {
            skb = napi_get_frags(&rx_ctrl->napi);
            if (unlikely(!skb))
                break;
        }
        prefetch(skb);

        flags = ntohl(cmpl->flags);
        len = ntohs(cmpl->length);
        totlen = len;
        nvecs = 1;

        /* Check all the completions for this frame.
         * busy-wait doesn't help much, break here.
         */
        if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
            (flags & BNA_CQ_EF_EOP) == 0) {
            pi = ccb->producer_index;
            do {
                BNA_QE_INDX_INC(pi, ccb->q_depth);
                next_cmpl = &cq[pi];

                if (!next_cmpl->valid)
                    break;
                /* The 'valid' field is set by the adapter, only
                 * after writing the other fields of completion
                 * entry. Hence, do not load other fields of
                 * completion entry *before* the 'valid' is
                 * loaded. Adding the rmb() here prevents the
                 * compiler and/or CPU from reordering the reads
                 * which would potentially result in reading
                 * stale values in completion entry.
                 */
                rmb();

                len = ntohs(next_cmpl->length);
                flags = ntohl(next_cmpl->flags);

                nvecs++;
                totlen += len;
            } while ((flags & BNA_CQ_EF_EOP) == 0);

            if (!next_cmpl->valid)
                break;
        }
        packets++;

        /* TODO: BNA_CQ_EF_LOCAL ? */
        if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
                    BNA_CQ_EF_FCS_ERROR |
                    BNA_CQ_EF_TOO_LONG))) {
            bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
            rcb->rxq->rx_packets_with_error++;

            goto next;
        }

        if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
            bnad_cq_setup_skb(bnad, skb, unmap, len);
        else
            bnad_cq_setup_skb_frags(ccb, skb, nvecs);

        rcb->rxq->rx_packets++;
        rcb->rxq->rx_bytes += totlen;
        ccb->bytes_per_intr += totlen;

        masked_flags = flags & flags_cksum_prot_mask;

        if (likely
            ((bnad->netdev->features & NETIF_F_RXCSUM) &&
             ((masked_flags == flags_tcp4) ||
              (masked_flags == flags_udp4) ||
              (masked_flags == flags_tcp6) ||
              (masked_flags == flags_udp6))))
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
            skb_checksum_none_assert(skb);

        if ((flags & BNA_CQ_EF_VLAN) &&
            (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));

        if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
            netif_receive_skb(skb);
        else
            napi_gro_frags(&rx_ctrl->napi);

next:
        BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
        for (vec = 0; vec < nvecs; vec++) {
            cmpl = &cq[ccb->producer_index];
            cmpl->valid = 0;
            BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
        }
    }

    napi_gro_flush(&rx_ctrl->napi, false);
    if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
        bna_ib_ack_disable_irq(ccb->i_dbell, packets);

    bnad_rxq_post(bnad, ccb->rcb[0]);
    if (ccb->rcb[1])
        bnad_rxq_post(bnad, ccb->rcb[1]);

    return packets;
}
static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
    struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
    struct napi_struct *napi = &rx_ctrl->napi;

    if (likely(napi_schedule_prep(napi))) {
        __napi_schedule(napi);
        rx_ctrl->rx_schedule++;
    }
}
/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
    struct bna_ccb *ccb = (struct bna_ccb *)data;

    if (ccb) {
        ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
        bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
    }

    return IRQ_HANDLED;
}
/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
    u32 intr_status;
    unsigned long flags;
    struct bnad *bnad = (struct bnad *)data;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        return IRQ_HANDLED;
    }

    bna_intr_status_get(&bnad->bna, intr_status);

    if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
        bna_mbox_handler(&bnad->bna, intr_status);

    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    return IRQ_HANDLED;
}
static irqreturn_t
bnad_isr(int irq, void *data)
{
    int i, j;
    u32 intr_status;
    unsigned long flags;
    struct bnad *bnad = (struct bnad *)data;
    struct bnad_rx_info *rx_info;
    struct bnad_rx_ctrl *rx_ctrl;
    struct bna_tcb *tcb = NULL;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        return IRQ_NONE;
    }

    bna_intr_status_get(&bnad->bna, intr_status);

    if (unlikely(!intr_status)) {
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        return IRQ_NONE;
    }

    if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
        bna_mbox_handler(&bnad->bna, intr_status);

    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    if (!BNA_IS_INTX_DATA_INTR(intr_status))
        return IRQ_HANDLED;

    /* Process data interrupts */
    /* Tx processing */
    for (i = 0; i < bnad->num_tx; i++) {
        for (j = 0; j < bnad->num_txq_per_tx; j++) {
            tcb = bnad->tx_info[i].tcb[j];
            if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
                bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
        }
    }
    /* Rx processing */
    for (i = 0; i < bnad->num_rx; i++) {
        rx_info = &bnad->rx_info[i];
        if (!rx_info->rx)
            continue;
        for (j = 0; j < bnad->num_rxp_per_rx; j++) {
            rx_ctrl = &rx_info->rx_ctrl[j];
            if (rx_ctrl->ccb)
                bnad_netif_rx_schedule_poll(bnad,
                                rx_ctrl->ccb);
        }
    }
    return IRQ_HANDLED;
}
/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
    clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

    BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
    set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

    BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
    struct net_device *netdev = bnad->netdev;

    ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
    if (is_zero_ether_addr(netdev->dev_addr))
        ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
}
/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
    bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
    bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
    bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
    complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
    bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
    complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
    bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
    complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
    struct bnad *bnad = (struct bnad *)arg;

    netif_carrier_off(bnad->netdev);
    complete(&bnad->bnad_completions.enet_comp);
}
void
bnad_cb_ethport_link_status(struct bnad *bnad,
            enum bna_link_status link_status)
{
    bool link_up = false;

    link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

    if (link_status == BNA_CEE_UP) {
        if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
            BNAD_UPDATE_CTR(bnad, cee_toggle);
        set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
    } else {
        if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
            BNAD_UPDATE_CTR(bnad, cee_toggle);
        clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
    }

    if (link_up) {
        if (!netif_carrier_ok(bnad->netdev)) {
            uint tx_id, tcb_id;

            netdev_info(bnad->netdev, "link up\n");
            netif_carrier_on(bnad->netdev);
            BNAD_UPDATE_CTR(bnad, link_toggle);
            for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
                for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
                     tcb_id++) {
                    struct bna_tcb *tcb =
                    bnad->tx_info[tx_id].tcb[tcb_id];

                    if (!tcb)
                        continue;

                    if (test_bit(BNAD_TXQ_TX_STARTED,
                             &tcb->flags)) {
                        /*
                         * Force an immediate
                         * Transmit Schedule */
                        netif_wake_subqueue(
                                bnad->netdev,
                                tcb->id);
                        BNAD_UPDATE_CTR(bnad,
                            netif_queue_wakeup);
                    } else {
                        netif_stop_subqueue(
                                bnad->netdev,
                                tcb->id);
                        BNAD_UPDATE_CTR(bnad,
                            netif_queue_stop);
                    }
                }
            }
        }
    } else {
        if (netif_carrier_ok(bnad->netdev)) {
            netdev_info(bnad->netdev, "link down\n");
            netif_carrier_off(bnad->netdev);
            BNAD_UPDATE_CTR(bnad, link_toggle);
        }
    }
}
static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
    struct bnad *bnad = (struct bnad *)arg;

    complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
    struct bnad_tx_info *tx_info =
            (struct bnad_tx_info *)tcb->txq->tx->priv;

    tx_info->tcb[tcb->id] = tcb;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
    struct bnad_tx_info *tx_info =
            (struct bnad_tx_info *)tcb->txq->tx->priv;

    tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
    struct bnad_rx_info *rx_info =
            (struct bnad_rx_info *)ccb->cq->rx->priv;

    rx_info->rx_ctrl[ccb->id].ccb = ccb;
    ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
    struct bnad_rx_info *rx_info =
            (struct bnad_rx_info *)ccb->cq->rx->priv;

    rx_info->rx_ctrl[ccb->id].ccb = NULL;
}
static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
    struct bnad_tx_info *tx_info =
            (struct bnad_tx_info *)tx->priv;
    struct bna_tcb *tcb;
    u32 txq_id;
    int i;

    for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
        tcb = tx_info->tcb[i];
        if (!tcb)
            continue;
        txq_id = tcb->id;
        clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
        netif_stop_subqueue(bnad->netdev, txq_id);
    }
}
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
    struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
    struct bna_tcb *tcb;
    u32 txq_id;
    int i;

    for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
        tcb = tx_info->tcb[i];
        if (!tcb)
            continue;
        txq_id = tcb->id;

        BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
        set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
        BUG_ON(*(tcb->hw_consumer_index) != 0);

        if (netif_carrier_ok(bnad->netdev)) {
            netif_wake_subqueue(bnad->netdev, txq_id);
            BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
        }
    }

    /*
     * Workaround for first ioceth enable failure & we
     * get a 0 MAC address. We try to get the MAC address
     * again.
     */
    if (is_zero_ether_addr(bnad->perm_addr)) {
        bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
        bnad_set_netdev_perm_addr(bnad);
    }
}
/*
 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
 */
static void
bnad_tx_cleanup(struct delayed_work *work)
{
    struct bnad_tx_info *tx_info =
        container_of(work, struct bnad_tx_info, tx_cleanup_work);
    struct bnad *bnad = NULL;
    struct bna_tcb *tcb;
    unsigned long flags;
    u32 i, pending = 0;

    for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
        tcb = tx_info->tcb[i];
        if (!tcb)
            continue;

        bnad = tcb->bnad;

        if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
            pending++;
            continue;
        }

        bnad_txq_cleanup(bnad, tcb);

        smp_mb__before_atomic();
        clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
    }

    if (pending) {
        queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
            msecs_to_jiffies(1));
        return;
    }

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_tx_cleanup_complete(tx_info->tx);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
    struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
    struct bna_tcb *tcb;
    int i;

    for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
        tcb = tx_info->tcb[i];
        if (!tcb)
            continue;
    }

    queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
}
static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
    struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
    struct bna_ccb *ccb;
    struct bnad_rx_ctrl *rx_ctrl;
    int i;

    for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
        rx_ctrl = &rx_info->rx_ctrl[i];
        ccb = rx_ctrl->ccb;
        if (!ccb)
            continue;

        clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

        if (ccb->rcb[1])
            clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
    }
}
/*
 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
 */
static void
bnad_rx_cleanup(void *work)
{
    struct bnad_rx_info *rx_info =
        container_of(work, struct bnad_rx_info, rx_cleanup_work);
    struct bnad_rx_ctrl *rx_ctrl;
    struct bnad *bnad = NULL;
    unsigned long flags;
    u32 i;

    for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
        rx_ctrl = &rx_info->rx_ctrl[i];

        if (!rx_ctrl->ccb)
            continue;

        bnad = rx_ctrl->ccb->bnad;

        /*
         * Wait till the poll handler has exited
         * and nothing can be scheduled anymore
         */
        napi_disable(&rx_ctrl->napi);

        bnad_cq_cleanup(bnad, rx_ctrl->ccb);
        bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
        if (rx_ctrl->ccb->rcb[1])
            bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
    }

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_rx_cleanup_complete(rx_info->rx);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
    struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
    struct bna_ccb *ccb;
    struct bnad_rx_ctrl *rx_ctrl;
    int i;

    for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
        rx_ctrl = &rx_info->rx_ctrl[i];
        ccb = rx_ctrl->ccb;
        if (!ccb)
            continue;

        clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

        if (ccb->rcb[1])
            clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
    }

    queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
}
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
    struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
    struct bna_ccb *ccb;
    struct bna_rcb *rcb;
    struct bnad_rx_ctrl *rx_ctrl;
    int i, j;

    for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
        rx_ctrl = &rx_info->rx_ctrl[i];
        ccb = rx_ctrl->ccb;
        if (!ccb)
            continue;

        napi_enable(&rx_ctrl->napi);

        for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
            rcb = ccb->rcb[j];
            if (!rcb)
                continue;

            bnad_rxq_alloc_init(bnad, rcb);
            set_bit(BNAD_RXQ_STARTED, &rcb->flags);
            set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
            bnad_rxq_post(bnad, rcb);
        }
    }
}
static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
    struct bnad *bnad = (struct bnad *)arg;

    complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
    bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
    complete(&bnad->bnad_completions.mcast_comp);
}

static void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
          struct bna_stats *stats)
{
    if (status == BNA_CB_SUCCESS)
        BNAD_UPDATE_CTR(bnad, hw_stats_updates);

    if (!netif_running(bnad->netdev) ||
        !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
        return;

    mod_timer(&bnad->stats_timer,
          jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
    bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
    complete(&bnad->bnad_completions.mtu_comp);
}

void
bnad_cb_completion(void *arg, enum bfa_status status)
{
    struct bnad_iocmd_comp *iocmd_comp =
            (struct bnad_iocmd_comp *)arg;

    iocmd_comp->comp_status = (u32) status;
    complete(&iocmd_comp->comp);
}
/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
          struct bna_mem_info *mem_info)
{
    int i;
    dma_addr_t dma_pa;

    if (mem_info->mdl == NULL)
        return;

    for (i = 0; i < mem_info->num; i++) {
        if (mem_info->mdl[i].kva != NULL) {
            if (mem_info->mem_type == BNA_MEM_T_DMA) {
                BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
                        dma_pa);
                dma_free_coherent(&bnad->pcidev->dev,
                        mem_info->mdl[i].len,
                        mem_info->mdl[i].kva, dma_pa);
            } else
                kfree(mem_info->mdl[i].kva);
        }
    }
    kfree(mem_info->mdl);
    mem_info->mdl = NULL;
}
static int
bnad_mem_alloc(struct bnad *bnad,
           struct bna_mem_info *mem_info)
{
    int i;
    dma_addr_t dma_pa;

    if ((mem_info->num == 0) || (mem_info->len == 0)) {
        mem_info->mdl = NULL;
        return 0;
    }

    mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
                GFP_KERNEL);
    if (mem_info->mdl == NULL)
        return -ENOMEM;

    if (mem_info->mem_type == BNA_MEM_T_DMA) {
        for (i = 0; i < mem_info->num; i++) {
            mem_info->mdl[i].len = mem_info->len;
            mem_info->mdl[i].kva =
                dma_alloc_coherent(&bnad->pcidev->dev,
                           mem_info->len, &dma_pa,
                           GFP_KERNEL);
            if (mem_info->mdl[i].kva == NULL)
                goto err_return;

            BNA_SET_DMA_ADDR(dma_pa,
                     &(mem_info->mdl[i].dma));
        }
    } else {
        for (i = 0; i < mem_info->num; i++) {
            mem_info->mdl[i].len = mem_info->len;
            mem_info->mdl[i].kva = kzalloc(mem_info->len,
                            GFP_KERNEL);
            if (mem_info->mdl[i].kva == NULL)
                goto err_return;
        }
    }

    return 0;

err_return:
    bnad_mem_free(bnad, mem_info);
    return -ENOMEM;
}
/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
    int irq;
    unsigned long flags;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bnad_disable_mbox_irq(bnad);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    irq = BNAD_GET_MBOX_IRQ(bnad);
    free_irq(irq, bnad);
}
/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
    int err = 0;
    unsigned long irq_flags, flags;
    u32 irq;
    irq_handler_t irq_handler;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    if (bnad->cfg_flags & BNAD_CF_MSIX) {
        irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
        irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
        irq_flags = 0;
    } else {
        irq_handler = (irq_handler_t)bnad_isr;
        irq = bnad->pcidev->irq;
        irq_flags = IRQF_SHARED;
    }

    spin_unlock_irqrestore(&bnad->bna_lock, flags);
    sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

    /*
     * Set the Mbox IRQ disable flag, so that the IRQ handler
     * called from request_irq() for SHARED IRQs does not execute
     */
    set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

    BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

    err = request_irq(irq, irq_handler, irq_flags,
              bnad->mbox_irq_name, bnad);

    return err;
}
static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
    kfree(intr_info->idl);
    intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
            u32 txrx_id, struct bna_intr_info *intr_info)
{
    int i, vector_start = 0;
    u32 cfg_flags;
    unsigned long flags;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    cfg_flags = bnad->cfg_flags;
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
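    /*
     * MSI-X vector layout: the mailbox vector(s) come first, then one
     * vector per TxQ, then one per Rx path; txrx_id is this object's
     * offset within its Tx or Rx region.
     */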
    if (cfg_flags & BNAD_CF_MSIX) {
        intr_info->intr_type = BNA_INTR_T_MSIX;
        intr_info->idl = kcalloc(intr_info->num,
                     sizeof(struct bna_intr_descr),
                     GFP_KERNEL);
        if (!intr_info->idl)
            return -ENOMEM;

        switch (src) {
        case BNAD_INTR_TX:
            vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
            break;

        case BNAD_INTR_RX:
            vector_start = BNAD_MAILBOX_MSIX_VECTORS +
                    (bnad->num_tx * bnad->num_txq_per_tx) +
                    txrx_id;
            break;

        default:
            BUG();
        }

        for (i = 0; i < intr_info->num; i++)
            intr_info->idl[i].vector = vector_start + i;
    } else {
        intr_info->intr_type = BNA_INTR_T_INTX;
        intr_info->num = 1;
        intr_info->idl = kcalloc(intr_info->num,
                     sizeof(struct bna_intr_descr),
                     GFP_KERNEL);
        if (!intr_info->idl)
            return -ENOMEM;

        switch (src) {
        case BNAD_INTR_TX:
            intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
            break;

        case BNAD_INTR_RX:
            intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
            break;
        }
    }
    return 0;
}
/* NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
            int num_txqs)
{
    int i;
    int vector_num;

    for (i = 0; i < num_txqs; i++) {
        if (tx_info->tcb[i] == NULL)
            continue;

        vector_num = tx_info->tcb[i]->intr_vector;
        free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
    }
}
/* NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
            u32 tx_id, int num_txqs)
{
    int i;
    int err;
    int vector_num;

    for (i = 0; i < num_txqs; i++) {
        vector_num = tx_info->tcb[i]->intr_vector;
        sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
                tx_id + tx_info->tcb[i]->id);
        err = request_irq(bnad->msix_table[vector_num].vector,
                  (irq_handler_t)bnad_msix_tx, 0,
                  tx_info->tcb[i]->name,
                  tx_info->tcb[i]);
        if (err)
            goto err_return;
    }

    return 0;

err_return:
    if (i > 0)
        bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
    return -1;
}
/* NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
            int num_rxps)
{
    int i;
    int vector_num;

    for (i = 0; i < num_rxps; i++) {
        if (rx_info->rx_ctrl[i].ccb == NULL)
            continue;

        vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
        free_irq(bnad->msix_table[vector_num].vector,
             rx_info->rx_ctrl[i].ccb);
    }
}
/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
            u32 rx_id, int num_rxps)
{
    int i;
    int err;
    int vector_num;

    for (i = 0; i < num_rxps; i++) {
        vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
        sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
            bnad->netdev->name,
            rx_id + rx_info->rx_ctrl[i].ccb->id);
        err = request_irq(bnad->msix_table[vector_num].vector,
                  (irq_handler_t)bnad_msix_rx, 0,
                  rx_info->rx_ctrl[i].ccb->name,
                  rx_info->rx_ctrl[i].ccb);
        if (err)
            goto err_return;
    }

    return 0;

err_return:
    if (i > 0)
        bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
    return -1;
}
/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
    int i;

    for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
        if (res_info[i].res_type == BNA_RES_T_MEM)
            bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
        else if (res_info[i].res_type == BNA_RES_T_INTR)
            bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
    }
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
          u32 tx_id)
{
    int i, err = 0;

    for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
        if (res_info[i].res_type == BNA_RES_T_MEM)
            err = bnad_mem_alloc(bnad,
                    &res_info[i].res_u.mem_info);
        else if (res_info[i].res_type == BNA_RES_T_INTR)
            err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
                    &res_info[i].res_u.intr_info);
        if (err)
            goto err_return;
    }
    return 0;

err_return:
    bnad_tx_res_free(bnad, res_info);
    return err;
}
/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
    int i;

    for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
        if (res_info[i].res_type == BNA_RES_T_MEM)
            bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
        else if (res_info[i].res_type == BNA_RES_T_INTR)
            bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
    }
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
          u32 rx_id)
{
    int i, err = 0;

    /* All memory needs to be allocated before setup_ccbs */
    for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
        if (res_info[i].res_type == BNA_RES_T_MEM)
            err = bnad_mem_alloc(bnad,
                    &res_info[i].res_u.mem_info);
        else if (res_info[i].res_type == BNA_RES_T_INTR)
            err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
                    &res_info[i].res_u.intr_info);
        if (err)
            goto err_return;
    }
    return 0;

err_return:
    bnad_rx_res_free(bnad, res_info);
    return err;
}
/* Timer callbacks */

/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
    struct bnad *bnad = (struct bnad *)data;
    unsigned long flags;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
    struct bnad *bnad = (struct bnad *)data;
    unsigned long flags;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
    struct bnad *bnad = (struct bnad *)data;
    unsigned long flags;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
    struct bnad *bnad = (struct bnad *)data;
    unsigned long flags;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 * one CPU tests the run-flag bit, while another CPU clears the bit
 * and calls del_timer_sync(); the first CPU then re-arms the timer
 * with mod_timer(), leaving a timer running after teardown.
 */
/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
    struct bnad *bnad = (struct bnad *)data;
    struct bnad_rx_info *rx_info;
    struct bnad_rx_ctrl *rx_ctrl;
    int i, j;
    unsigned long flags;

    if (!netif_carrier_ok(bnad->netdev))
        return;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    for (i = 0; i < bnad->num_rx; i++) {
        rx_info = &bnad->rx_info[i];
        if (!rx_info->rx)
            continue;
        for (j = 0; j < bnad->num_rxp_per_rx; j++) {
            rx_ctrl = &rx_info->rx_ctrl[j];
            if (!rx_ctrl->ccb)
                continue;
            bna_rx_dim_update(rx_ctrl->ccb);
        }
    }

    /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
    if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
        mod_timer(&bnad->dim_timer,
              jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
    struct bnad *bnad = (struct bnad *)data;
    unsigned long flags;

    if (!netif_running(bnad->netdev) ||
        !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
        return;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_hw_stats_get(&bnad->bna);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
    if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
        !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
        setup_timer(&bnad->dim_timer, bnad_dim_timeout,
                (unsigned long)bnad);
        set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
        mod_timer(&bnad->dim_timer,
              jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
    }
}
/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
    unsigned long flags;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
        setup_timer(&bnad->stats_timer, bnad_stats_timeout,
                (unsigned long)bnad);
        mod_timer(&bnad->stats_timer,
              jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
    }
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
    int to_del = 0;
    unsigned long flags;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
        to_del = 1;
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
    if (to_del)
        del_timer_sync(&bnad->stats_timer);
}
/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
    int i = 1; /* Index 0 has broadcast address */
    struct netdev_hw_addr *mc_addr;

    netdev_for_each_mc_addr(mc_addr, netdev) {
        ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
        i++;
    }
}
static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
    struct bnad_rx_ctrl *rx_ctrl =
        container_of(napi, struct bnad_rx_ctrl, napi);
    struct bnad *bnad = rx_ctrl->bnad;
    int rcvd = 0;

    rx_ctrl->rx_poll_ctr++;

    if (!netif_carrier_ok(bnad->netdev))
        goto poll_exit;

    rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
    if (rcvd >= budget)
        return rcvd;

poll_exit:
    napi_complete(napi);

    rx_ctrl->rx_complete++;

    if (rx_ctrl->ccb)
        bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);

    return rcvd;
}
#define BNAD_NAPI_POLL_QUOTA        64
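/*
 * Per-poll work limit handed to netif_napi_add(); bnad_napi_poll_rx()
 * processes at most this many packets before yielding the CPU.
 */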
static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
    struct bnad_rx_ctrl *rx_ctrl;
    int i;

    /* Initialize & enable NAPI */
    for (i = 0; i < bnad->num_rxp_per_rx; i++) {
        rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
        netif_napi_add(bnad->netdev, &rx_ctrl->napi,
                   bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
    }
}

static void
bnad_napi_delete(struct bnad *bnad, u32 rx_id)
{
    int i;

    /* First disable and then clean up */
    for (i = 0; i < bnad->num_rxp_per_rx; i++)
        netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
}
/* Should be held with conf_lock held */
static void
bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
{
    struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
    struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
    unsigned long flags;

    if (!tx_info->tx)
        return;

    init_completion(&bnad->bnad_completions.tx_comp);
    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
    wait_for_completion(&bnad->bnad_completions.tx_comp);

    if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
        bnad_tx_msix_unregister(bnad, tx_info,
            bnad->num_txq_per_tx);

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_tx_destroy(tx_info->tx);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    tx_info->tx = NULL;
    tx_info->tx_id = 0;

    bnad_tx_res_free(bnad, res_info);
}
/* Should be held with conf_lock held */
static int
bnad_setup_tx(struct bnad *bnad, u32 tx_id)
{
    int err;
    struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
    struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
    struct bna_intr_info *intr_info =
            &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
    struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
    static const struct bna_tx_event_cbfn tx_cbfn = {
        .tcb_setup_cbfn = bnad_cb_tcb_setup,
        .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
        .tx_stall_cbfn = bnad_cb_tx_stall,
        .tx_resume_cbfn = bnad_cb_tx_resume,
        .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
    };
    struct bna_tx *tx;
    unsigned long flags;

    tx_info->tx_id = tx_id;

    /* Initialize the Tx object configuration */
    tx_config->num_txq = bnad->num_txq_per_tx;
    tx_config->txq_depth = bnad->txq_depth;
    tx_config->tx_type = BNA_TX_T_REGULAR;
    tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;

    /* Get BNA's resource requirement for one tx object */
    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_tx_res_req(bnad->num_txq_per_tx,
               bnad->txq_depth, res_info);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    /* Fill Unmap Q memory requirements */
    BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
            bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
            bnad->txq_depth));

    /* Allocate resources */
    err = bnad_tx_res_alloc(bnad, res_info, tx_id);
    if (err)
        return err;

    /* Ask BNA to create one Tx object, supplying required resources */
    spin_lock_irqsave(&bnad->bna_lock, flags);
    tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
            tx_info);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
    if (!tx) {
        err = -ENOMEM;
        goto err_return;
    }
    tx_info->tx = tx;

    INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
            (work_func_t)bnad_tx_cleanup);

    /* Register ISR for the Tx object */
    if (intr_info->intr_type == BNA_INTR_T_MSIX) {
        err = bnad_tx_msix_register(bnad, tx_info,
            tx_id, bnad->num_txq_per_tx);
        if (err)
            goto cleanup_tx;
    }

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_tx_enable(tx);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    return 0;

cleanup_tx:
    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_tx_destroy(tx_info->tx);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
    tx_info->tx = NULL;
    tx_info->tx_id = 0;
err_return:
    bnad_tx_res_free(bnad, res_info);
    return err;
}
/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
    memset(rx_config, 0, sizeof(*rx_config));
    rx_config->rx_type = BNA_RX_T_REGULAR;
    rx_config->num_paths = bnad->num_rxp_per_rx;
    rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;

    if (bnad->num_rxp_per_rx > 1) {
        rx_config->rss_status = BNA_STATUS_T_ENABLED;
        rx_config->rss_config.hash_type =
            (BFI_ENET_RSS_IPV6 |
             BFI_ENET_RSS_IPV6_TCP |
             BFI_ENET_RSS_IPV4 |
             BFI_ENET_RSS_IPV4_TCP);
        rx_config->rss_config.hash_mask =
            bnad->num_rxp_per_rx - 1;
        netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
            sizeof(rx_config->rss_config.toeplitz_hash_key));
    } else {
        rx_config->rss_status = BNA_STATUS_T_DISABLED;
        memset(&rx_config->rss_config, 0,
               sizeof(rx_config->rss_config));
    }

    rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
    rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;

    /* BNA_RXP_SINGLE - one data-buffer queue
     * BNA_RXP_SLR - one small-buffer and one large-buffer queues
     * BNA_RXP_HDS - one header-buffer and one data-buffer queues
     */
    /* TODO: configurable param for queue type */
    rx_config->rxp_type = BNA_RXP_SLR;

    if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
        rx_config->frame_size > 4096) {
        /* though size_routing_enable is set in SLR,
         * small packets may get routed to same rxq.
         * set buf_size to 2048 instead of PAGE_SIZE.
         */
        rx_config->q0_buf_size = 2048;
        /* this should be in multiples of 2 */
        rx_config->q0_num_vecs = 4;
        rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
        rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
    } else {
        rx_config->q0_buf_size = rx_config->frame_size;
        rx_config->q0_num_vecs = 1;
        rx_config->q0_depth = bnad->rxq_depth;
    }

    /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
    if (rx_config->rxp_type == BNA_RXP_SLR) {
        rx_config->q1_depth = bnad->rxq_depth;
        rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
    }

    rx_config->vlan_strip_status =
        (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
        BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
}
static void
bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
{
    struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
    int i;

    for (i = 0; i < bnad->num_rxp_per_rx; i++)
        rx_info->rx_ctrl[i].bnad = bnad;
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static u32
bnad_reinit_rx(struct bnad *bnad)
{
    struct net_device *netdev = bnad->netdev;
    u32 err = 0, current_err = 0;
    u32 rx_id = 0, count = 0;
    unsigned long flags;

    /* destroy and create new rx objects */
    for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
        if (!bnad->rx_info[rx_id].rx)
            continue;
        bnad_destroy_rx(bnad, rx_id);
    }

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_enet_mtu_set(&bnad->bna.enet,
             BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
        count++;
        current_err = bnad_setup_rx(bnad, rx_id);
        if (current_err && !err) {
            err = current_err;
            netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
        }
    }

    /* restore rx configuration */
    if (bnad->rx_info[0].rx && !err) {
        bnad_restore_vlans(bnad, 0);
        bnad_enable_default_bcast(bnad);
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        bnad_set_rx_mode(netdev);
    }

    return count;
}
/* Called with bnad_conf_lock() held */
static void
bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
{
    struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
    struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
    struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
    unsigned long flags;
    int to_del = 0;

    if (!rx_info->rx)
        return;

    if (0 == rx_id) {
        spin_lock_irqsave(&bnad->bna_lock, flags);
        if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
            test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
            clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
            to_del = 1;
        }
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        if (to_del)
            del_timer_sync(&bnad->dim_timer);
    }

    init_completion(&bnad->bnad_completions.rx_comp);
    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
    wait_for_completion(&bnad->bnad_completions.rx_comp);

    if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
        bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

    bnad_napi_delete(bnad, rx_id);

    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_rx_destroy(rx_info->rx);

    rx_info->rx = NULL;
    rx_info->rx_id = 0;
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    bnad_rx_res_free(bnad, res_info);
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static int
bnad_setup_rx(struct bnad *bnad, u32 rx_id)
{
    int err;
    struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
    struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
    struct bna_intr_info *intr_info =
            &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
    struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
    static const struct bna_rx_event_cbfn rx_cbfn = {
        .rcb_setup_cbfn = NULL,
        .rcb_destroy_cbfn = NULL,
        .ccb_setup_cbfn = bnad_cb_ccb_setup,
        .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
        .rx_stall_cbfn = bnad_cb_rx_stall,
        .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
        .rx_post_cbfn = bnad_cb_rx_post,
    };
    struct bna_rx *rx;
    unsigned long flags;

    rx_info->rx_id = rx_id;

    /* Initialize the Rx object configuration */
    bnad_init_rx_config(bnad, rx_config);

    /* Get BNA's resource requirement for one Rx object */
    spin_lock_irqsave(&bnad->bna_lock, flags);
    bna_rx_res_req(rx_config, res_info);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    /* Fill Unmap Q memory requirements */
    BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
                 rx_config->num_paths,
            (rx_config->q0_depth *
             sizeof(struct bnad_rx_unmap)) +
             sizeof(struct bnad_rx_unmap_q));

    if (rx_config->rxp_type != BNA_RXP_SINGLE) {
        BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
                     rx_config->num_paths,
                (rx_config->q1_depth *
                 sizeof(struct bnad_rx_unmap) +
                 sizeof(struct bnad_rx_unmap_q)));
    }
    /* Allocate resource */
    err = bnad_rx_res_alloc(bnad, res_info, rx_id);
    if (err)
        return err;

    bnad_rx_ctrl_init(bnad, rx_id);

    /* Ask BNA to create one Rx object, supplying required resources */
    spin_lock_irqsave(&bnad->bna_lock, flags);
    rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
            rx_info);
    if (!rx) {
        err = -ENOMEM;
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        goto err_return;
    }
    rx_info->rx = rx;
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    INIT_WORK(&rx_info->rx_cleanup_work,
            (work_func_t)(bnad_rx_cleanup));

    /*
     * Init NAPI, so that state is set to NAPI_STATE_SCHED,
     * so that IRQ handler cannot schedule NAPI at this point.
     */
    bnad_napi_add(bnad, rx_id);

    /* Register ISR for the Rx object */
    if (intr_info->intr_type == BNA_INTR_T_MSIX) {
        err = bnad_rx_msix_register(bnad, rx_info, rx_id,
                        rx_config->num_paths);
        if (err)
            goto err_return;
    }

    spin_lock_irqsave(&bnad->bna_lock, flags);
    if (0 == rx_id) {
        /* Set up Dynamic Interrupt Moderation Vector */
        if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
            bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);

        /* Enable VLAN filtering only on the default Rx */
        bna_rx_vlanfilter_enable(rx);

        /* Start the DIM timer */
        bnad_dim_timer_start(bnad);
    }

    bna_rx_enable(rx);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    return 0;

err_return:
    bnad_destroy_rx(bnad, rx_id);
    return err;
}
/* Called with conf_lock & bnad->bna_lock held */
void
bnad_tx_coalescing_timeo_set(struct bnad *bnad)
{
    struct bnad_tx_info *tx_info;

    tx_info = &bnad->tx_info[0];
    if (!tx_info->tx)
        return;

    bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
}

/* Called with conf_lock & bnad->bna_lock held */
void
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
{
    struct bnad_rx_info *rx_info;
    int i;

    for (i = 0; i < bnad->num_rx; i++) {
        rx_info = &bnad->rx_info[i];
        if (!rx_info->rx)
            continue;
        bna_rx_coalescing_timeo_set(rx_info->rx,
                        bnad->rx_coalescing_timeo);
    }
}
/*
 * Called with bnad->bna_lock held
 */
int
bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
{
    int ret;

    if (!is_valid_ether_addr(mac_addr))
        return -EADDRNOTAVAIL;

    /* If datapath is down, pretend everything went through */
    if (!bnad->rx_info[0].rx)
        return 0;

    ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
    if (ret != BNA_CB_SUCCESS)
        return -EADDRNOTAVAIL;

    return 0;
}
/* Should be called with conf_lock held */
static int
bnad_enable_default_bcast(struct bnad *bnad)
{
    struct bnad_rx_info *rx_info = &bnad->rx_info[0];
    int ret;
    unsigned long flags;

    init_completion(&bnad->bnad_completions.mcast_comp);

    spin_lock_irqsave(&bnad->bna_lock, flags);
    ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
                   bnad_cb_rx_mcast_add);
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    if (ret == BNA_CB_SUCCESS)
        wait_for_completion(&bnad->bnad_completions.mcast_comp);
    else
        return -ENODEV;

    if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
        return -ENODEV;

    return 0;
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
    u16 vid;
    unsigned long flags;

    for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
    }
}
/* Statistics utilities */
void
bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
    int i, j;

    for (i = 0; i < bnad->num_rx; i++) {
        for (j = 0; j < bnad->num_rxp_per_rx; j++) {
            if (bnad->rx_info[i].rx_ctrl[j].ccb) {
                stats->rx_packets += bnad->rx_info[i].
                rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
                stats->rx_bytes += bnad->rx_info[i].
                    rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
                if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
                    bnad->rx_info[i].rx_ctrl[j].ccb->
                    rcb[1]->rxq) {
                    stats->rx_packets +=
                        bnad->rx_info[i].rx_ctrl[j].
                        ccb->rcb[1]->rxq->rx_packets;
                    stats->rx_bytes +=
                        bnad->rx_info[i].rx_ctrl[j].
                        ccb->rcb[1]->rxq->rx_bytes;
                }
            }
        }
    }
    for (i = 0; i < bnad->num_tx; i++) {
        for (j = 0; j < bnad->num_txq_per_tx; j++) {
            if (bnad->tx_info[i].tcb[j]) {
                stats->tx_packets +=
                bnad->tx_info[i].tcb[j]->txq->tx_packets;
                stats->tx_bytes +=
                    bnad->tx_info[i].tcb[j]->txq->tx_bytes;
            }
        }
    }
}
/*
 * Must be called with the bna_lock held.
 */
void
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
    struct bfi_enet_stats_mac *mac_stats;
    u32 bmap;
    int i;

    mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
    stats->rx_errors =
        mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
        mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
        mac_stats->rx_undersize;
    stats->tx_errors = mac_stats->tx_fcs_error +
                    mac_stats->tx_undersize;
    stats->rx_dropped = mac_stats->rx_drop;
    stats->tx_dropped = mac_stats->tx_drop;
    stats->multicast = mac_stats->rx_multicast;
    stats->collisions = mac_stats->tx_total_collision;

    stats->rx_length_errors = mac_stats->rx_frame_length_error;

    /* receive ring buffer overflow ?? */

    stats->rx_crc_errors = mac_stats->rx_fcs_error;
    stats->rx_frame_errors = mac_stats->rx_alignment_error;
    /* receiver fifo overrun */
    bmap = bna_rx_rid_mask(&bnad->bna);
    for (i = 0; bmap; i++) {
        if (bmap & 1) {
            stats->rx_fifo_errors +=
                bnad->stats.bna_stats->
                    hw_stats.rxf_stats[i].frame_drops;
            break;
        }
        bmap >>= 1;
    }
}
static void
bnad_mbox_irq_sync(struct bnad *bnad)
{
    u32 irq;
    unsigned long flags;

    spin_lock_irqsave(&bnad->bna_lock, flags);
    if (bnad->cfg_flags & BNAD_CF_MSIX)
        irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
    else
        irq = bnad->pcidev->irq;
    spin_unlock_irqrestore(&bnad->bna_lock, flags);

    synchronize_irq(irq);
}
/* Utility used by bnad_start_xmit, for doing TSO */
static int
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
    int err;

    err = skb_cow_head(skb, 0);
    if (err < 0) {
        BNAD_UPDATE_CTR(bnad, tso_err);
        return err;
    }

    /*
     * For TSO, the TCP checksum field is seeded with pseudo-header sum
     * excluding the length field.
     */
    if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
        struct iphdr *iph = ip_hdr(skb);

        /* Do we really need these? */
        iph->tot_len = 0;
        iph->check = 0;

        tcp_hdr(skb)->check =
            ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
                       IPPROTO_TCP, 0);
        BNAD_UPDATE_CTR(bnad, tso4);
    } else {
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);

        ipv6h->payload_len = 0;
        tcp_hdr(skb)->check =
            ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
                     IPPROTO_TCP, 0);
        BNAD_UPDATE_CTR(bnad, tso6);
    }

    return 0;
}
/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags
 * access.
 */
static void
bnad_q_num_init(struct bnad *bnad)
{
    int rxps;

    rxps = min((uint)num_online_cpus(),
           (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));

    if (!(bnad->cfg_flags & BNAD_CF_MSIX))
        rxps = 1;   /* INTx */

    bnad->num_rx = 1;
    bnad->num_tx = 1;
    bnad->num_rxp_per_rx = rxps;
    bnad->num_txq_per_tx = BNAD_TXQ_NUM;
}
2566 /*
2567 * Adjusts the Q numbers, given a number of MSI-X vectors.
2568 * Give preference to RSS as opposed to Tx priority Queues;
2569 * in such a case, just use 1 Tx Q.
2570 * Called with bnad->bna_lock held because of cfg_flags access.
2571 */
2572 static void
2573 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2574 {
2575 bnad->num_txq_per_tx = 1;
2576 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2577 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2578 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2579 bnad->num_rxp_per_rx = msix_vectors -
2580 (bnad->num_tx * bnad->num_txq_per_tx) -
2581 BNAD_MAILBOX_MSIX_VECTORS;
2582 } else
2583 bnad->num_rxp_per_rx = 1;
2584 }
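/*
 * Worked example (illustrative; assumes BNAD_MAILBOX_MSIX_VECTORS == 1 as
 * defined in bnad.h): with num_tx = 1 and num_txq_per_tx forced to 1 above,
 * msix_vectors = 8 leaves num_rxp_per_rx = 8 - 1 - 1 = 6 Rx paths; if the
 * vector budget cannot cover 1 TxQ + bnad_rxqs_per_cq + the mailbox, the
 * driver falls back to a single Rx path.
 */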
2586 /* Enable / disable ioceth */
2587 static int
2588 bnad_ioceth_disable(struct bnad *bnad)
2589 {
2590 unsigned long flags;
2591 int err = 0;
2593 spin_lock_irqsave(&bnad->bna_lock, flags);
2594 init_completion(&bnad->bnad_completions.ioc_comp);
2595 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2596 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2598 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2599 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2601 err = bnad->bnad_completions.ioc_comp_status;
2602 return err;
2603 }
2605 static int
2606 bnad_ioceth_enable(struct bnad *bnad)
2607 {
2608 int err = 0;
2609 unsigned long flags;
2611 spin_lock_irqsave(&bnad->bna_lock, flags);
2612 init_completion(&bnad->bnad_completions.ioc_comp);
2613 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2614 bna_ioceth_enable(&bnad->bna.ioceth);
2615 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2617 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2618 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2620 err = bnad->bnad_completions.ioc_comp_status;
2622 return err;
2623 }
2625 /* Free BNA resources */
2626 static void
2627 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2628 u32 res_val_max)
2629 {
2630 int i;
2632 for (i = 0; i < res_val_max; i++)
2633 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2634 }
2636 /* Allocates memory and interrupt resources for BNA */
2637 static int
2638 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2639 u32 res_val_max)
2640 {
2641 int i, err;
2643 for (i = 0; i < res_val_max; i++) {
2644 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2645 if (err)
2646 goto err_return;
2647 }
2648 return 0;
2650 err_return:
2651 bnad_res_free(bnad, res_info, res_val_max);
2652 return err;
2653 }
2655 /* Interrupt enable / disable */
2656 static void
2657 bnad_enable_msix(struct bnad *bnad)
2658 {
2659 int i, ret;
2660 unsigned long flags;
2662 spin_lock_irqsave(&bnad->bna_lock, flags);
2663 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2664 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2665 return;
2666 }
2667 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2669 if (bnad->msix_table)
2670 return;
2672 bnad->msix_table =
2673 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2675 if (!bnad->msix_table)
2676 goto intx_mode;
2678 for (i = 0; i < bnad->msix_num; i++)
2679 bnad->msix_table[i].entry = i;
2681 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2682 1, bnad->msix_num);
2683 if (ret < 0) {
2684 goto intx_mode;
2685 } else if (ret < bnad->msix_num) {
2686 dev_warn(&bnad->pcidev->dev,
2687 "%d MSI-X vectors allocated < %d requested\n",
2688 ret, bnad->msix_num);
2690 spin_lock_irqsave(&bnad->bna_lock, flags);
2691 /* ret = #of vectors that we got */
2692 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2693 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2694 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2696 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2697 BNAD_MAILBOX_MSIX_VECTORS;
2699 if (bnad->msix_num > ret) {
2700 pci_disable_msix(bnad->pcidev);
2701 goto intx_mode;
2702 }
2703 }
2705 pci_intx(bnad->pcidev, 0);
2707 return;
2709 intx_mode:
2710 dev_warn(&bnad->pcidev->dev,
2711 "MSI-X enable failed - operating in INTx mode\n");
2713 kfree(bnad->msix_table);
2714 bnad->msix_table = NULL;
2715 bnad->msix_num = 0;
2716 spin_lock_irqsave(&bnad->bna_lock, flags);
2717 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2718 bnad_q_num_init(bnad);
2719 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2720 }
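/*
 * Note on the partial-allocation path above: pci_enable_msix_range() may
 * grant fewer vectors than requested. The driver then re-splits the queues
 * with bnad_q_num_adjust() using half of the non-mailbox vectors, and drops
 * to INTx mode only if the recomputed minimum layout (BNAD_NUM_TXQ +
 * BNAD_NUM_RXP + the mailbox vectors) still exceeds what was granted.
 */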
2722 static void
2723 bnad_disable_msix(struct bnad *bnad)
2724 {
2725 u32 cfg_flags;
2726 unsigned long flags;
2728 spin_lock_irqsave(&bnad->bna_lock, flags);
2729 cfg_flags = bnad->cfg_flags;
2730 if (bnad->cfg_flags & BNAD_CF_MSIX)
2731 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2732 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2734 if (cfg_flags & BNAD_CF_MSIX) {
2735 pci_disable_msix(bnad->pcidev);
2736 kfree(bnad->msix_table);
2737 bnad->msix_table = NULL;
2738 }
2739 }
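/*
 * Note: the BNAD_CF_MSIX flag is snapshotted and cleared under bna_lock,
 * while pci_disable_msix() runs after the lock is dropped, as it is not
 * meant to be called from atomic context.
 */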
2741 /* Netdev entry points */
2742 static int
2743 bnad_open(struct net_device *netdev)
2744 {
2745 int err;
2746 struct bnad *bnad = netdev_priv(netdev);
2747 struct bna_pause_config pause_config;
2748 unsigned long flags;
2750 mutex_lock(&bnad->conf_mutex);
2752 /* Tx */
2753 err = bnad_setup_tx(bnad, 0);
2754 if (err)
2755 goto err_return;
2757 /* Rx */
2758 err = bnad_setup_rx(bnad, 0);
2759 if (err)
2760 goto cleanup_tx;
2762 /* Port */
2763 pause_config.tx_pause = 0;
2764 pause_config.rx_pause = 0;
2766 spin_lock_irqsave(&bnad->bna_lock, flags);
2767 bna_enet_mtu_set(&bnad->bna.enet,
2768 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2769 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2770 bna_enet_enable(&bnad->bna.enet);
2771 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2773 /* Enable broadcast */
2774 bnad_enable_default_bcast(bnad);
2776 /* Restore VLANs, if any */
2777 bnad_restore_vlans(bnad, 0);
2779 /* Set the UCAST address */
2780 spin_lock_irqsave(&bnad->bna_lock, flags);
2781 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2782 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2784 /* Start the stats timer */
2785 bnad_stats_timer_start(bnad);
2787 mutex_unlock(&bnad->conf_mutex);
2789 return 0;
2791 cleanup_tx:
2792 bnad_destroy_tx(bnad, 0);
2794 err_return:
2795 mutex_unlock(&bnad->conf_mutex);
2796 return err;
2797 }
2799 static int
2800 bnad_stop(struct net_device *netdev)
2801 {
2802 struct bnad *bnad = netdev_priv(netdev);
2803 unsigned long flags;
2805 mutex_lock(&bnad->conf_mutex);
2807 /* Stop the stats timer */
2808 bnad_stats_timer_stop(bnad);
2810 init_completion(&bnad->bnad_completions.enet_comp);
2812 spin_lock_irqsave(&bnad->bna_lock, flags);
2813 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2814 bnad_cb_enet_disabled);
2815 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2817 wait_for_completion(&bnad->bnad_completions.enet_comp);
2819 bnad_destroy_tx(bnad, 0);
2820 bnad_destroy_rx(bnad, 0);
2822 /* Synchronize mailbox IRQ */
2823 bnad_mbox_irq_sync(bnad);
2825 mutex_unlock(&bnad->conf_mutex);
2827 return 0;
2828 }
2831 /* Returns 0 for success */
2833 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2834 struct sk_buff *skb, struct bna_txq_entry *txqent)
2840 if (skb_vlan_tag_present(skb)) {
2841 vlan_tag = (u16)skb_vlan_tag_get(skb);
2842 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2844 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2845 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2846 | (vlan_tag & 0x1fff);
2847 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2849 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2851 if (skb_is_gso(skb)) {
2852 gso_size = skb_shinfo(skb)->gso_size;
2853 if (unlikely(gso_size > bnad->netdev->mtu)) {
2854 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2857 if (unlikely((gso_size + skb_transport_offset(skb) +
2858 tcp_hdrlen(skb)) >= skb->len)) {
2859 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2860 txqent->hdr.wi.lso_mss = 0;
2861 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2863 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2864 txqent->hdr.wi.lso_mss = htons(gso_size);
2867 if (bnad_tso_prepare(bnad, skb)) {
2868 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2872 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2873 txqent->hdr.wi.l4_hdr_size_n_offset =
2874 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2875 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2877 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2878 txqent->hdr.wi.lso_mss = 0;
2880 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2881 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2885 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2886 __be16 net_proto = vlan_get_protocol(skb);
2889 if (net_proto == htons(ETH_P_IP))
2890 proto = ip_hdr(skb)->protocol;
2891 #ifdef NETIF_F_IPV6_CSUM
2892 else if (net_proto == htons(ETH_P_IPV6)) {
2893 /* nexthdr may not be TCP immediately. */
2894 proto = ipv6_hdr(skb)->nexthdr;
2897 if (proto == IPPROTO_TCP) {
2898 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2899 txqent->hdr.wi.l4_hdr_size_n_offset =
2900 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2901 (0, skb_transport_offset(skb)));
2903 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2905 if (unlikely(skb_headlen(skb) <
2906 skb_transport_offset(skb) +
2908 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2911 } else if (proto == IPPROTO_UDP) {
2912 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2913 txqent->hdr.wi.l4_hdr_size_n_offset =
2914 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2915 (0, skb_transport_offset(skb)));
2917 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2918 if (unlikely(skb_headlen(skb) <
2919 skb_transport_offset(skb) +
2920 sizeof(struct udphdr))) {
2921 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2926 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2930 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2933 txqent->hdr.wi.flags = htons(flags);
2934 txqent->hdr.wi.frame_length = htonl(skb->len);
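/*
 * Illustrative note (not from the original source): with CEE running, the
 * tag programmed above is rebuilt as tcb->priority in the top 3 bits
 * (VLAN_PRIO_SHIFT == 13) plus the low 13 bits (CFI + VID) of the original
 * tag, so the DCB priority overrides whatever priority the stack supplied.
 */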
2939 /*
2940 * bnad_start_xmit : Netdev entry point for Transmit
2941 * Called under lock held by net_device
2942 */
2943 static netdev_tx_t
2944 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2945 {
2946 struct bnad *bnad = netdev_priv(netdev);
2947 u32 txq_id = 0;
2948 struct bna_tcb *tcb = NULL;
2949 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2950 u32 prod, q_depth, vect_id;
2951 u32 wis, vectors, len;
2952 int i;
2953 dma_addr_t dma_addr;
2954 struct bna_txq_entry *txqent;
2956 len = skb_headlen(skb);
2958 /* Sanity checks for the skb */
2960 if (unlikely(skb->len <= ETH_HLEN)) {
2961 dev_kfree_skb_any(skb);
2962 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2963 return NETDEV_TX_OK;
2964 }
2965 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2966 dev_kfree_skb_any(skb);
2967 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2968 return NETDEV_TX_OK;
2969 }
2970 if (unlikely(len == 0)) {
2971 dev_kfree_skb_any(skb);
2972 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2973 return NETDEV_TX_OK;
2974 }
2976 tcb = bnad->tx_info[0].tcb[txq_id];
2978 /*
2979 * Takes care of the Tx that is scheduled between clearing the flag
2980 * and the netif_tx_stop_all_queues() call.
2981 */
2982 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2983 dev_kfree_skb_any(skb);
2984 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2985 return NETDEV_TX_OK;
2986 }
2988 q_depth = tcb->q_depth;
2989 prod = tcb->producer_index;
2990 unmap_q = tcb->unmap_q;
2992 vectors = 1 + skb_shinfo(skb)->nr_frags;
2993 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2995 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2996 dev_kfree_skb_any(skb);
2997 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2998 return NETDEV_TX_OK;
2999 }
3001 /* Check for available TxQ resources */
3002 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3003 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
3004 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
3005 u32 sent;
3006 sent = bnad_txcmpl_process(bnad, tcb);
3007 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3008 bna_ib_ack(tcb->i_dbell, sent);
3009 smp_mb__before_atomic();
3010 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
3011 } else {
3012 netif_stop_queue(netdev);
3013 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3014 }
3016 smp_mb();
3017 /*
3018 * Check again to deal with race condition between
3019 * netif_stop_queue here, and netif_wake_queue in
3020 * interrupt handler which is not inside netif tx lock.
3021 */
3022 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3023 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3024 return NETDEV_TX_BUSY;
3025 } else {
3026 netif_wake_queue(netdev);
3027 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3028 }
3029 }
3031 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3032 head_unmap = &unmap_q[prod];
3034 /* Program the opcode, flags, frame_len, num_vectors in WI */
3035 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3036 dev_kfree_skb_any(skb);
3037 return NETDEV_TX_OK;
3038 }
3039 txqent->hdr.wi.reserved = 0;
3040 txqent->hdr.wi.num_vectors = vectors;
3042 head_unmap->skb = skb;
3043 head_unmap->nvecs = 0;
3045 /* Program the vectors */
3046 unmap = head_unmap;
3047 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3048 len, DMA_TO_DEVICE);
3049 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3050 dev_kfree_skb_any(skb);
3051 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3052 return NETDEV_TX_OK;
3053 }
3054 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3055 txqent->vector[0].length = htons(len);
3056 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3057 head_unmap->nvecs++;
3059 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3060 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3061 u32 size = skb_frag_size(frag);
3063 if (unlikely(size == 0)) {
3064 /* Undo the changes starting at tcb->producer_index */
3065 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3066 tcb->producer_index);
3067 dev_kfree_skb_any(skb);
3068 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3069 return NETDEV_TX_OK;
3070 }
3072 len += size;
3074 vect_id++;
3075 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3076 vect_id = 0;
3077 BNA_QE_INDX_INC(prod, q_depth);
3078 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3079 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3080 unmap = &unmap_q[prod];
3081 }
3083 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3084 0, size, DMA_TO_DEVICE);
3085 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3086 /* Undo the changes starting at tcb->producer_index */
3087 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3088 tcb->producer_index);
3089 dev_kfree_skb_any(skb);
3090 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3091 return NETDEV_TX_OK;
3092 }
3094 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3095 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3096 txqent->vector[vect_id].length = htons(size);
3097 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3098 dma_addr);
3099 head_unmap->nvecs++;
3100 }
3102 if (unlikely(len != skb->len)) {
3103 /* Undo the changes starting at tcb->producer_index */
3104 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3105 dev_kfree_skb_any(skb);
3106 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3107 return NETDEV_TX_OK;
3108 }
3110 BNA_QE_INDX_INC(prod, q_depth);
3111 tcb->producer_index = prod;
3113 smp_mb();
3115 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3116 return NETDEV_TX_OK;
3118 skb_tx_timestamp(skb);
3120 bna_txq_prod_indx_doorbell(tcb);
3121 smp_mb();
3123 return NETDEV_TX_OK;
3124 }
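/*
 * Worked example (illustrative): per the "4 vectors per work item" comment
 * above, an skb with a linear area plus 6 frags has vectors = 7, so
 * BNA_TXQ_WI_NEEDED(7) = 2 queue entries are reserved: the base WI carries
 * vectors 0-3 and one BNA_TXQ_WI_EXTENSION entry carries the remaining 3.
 */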
3127 * Used spin_lock to synchronize reading of stats structures, which
3128 * is written by BNA under the same lock.
3130 static struct rtnl_link_stats64 *
3131 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3133 struct bnad *bnad = netdev_priv(netdev);
3134 unsigned long flags;
3136 spin_lock_irqsave(&bnad->bna_lock, flags);
3138 bnad_netdev_qstats_fill(bnad, stats);
3139 bnad_netdev_hwstats_fill(bnad, stats);
3141 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3146 static void
3147 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3148 {
3149 struct net_device *netdev = bnad->netdev;
3150 int uc_count = netdev_uc_count(netdev);
3151 enum bna_cb_status ret;
3152 u8 *mac_list;
3153 struct netdev_hw_addr *ha;
3154 int entry;
3156 if (netdev_uc_empty(bnad->netdev)) {
3157 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3158 return;
3159 }
3161 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3162 goto mode_default;
3164 mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3165 if (mac_list == NULL)
3166 goto mode_default;
3168 entry = 0;
3169 netdev_for_each_uc_addr(ha, netdev) {
3170 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3171 entry++;
3172 }
3174 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3175 kfree(mac_list);
3177 if (ret != BNA_CB_SUCCESS)
3178 goto mode_default;
3180 return;
3182 /* ucast packets not in UCAM are routed to default function */
3183 mode_default:
3184 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3185 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3186 }
3188 static void
3189 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3190 {
3191 struct net_device *netdev = bnad->netdev;
3192 int mc_count = netdev_mc_count(netdev);
3193 enum bna_cb_status ret;
3194 u8 *mac_list;
3196 if (netdev->flags & IFF_ALLMULTI)
3197 goto mode_allmulti;
3199 if (netdev_mc_empty(netdev))
3200 return;
3202 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3203 goto mode_allmulti;
3205 mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3207 if (mac_list == NULL)
3208 goto mode_allmulti;
3210 ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3212 /* copy rest of the MCAST addresses */
3213 bnad_netdev_mc_list_get(netdev, mac_list);
3214 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3215 kfree(mac_list);
3217 if (ret != BNA_CB_SUCCESS)
3218 goto mode_allmulti;
3220 return;
3222 mode_allmulti:
3223 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3224 bna_rx_mcast_delall(bnad->rx_info[0].rx);
3225 }
3227 void
3228 bnad_set_rx_mode(struct net_device *netdev)
3229 {
3230 struct bnad *bnad = netdev_priv(netdev);
3231 enum bna_rxmode new_mode, mode_mask;
3232 unsigned long flags;
3234 spin_lock_irqsave(&bnad->bna_lock, flags);
3236 if (bnad->rx_info[0].rx == NULL) {
3237 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3238 return;
3239 }
3241 /* clear bnad flags to update it with new settings */
3242 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3243 BNAD_CF_ALLMULTI);
3245 new_mode = 0;
3246 if (netdev->flags & IFF_PROMISC) {
3247 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3248 bnad->cfg_flags |= BNAD_CF_PROMISC;
3249 } else {
3250 bnad_set_rx_mcast_fltr(bnad);
3252 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3253 new_mode |= BNA_RXMODE_ALLMULTI;
3255 bnad_set_rx_ucast_fltr(bnad);
3257 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3258 new_mode |= BNA_RXMODE_DEFAULT;
3259 }
3261 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3262 BNA_RXMODE_ALLMULTI;
3263 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3265 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3266 }
3268 /*
3269 * bna_lock is used to sync writes to netdev->addr
3270 * conf_lock cannot be used since this call may be made
3271 * in a non-blocking context.
3272 */
3273 static int
3274 bnad_set_mac_address(struct net_device *netdev, void *addr)
3275 {
3276 int err;
3277 struct bnad *bnad = netdev_priv(netdev);
3278 struct sockaddr *sa = (struct sockaddr *)addr;
3279 unsigned long flags;
3281 spin_lock_irqsave(&bnad->bna_lock, flags);
3283 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3284 if (!err)
3285 ether_addr_copy(netdev->dev_addr, sa->sa_data);
3287 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3289 return err;
3290 }
3292 static int
3293 bnad_mtu_set(struct bnad *bnad, int frame_size)
3294 {
3295 unsigned long flags;
3297 init_completion(&bnad->bnad_completions.mtu_comp);
3299 spin_lock_irqsave(&bnad->bna_lock, flags);
3300 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3301 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3303 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3305 return bnad->bnad_completions.mtu_comp_status;
3306 }
3308 static int
3309 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3310 {
3311 int err, mtu;
3312 struct bnad *bnad = netdev_priv(netdev);
3313 u32 rx_count = 0, frame, new_frame;
3315 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3316 return -EINVAL;
3318 mutex_lock(&bnad->conf_mutex);
3320 mtu = netdev->mtu;
3321 netdev->mtu = new_mtu;
3323 frame = BNAD_FRAME_SIZE(mtu);
3324 new_frame = BNAD_FRAME_SIZE(new_mtu);
3326 /* check if multi-buffer needs to be enabled */
3327 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3328 netif_running(bnad->netdev)) {
3329 /* only when transition is over 4K */
3330 if ((frame <= 4096 && new_frame > 4096) ||
3331 (frame > 4096 && new_frame <= 4096))
3332 rx_count = bnad_reinit_rx(bnad);
3333 }
3335 /* rx_count > 0 - new rx created
3336 * - Linux set err = 0 and return
3337 */
3338 err = bnad_mtu_set(bnad, new_frame);
3339 if (err)
3340 err = -EBUSY;
3342 mutex_unlock(&bnad->conf_mutex);
3343 return err;
3344 }
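/*
 * Note (assumes BNAD_FRAME_SIZE() adds the Ethernet header and FCS to the
 * MTU, per bnad.h): on CAT2 adapters a frame size crossing the 4096-byte
 * boundary in either direction toggles single- vs. multi-buffer Rx, which
 * is why the Rx path is rebuilt via bnad_reinit_rx() before bnad_mtu_set()
 * pushes the new frame size to the hardware.
 */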
3346 static int
3347 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3348 {
3349 struct bnad *bnad = netdev_priv(netdev);
3350 unsigned long flags;
3352 if (!bnad->rx_info[0].rx)
3353 return 0;
3355 mutex_lock(&bnad->conf_mutex);
3357 spin_lock_irqsave(&bnad->bna_lock, flags);
3358 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3359 set_bit(vid, bnad->active_vlans);
3360 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3362 mutex_unlock(&bnad->conf_mutex);
3364 return 0;
3365 }
3367 static int
3368 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3369 {
3370 struct bnad *bnad = netdev_priv(netdev);
3371 unsigned long flags;
3373 if (!bnad->rx_info[0].rx)
3374 return 0;
3376 mutex_lock(&bnad->conf_mutex);
3378 spin_lock_irqsave(&bnad->bna_lock, flags);
3379 clear_bit(vid, bnad->active_vlans);
3380 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3381 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3383 mutex_unlock(&bnad->conf_mutex);
3385 return 0;
3386 }
3388 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3389 {
3390 struct bnad *bnad = netdev_priv(dev);
3391 netdev_features_t changed = features ^ dev->features;
3393 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3394 unsigned long flags;
3396 spin_lock_irqsave(&bnad->bna_lock, flags);
3398 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3399 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3400 else
3401 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3403 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3404 }
3406 return 0;
3407 }
3409 #ifdef CONFIG_NET_POLL_CONTROLLER
3410 static void
3411 bnad_netpoll(struct net_device *netdev)
3412 {
3413 struct bnad *bnad = netdev_priv(netdev);
3414 struct bnad_rx_info *rx_info;
3415 struct bnad_rx_ctrl *rx_ctrl;
3416 u32 curr_mask;
3417 int i, j;
3419 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3420 bna_intx_disable(&bnad->bna, curr_mask);
3421 bnad_isr(bnad->pcidev->irq, netdev);
3422 bna_intx_enable(&bnad->bna, curr_mask);
3423 } else {
3424 /*
3425 * Tx processing may happen in sending context, so no need
3426 * to explicitly process completions here
3427 */
3429 /* Rx processing */
3430 for (i = 0; i < bnad->num_rx; i++) {
3431 rx_info = &bnad->rx_info[i];
3432 if (!rx_info->rx)
3433 continue;
3434 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3435 rx_ctrl = &rx_info->rx_ctrl[j];
3436 if (rx_ctrl->ccb)
3437 bnad_netif_rx_schedule_poll(bnad,
3438 rx_ctrl->ccb);
3439 }
3440 }
3441 }
3442 }
3443 #endif
3445 static const struct net_device_ops bnad_netdev_ops = {
3446 .ndo_open = bnad_open,
3447 .ndo_stop = bnad_stop,
3448 .ndo_start_xmit = bnad_start_xmit,
3449 .ndo_get_stats64 = bnad_get_stats64,
3450 .ndo_set_rx_mode = bnad_set_rx_mode,
3451 .ndo_validate_addr = eth_validate_addr,
3452 .ndo_set_mac_address = bnad_set_mac_address,
3453 .ndo_change_mtu = bnad_change_mtu,
3454 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3455 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3456 .ndo_set_features = bnad_set_features,
3457 #ifdef CONFIG_NET_POLL_CONTROLLER
3458 .ndo_poll_controller = bnad_netpoll
3459 #endif
3460 };
3462 static void
3463 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3464 {
3465 struct net_device *netdev = bnad->netdev;
3467 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3468 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3469 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3470 NETIF_F_HW_VLAN_CTAG_RX;
3472 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3473 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3474 NETIF_F_TSO | NETIF_F_TSO6;
3476 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3478 if (using_dac)
3479 netdev->features |= NETIF_F_HIGHDMA;
3481 netdev->mem_start = bnad->mmio_start;
3482 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3484 netdev->netdev_ops = &bnad_netdev_ops;
3485 bnad_set_ethtool_ops(netdev);
3486 }
3488 /*
3489 * 1. Initialize the bnad structure
3490 * 2. Setup netdev pointer in pci_dev
3491 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3492 * 4. Initialize work queue.
3493 */
3494 static int
3495 bnad_init(struct bnad *bnad,
3496 struct pci_dev *pdev, struct net_device *netdev)
3497 {
3498 unsigned long flags;
3500 SET_NETDEV_DEV(netdev, &pdev->dev);
3501 pci_set_drvdata(pdev, netdev);
3503 bnad->netdev = netdev;
3504 bnad->pcidev = pdev;
3505 bnad->mmio_start = pci_resource_start(pdev, 0);
3506 bnad->mmio_len = pci_resource_len(pdev, 0);
3507 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3508 if (!bnad->bar0) {
3509 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3510 return -ENOMEM;
3511 }
3512 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3513 (unsigned long long) bnad->mmio_len);
3515 spin_lock_irqsave(&bnad->bna_lock, flags);
3516 if (!bnad_msix_disable)
3517 bnad->cfg_flags = BNAD_CF_MSIX;
3519 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3521 bnad_q_num_init(bnad);
3522 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3524 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3525 (bnad->num_rx * bnad->num_rxp_per_rx) +
3526 BNAD_MAILBOX_MSIX_VECTORS;
3528 bnad->txq_depth = BNAD_TXQ_DEPTH;
3529 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3531 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3532 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3534 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3535 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3536 if (!bnad->work_q) {
3537 iounmap(bnad->bar0);
3538 return -ENOMEM;
3539 }
3541 return 0;
3542 }
3544 /*
3545 * Must be called after bnad_pci_uninit()
3546 * so that iounmap() and pci_set_drvdata(NULL)
3547 * happens only after PCI uninitialization.
3548 */
3549 static void
3550 bnad_uninit(struct bnad *bnad)
3551 {
3552 if (bnad->work_q) {
3553 flush_workqueue(bnad->work_q);
3554 destroy_workqueue(bnad->work_q);
3555 bnad->work_q = NULL;
3556 }
3558 if (bnad->bar0)
3559 iounmap(bnad->bar0);
3560 }
3562 /*
3563 * Initialize locks
3564 * a) Per-ioceth mutex used for serializing configuration
3565 * changes from OS interface
3566 * b) spin lock used to protect bna state machine
3567 */
3568 static void
3569 bnad_lock_init(struct bnad *bnad)
3570 {
3571 spin_lock_init(&bnad->bna_lock);
3572 mutex_init(&bnad->conf_mutex);
3573 mutex_init(&bnad_list_mutex);
3574 }
3576 static void
3577 bnad_lock_uninit(struct bnad *bnad)
3578 {
3579 mutex_destroy(&bnad->conf_mutex);
3580 mutex_destroy(&bnad_list_mutex);
3581 }
3583 /* PCI Initialization */
3584 static int
3585 bnad_pci_init(struct bnad *bnad,
3586 struct pci_dev *pdev, bool *using_dac)
3587 {
3588 int err;
3590 err = pci_enable_device(pdev);
3591 if (err)
3592 return err;
3593 err = pci_request_regions(pdev, BNAD_NAME);
3594 if (err)
3595 goto disable_device;
3596 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3597 *using_dac = true;
3598 } else {
3599 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3600 if (err)
3601 goto release_regions;
3602 *using_dac = false;
3603 }
3604 pci_set_master(pdev);
3605 return 0;
3607 release_regions:
3608 pci_release_regions(pdev);
3609 disable_device:
3610 pci_disable_device(pdev);
3612 return err;
3613 }
3615 static void
3616 bnad_pci_uninit(struct pci_dev *pdev)
3617 {
3618 pci_release_regions(pdev);
3619 pci_disable_device(pdev);
3620 }
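/*
 * Note: the DMA setup above first tries a 64-bit mask (*using_dac = true,
 * which later enables NETIF_F_HIGHDMA in bnad_netdev_init()) and falls back
 * to a 32-bit mask only when the platform cannot address 64 bits.
 */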
3622 static int
3623 bnad_pci_probe(struct pci_dev *pdev,
3624 const struct pci_device_id *pcidev_id)
3625 {
3626 bool using_dac;
3627 int err;
3628 struct bnad *bnad;
3629 struct bna *bna;
3630 struct net_device *netdev;
3631 struct bfa_pcidev pcidev_info;
3632 unsigned long flags;
3634 mutex_lock(&bnad_fwimg_mutex);
3635 if (!cna_get_firmware_buf(pdev)) {
3636 mutex_unlock(&bnad_fwimg_mutex);
3637 dev_err(&pdev->dev, "failed to load firmware image!\n");
3638 return -ENODEV;
3639 }
3640 mutex_unlock(&bnad_fwimg_mutex);
3642 /*
3643 * Allocates sizeof(struct net_device + struct bnad)
3644 * bnad = netdev->priv
3645 */
3646 netdev = alloc_etherdev(sizeof(struct bnad));
3647 if (!netdev) {
3648 err = -ENOMEM;
3649 return err;
3650 }
3651 bnad = netdev_priv(netdev);
3652 bnad_lock_init(bnad);
3653 bnad_add_to_list(bnad);
3655 mutex_lock(&bnad->conf_mutex);
3656 /*
3657 * PCI initialization
3658 * Output : using_dac = 1 for 64 bit DMA
3659 * = 0 for 32 bit DMA
3660 */
3661 using_dac = false;
3662 err = bnad_pci_init(bnad, pdev, &using_dac);
3663 if (err)
3664 goto unlock_mutex;
3666 /*
3667 * Initialize bnad structure
3668 * Setup relation between pci_dev & netdev
3669 */
3670 err = bnad_init(bnad, pdev, netdev);
3671 if (err)
3672 goto pci_uninit;
3674 /* Initialize netdev structure, set up ethtool ops */
3675 bnad_netdev_init(bnad, using_dac);
3677 /* Set link to down state */
3678 netif_carrier_off(netdev);
3680 /* Setup the debugfs node for this bnad */
3681 if (bna_debugfs_enable)
3682 bnad_debugfs_init(bnad);
3684 /* Get resource requirement from bna */
3685 spin_lock_irqsave(&bnad->bna_lock, flags);
3686 bna_res_req(&bnad->res_info[0]);
3687 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3689 /* Allocate resources from bna */
3690 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3691 if (err)
3692 goto drv_uninit;
3694 bna = &bnad->bna;
3696 /* Setup pcidev_info for bna_init() */
3697 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3698 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3699 pcidev_info.device_id = bnad->pcidev->device;
3700 pcidev_info.pci_bar_kva = bnad->bar0;
3702 spin_lock_irqsave(&bnad->bna_lock, flags);
3703 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3704 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3706 bnad->stats.bna_stats = &bna->stats;
3708 bnad_enable_msix(bnad);
3709 err = bnad_mbox_irq_alloc(bnad);
3710 if (err)
3711 goto res_free;
3713 /* Set up timers */
3714 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3715 (unsigned long)bnad);
3716 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3717 (unsigned long)bnad);
3718 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3719 (unsigned long)bnad);
3720 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3721 (unsigned long)bnad);
3723 /*
3724 * Start the chip
3725 * If the call back comes with error, we bail out.
3726 * This is a catastrophic error.
3727 */
3728 err = bnad_ioceth_enable(bnad);
3729 if (err) {
3730 dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3731 goto probe_success;
3732 }
3734 spin_lock_irqsave(&bnad->bna_lock, flags);
3735 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3736 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3737 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3738 bna_attr(bna)->num_rxp - 1);
3739 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3740 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3741 err = -EIO;
3742 }
3743 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3744 if (err)
3745 goto disable_ioceth;
3747 spin_lock_irqsave(&bnad->bna_lock, flags);
3748 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3749 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3751 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3752 if (err) {
3753 err = -EIO;
3754 goto disable_ioceth;
3755 }
3757 spin_lock_irqsave(&bnad->bna_lock, flags);
3758 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3759 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3761 /* Get the burnt-in mac */
3762 spin_lock_irqsave(&bnad->bna_lock, flags);
3763 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3764 bnad_set_netdev_perm_addr(bnad);
3765 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3767 mutex_unlock(&bnad->conf_mutex);
3769 /* Finally, register with net_device layer */
3770 err = register_netdev(netdev);
3771 if (err) {
3772 dev_err(&pdev->dev, "registering net device failed\n");
3773 goto probe_uninit;
3774 }
3775 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3777 return 0;
3779 probe_success:
3780 mutex_unlock(&bnad->conf_mutex);
3781 return 0;
3783 probe_uninit:
3784 mutex_lock(&bnad->conf_mutex);
3785 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3786 disable_ioceth:
3787 bnad_ioceth_disable(bnad);
3788 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3789 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3790 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3791 spin_lock_irqsave(&bnad->bna_lock, flags);
3792 bna_uninit(bna);
3793 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3794 bnad_mbox_irq_free(bnad);
3795 bnad_disable_msix(bnad);
3796 res_free:
3797 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3798 drv_uninit:
3799 /* Remove the debugfs node for this bnad */
3800 kfree(bnad->regdata);
3801 bnad_debugfs_uninit(bnad);
3802 bnad_uninit(bnad);
3803 pci_uninit:
3804 bnad_pci_uninit(pdev);
3805 unlock_mutex:
3806 mutex_unlock(&bnad->conf_mutex);
3807 bnad_remove_from_list(bnad);
3808 bnad_lock_uninit(bnad);
3809 free_netdev(netdev);
3810 return err;
3811 }
3813 static void
3814 bnad_pci_remove(struct pci_dev *pdev)
3815 {
3816 struct net_device *netdev = pci_get_drvdata(pdev);
3817 struct bnad *bnad;
3818 struct bna *bna;
3819 unsigned long flags;
3821 if (!netdev)
3822 return;
3824 bnad = netdev_priv(netdev);
3825 bna = &bnad->bna;
3827 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3828 unregister_netdev(netdev);
3830 mutex_lock(&bnad->conf_mutex);
3831 bnad_ioceth_disable(bnad);
3832 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3833 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3834 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3835 spin_lock_irqsave(&bnad->bna_lock, flags);
3836 bna_uninit(bna);
3837 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3839 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3840 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3841 bnad_mbox_irq_free(bnad);
3842 bnad_disable_msix(bnad);
3843 bnad_pci_uninit(pdev);
3844 mutex_unlock(&bnad->conf_mutex);
3845 bnad_remove_from_list(bnad);
3846 bnad_lock_uninit(bnad);
3847 /* Remove the debugfs node for this bnad */
3848 kfree(bnad->regdata);
3849 bnad_debugfs_uninit(bnad);
3850 bnad_uninit(bnad);
3851 free_netdev(netdev);
3852 }
3854 static const struct pci_device_id bnad_pci_id_table[] = {
3855 {
3856 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3857 PCI_DEVICE_ID_BROCADE_CT),
3858 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3859 .class_mask = 0xffff00
3860 },
3861 {
3862 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3863 BFA_PCI_DEVICE_ID_CT2),
3864 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3865 .class_mask = 0xffff00
3866 },
3867 {0, },
3868 };
3870 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3872 static struct pci_driver bnad_pci_driver = {
3873 .name = BNAD_NAME,
3874 .id_table = bnad_pci_id_table,
3875 .probe = bnad_pci_probe,
3876 .remove = bnad_pci_remove,
3877 };
3879 static int __init
3880 bnad_module_init(void)
3881 {
3882 int err;
3884 pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
3885 BNAD_VERSION);
3887 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3889 err = pci_register_driver(&bnad_pci_driver);
3890 if (err < 0) {
3891 pr_err("bna: PCI driver registration failed err=%d\n", err);
3892 return err;
3893 }
3895 return 0;
3896 }
3898 static void __exit
3899 bnad_module_exit(void)
3900 {
3901 pci_unregister_driver(&bnad_pci_driver);
3902 release_firmware(bfi_fw);
3903 }
3905 module_init(bnad_module_init);
3906 module_exit(bnad_module_exit);
3908 MODULE_AUTHOR("Brocade");
3909 MODULE_LICENSE("GPL");
3910 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3911 MODULE_VERSION(BNAD_VERSION);
3912 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3913 MODULE_FIRMWARE(CNA_FW_FILE_CT2);