/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"

#include "bnx2x_init.h"
static int bnx2x_setup_irqs(struct bnx2x *bp);
/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;

	/* bzero bnx2x_fastpath contents */
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;
}
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct napi_struct orig_napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));

	/* Restore the NAPI object as it has been already initialized */
	to_fp->napi = orig_napi;
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
				      "pkt_cons %u\n",
		   fp->index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
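
/* Illustrative sketch, not from the original source: assuming
 * RX_SGE_MASK_ELEM_SHIFT == 6 (each mask element tracks 64 SGEs, with
 * RX_SGE_MASK_ELEM_SZ == 64), the loop above credits the producer with
 * a whole element's worth of entries at a time - only once every SGE
 * bit inside that 64-entry element has been cleared by the firmware.
 */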
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
				    u16 len_on_bd)
{
	/* TPA aggregation won't have IP options and TCP options
	 * other than timestamp.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
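
/* Worked example (illustrative, not from the original source): for a
 * first frame with len_on_bd == 1514 and the timestamp flag set,
 * hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 * sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66, so the
 * approximated MSS reported via gso_size is 1514 - 66 = 1448.
 */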
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx, u16 parsing_flags)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
							      len_on_bd);

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx =
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
		u16 parsing_flags =
			le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);

		prefetch(skb);
		prefetch(((char *)(skb)) + L1_CACHE_BYTES);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx,
					 parsing_flags)) {
			if (parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb,
						       le16_to_cpu(cqe->
						       fast_path_cqe.
						       vlan_tag));
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
					struct sk_buff *skb)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->fast_path_cqe.status_flags &
	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		skb->rxhash =
			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* - If CQE is marked both TPA_START and TPA_END it is
			 *   a non-TPA CQE.
			 * - FP CQE will always have either TPA_START or/and
			 *   TPA_STOP flags set.
			 */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);

					/* Set Toeplitz hash for an LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;

				} else { /* TPA_STOP */
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + L1_CACHE_BYTES);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a non-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb_checksum_none_assert(skb);

			if (bp->dev->features & NETIF_F_RXCSUM) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

		if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
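
/* Worked example (illustrative, following the arithmetic above): on a
 * 10000 Mbps link with maxCfg == 30, SI mode scales the speed to
 * 10000 * 30 / 100 = 3000 Mbps, while SD mode caps it at
 * vn_max_rate = 30 * 100 = 3000 Mbps.
 */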
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static inline void bnx2x_fill_report_data(struct bnx2x *bp,
					  struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");
		pr_cont("%d Mbps ", cur_data.line_speed);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				pr_cont(", receive ");
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				     &cur_data.link_report_flags))
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, fp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for"
						  " queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp,
								fp, ring_prod);
					bnx2x_free_tpa_pool(bp,
							    fp, max_agg_queues);
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (!CHIP_IS_E2(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->skb = NULL;
		dev_kfree_skb(skb);
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG)
		bnx2x_free_msix_irqs(bp);
	else if (bp->flags & USING_MSI_FLAG)
		free_irq(bp->pdev->irq, bp->dev);
	else
		free_irq(bp->pdev->irq, bp->dev);
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
	   bp->msix_table[0].entry);
	msix_vec++;

#ifdef BCM_CNIC
	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;
#endif
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors do we have? */
		int diff = req_cnt - rc;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
				  bp->num_queues);
	} else if (rc) {
		/* fall to INTx if not enough memory */
		if (rc == -ENOMEM)
			bp->flags |= DISABLE_MSI_FLAG;
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		offset++;
		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_CONTEXT_USE;
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef BCM_CNIC
	struct bnx2x *bp = netdev_priv(dev);
	if (NO_FCOE(bp))
		return skb_tx_hash(dev, skb);
	else {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe(bp, index);
	}
#endif
	/* Select a non-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
	 */
	return __skb_tx_hash(dev, skb,
			dev->real_num_tx_queues - FCOE_CONTEXT_USE);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;
	case ETH_RSS_MODE_REGULAR:
		bp->num_queues = bnx2x_calc_num_queues(bp);
		break;

	default:
		bp->num_queues = 1;
		break;
	}

	/* Add special queues */
	bp->num_queues += NONE_ETH_CONTEXT_USE;
}
#ifdef BCM_CNIC
static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
{
	if (!NO_FCOE(bp)) {
		if (!IS_MF_SD(bp))
			bnx2x_set_fip_eth_mac_addr(bp, 1);
		bnx2x_set_all_enode_macs(bp, 1);
		bp->flags |= FCOE_MACS_SET;
	}
}
#endif

static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
}
static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
	int rc, num = bp->num_queues;

#ifdef BCM_CNIC
	if (NO_FCOE(bp))
		num -= FCOE_CONTEXT_USE;

#endif
	netif_set_real_num_tx_queues(bp->dev, num);
	rc = netif_set_real_num_rx_queues(bp->dev, num);
	return rc;
}
static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive
			 * on this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun attack.
			 */
			fp->rx_buf_size =
				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
				BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
		else
			fp->rx_buf_size =
				bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
				IP_HEADER_ALIGNMENT_PADDING;
	}
}
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		return rc;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Set the initial link reported state to link down */
	bnx2x_acquire_phy_lock(bp);
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		&bp->last_reported_link.link_report_flags);
	bnx2x_release_phy_lock(bp);

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	/* zero fastpath structures preserving invariants like napi which are
	 * allocated only once
	 */
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

#ifdef BCM_CNIC
	/* We don't want TPA on FCoE L2 ring */
	bnx2x_fcoe(bp, disable_tpa) = 1;
#endif

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it.
	 */
	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		goto load_error0;
	}

	bnx2x_napi_enable(bp);

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error1;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error1;
		}

	} else {
		int path = BP_PATH(bp);
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		goto load_error2;
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bnx2x_dcbx_init(bp);

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (!CHIP_IS_E1(bp) &&
	    (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;
	}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
		if (rc)
#ifdef BCM_CNIC
			goto load_error4;
#else
			goto load_error3;
#endif
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

#ifdef BCM_CNIC
	bnx2x_set_fcoe_eth_macs(bp);
#endif

	bnx2x_set_eth_mac(bp, 1);

	/* Clear MC configuration */
	if (CHIP_IS_E1(bp))
		bnx2x_invalidate_e1_mc_list(bp);
	else
		bnx2x_invalidate_e1h_mc_list(bp);

	/* Clear UC lists configuration */
	bnx2x_invalidate_uc_list(bp);

	if (bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Initialize Rx filtering */
	bnx2x_set_rx_mode(bp->dev);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	bnx2x_release_firmware(bp);

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
load_error0:
	bnx2x_free_mem(bp);

	bnx2x_release_firmware(bp);

	return rc;
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Stop Tx */
	bnx2x_tx_disable(bp);

	del_timer_sync(&bp->timer);

	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Disable HW interrupts, NAPI and Tx */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp);
	}

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mail box sequence if there is ongoing recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * net_device service functions
 */
int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
#ifdef BCM_CNIC
			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}
#endif

			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_HW,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
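
/* Illustrative note, not from the original source: if the stack began
 * its partial checksum 'fix' bytes before the transport header
 * (fix > 0), the sum over those leading bytes is subtracted; if it
 * began after it (fix < 0), the missing bytes are added back. Either
 * way the result is folded, inverted and byte-swapped into the form
 * the parse BD expects.
 */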
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_is_gso_v6(skb))
		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
	else if (skb_is_gso(skb))
		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
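
/* Worked example (illustrative, assuming MAX_FETCH_BD == 13 as in this
 * driver family): the window size is 10 BDs, so an LSO skb with 12
 * frags is checked over the special first window (linear data plus
 * frags 0-8) and then three sliding windows of 10 consecutive frags;
 * if any window sums to less than gso_size, the skb is linearized.
 */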
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
					u32 xmit_type)
{
	*parsing_data |= (skb_shinfo(skb)->gso_size <<
			      ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
			      ETH_TX_PARSE_BD_E2_LSO_MSS;
	if ((xmit_type & XMIT_GSO_V6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
				     struct eth_tx_parse_bd_e1x *pbd,
				     u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(skb);

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));

	} else
		pbd->tcp_pseudo_csum =
			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						&ipv6_hdr(skb)->daddr,
						0, IPPROTO_TCP, 0));

	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}
/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712 related
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
	u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	} else
		/* We support checksum offload for TCP and UDP only.
		 * No need to pass the UDP header length - it's a constant.
		 */
		return skb_transport_header(skb) +
				sizeof(struct udphdr) - skb->data;
}
/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
	struct eth_tx_parse_bd_e1x *pbd,
	u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d fix %d csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}
2207 /* called with netif_tx_lock
2208 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2209 * netif_wake_queue()
2211 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2213 struct bnx2x *bp = netdev_priv(dev);
2214 struct bnx2x_fastpath *fp;
2215 struct netdev_queue *txq;
2216 struct sw_tx_bd *tx_buf;
2217 struct eth_tx_start_bd *tx_start_bd;
2218 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2219 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2220 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2221 u32 pbd_e2_parsing_data = 0;
2222 u16 pkt_prod, bd_prod;
2225 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2228 __le16 pkt_size = 0;
2230 u8 mac_type = UNICAST_ADDRESS;
2232 #ifdef BNX2X_STOP_ON_ERROR
2233 if (unlikely(bp->panic))
2234 return NETDEV_TX_BUSY;
2237 fp_index = skb_get_queue_mapping(skb);
2238 txq = netdev_get_tx_queue(dev, fp_index);
2240 fp = &bp->fp[fp_index];
2242 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2243 fp->eth_q_stats.driver_xoff++;
2244 netif_tx_stop_queue(txq);
2245 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2246 return NETDEV_TX_BUSY;
2249 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2250 "protocol(%x,%x) gso type %x xmit_type %x\n",
2251 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2252 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2254 eth = (struct ethhdr *)skb->data;
2256 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2257 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2258 if (is_broadcast_ether_addr(eth->h_dest))
2259 mac_type = BROADCAST_ADDRESS;
2261 mac_type = MULTICAST_ADDRESS;
2264 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2265 /* First, check if we need to linearize the skb (due to FW
2266 restrictions). No need to check fragmentation if page size > 8K
2267 (there will be no violation to FW restrictions) */
2268 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2269 /* Statistics of linearization */
2271 if (skb_linearize(skb) != 0) {
2272 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2273 "silently dropping this SKB\n");
2274 dev_kfree_skb_any(skb);
2275 return NETDEV_TX_OK;
2281 Please read carefully. First we use one BD which we mark as start,
2282 then we have a parsing info BD (used for TSO or xsum),
2283 and only then we have the rest of the TSO BDs.
2284 (don't forget to mark the last one as last,
2285 and to unmap only AFTER you write to the BD ...)
2286 And above all, all pdb sizes are in words - NOT DWORDS!
2289 pkt_prod = fp->tx_pkt_prod++;
2290 bd_prod = TX_BD(fp->tx_bd_prod);
2292 /* get a tx_buf and first BD */
2293 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2294 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2296 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2297 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2301 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2303 /* remember the first BD of the packet */
2304 tx_buf->first_bd = fp->tx_bd_prod;
2308 DP(NETIF_MSG_TX_QUEUED,
2309 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2310 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2312 if (vlan_tx_tag_present(skb)) {
2313 tx_start_bd->vlan_or_ethertype =
2314 cpu_to_le16(vlan_tx_tag_get(skb));
2315 tx_start_bd->bd_flags.as_bitfield |=
2316 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2318 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2320 /* turn on parsing and get a BD */
2321 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2323 if (xmit_type & XMIT_CSUM) {
2324 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2326 if (xmit_type & XMIT_CSUM_V4)
2327 tx_start_bd->bd_flags.as_bitfield |=
2328 ETH_TX_BD_FLAGS_IP_CSUM;
2330 tx_start_bd->bd_flags.as_bitfield |=
2331 ETH_TX_BD_FLAGS_IPV6;
2333 if (!(xmit_type & XMIT_CSUM_TCP))
2334 tx_start_bd->bd_flags.as_bitfield |=
2335 ETH_TX_BD_FLAGS_IS_UDP;
2338 if (CHIP_IS_E2(bp)) {
2339 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2340 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2341 /* Set PBD in checksum offload case */
2342 if (xmit_type & XMIT_CSUM)
2343 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2344 &pbd_e2_parsing_data,
2347 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2348 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2349 /* Set PBD in checksum offload case */
2350 if (xmit_type & XMIT_CSUM)
2351 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2355 /* Map skb linear data for DMA */
2356 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2357 skb_headlen(skb), DMA_TO_DEVICE);
2359 /* Setup the data pointer of the first BD of the packet */
2360 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2361 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2362 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2363 tx_start_bd->nbd = cpu_to_le16(nbd);
2364 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2365 pkt_size = tx_start_bd->nbytes;
2367 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2368 " nbytes %d flags %x vlan %x\n",
2369 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2370 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2371 tx_start_bd->bd_flags.as_bitfield,
2372 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2374 if (xmit_type & XMIT_GSO) {
2376 DP(NETIF_MSG_TX_QUEUED,
2377 "TSO packet len %d hlen %d total len %d tso size %d\n",
2378 skb->len, hlen, skb_headlen(skb),
2379 skb_shinfo(skb)->gso_size);
2381 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2383 if (unlikely(skb_headlen(skb) > hlen))
2384 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2385 hlen, bd_prod, ++nbd);
2387 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2390 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2393 /* Set the PBD's parsing_data field if not zero
2394 * (for the chips newer than 57711).
2396 if (pbd_e2_parsing_data)
2397 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2399 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2401 /* Handle fragmented skb */
2402 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2403 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2405 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2406 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2407 if (total_pkt_bd == NULL)
2408 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2410 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2412 frag->size, DMA_TO_DEVICE);
2414 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2415 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2416 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2417 le16_add_cpu(&pkt_size, frag->size);
2419 DP(NETIF_MSG_TX_QUEUED,
2420 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2421 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2422 le16_to_cpu(tx_data_bd->nbytes));
	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;
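	/* Rationale (likely): each BD page ends in a "next page" BD that the
	 * chip also walks (NEXT_TX_IDX() skips it in software); a producer
	 * page offset smaller than nbd means the packet wrapped past such a
	 * BD, so it must be counted as well.
	 */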
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;
	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, fp->cid, fp->tx_db.raw);
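	/* Note the split: wmb() orders the BD writes against the producer
	 * update as seen by the device, while barrier() only keeps the
	 * compiler from reordering the tx_db field write past the read of
	 * the raw doorbell value.
	 */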
	mmiowb();

	fp->tx_bd_prod += nbd;
	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->tx_bd_cons
		 */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
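		/* The re-check after smp_mb() closes the race where the
		 * completion path freed BDs between the availability test
		 * above and netif_tx_stop_queue(); without it the queue
		 * could remain stopped with no completion left to wake it.
		 */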
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2x_set_eth_mac(bp, 1);

	return 0;
}
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];

	/* Common */
#ifdef BCM_CNIC
	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
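		/* Nothing was DMA-allocated for the FCoE status block
		 * (bnx2x_alloc_fp_mem_at() skips it), so only the host-side
		 * pointers are cleared here rather than freed.
		 */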
	} else {
#endif
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
	}
#endif
	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
			       bnx2x_fp(bp, fp_index, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */
}
void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;
	for_each_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}
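/**
 * set_sb_shortcuts - set pointers to the status block index values.
 *
 * @bp:		driver handle
 * @index:	fastpath index
 *
 * Caches per-chip (E1x vs E2) pointers to the status block's index arrays
 * so the hot path does not re-check the chip type on every access.
 */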
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (CHIP_IS_E2(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;

	/* if rx_ring_size specified - use it */
	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
			   MAX_RX_AVAIL/bp->num_queues;

	/* allocate at least number of buffers required by FW */
	rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
						    MIN_RX_SIZE_TPA,
				  rx_ring_size);
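	/* Example: with rx_ring_size unset and eight queues, each queue gets
	 * MAX_RX_AVAIL/8 buffers, floored at the FW minimum for its mode
	 * (MIN_RX_SIZE_TPA vs MIN_RX_SIZE_NONTPA).
	 */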
	bnx2x_fp(bp, index, bp) = bp;
	bnx2x_fp(bp, index, index) = index;

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);
#ifdef BCM_CNIC
	if (!IS_FCOE_IDX(index)) {
#endif
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
	}
#endif
	set_sb_shortcuts(bp, index);
	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
				&bnx2x_fp(bp, index, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;
/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
						index, ring_size);
	/* FW will drop all packets if the queue is not big enough;
	 * in these cases we disable the queue.
	 * Min size is different for TPA and non-TPA queues.
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
			/* release memory allocated for this queue */
			bnx2x_free_fp_mem_at(bp, index);
			return -ENOMEM;
	}
	return 0;
}
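/**
 * bnx2x_alloc_fp_mem - allocate fastpath memory for all queues.
 *
 * @bp:		driver handle
 *
 * A failure on a non-leading RSS queue is not fatal: the -ENOMEM from
 * bnx2x_alloc_fp_mem_at() stops the loop and the queue count is reduced
 * instead (see the failure handling below).
 */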
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/**
	 * 1. Allocate FP for leading - fatal if error
	 * 2. {CNIC} Allocate FCoE FP - fatal if error
	 * 3. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;
#ifdef BCM_CNIC
	/* FCoE */
	if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
		return -ENOMEM;
#endif
	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;
	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
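		/* Example: with 8 ETH queues configured and allocation
		 * failing at i == 5, delta == 3; the FCoE fastpath slides
		 * down three slots and num_queues shrinks by three.
		 */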
		WARN_ON(delta < 0);
#ifdef BCM_CNIC
		/**
		 * move non eth FPs next to last eth FP
		 * must be done in that order
		 * FCOE_IDX < FWD_IDX < OOO_IDX
		 */

		/* move FCoE fp */
		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
#endif
		bp->num_queues -= delta;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;

	/* fp array */
	fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	bp->fp = fp;

	/* msix table */
	tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
		      GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
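	/* kfree(NULL) is a no-op, so a single cleanup path is safe no matter
	 * which of the three allocations failed.
	 */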
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
static int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual allocation size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
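/* LRO is implemented on top of the driver's TPA, and aggregation relies on
 * hardware Rx checksum validation; hence LRO is masked out below whenever
 * RXCSUM is off or TPA is disabled.
 */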
u32 bnx2x_fix_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
		features &= ~NETIF_F_LRO;

	return features;
}
int bnx2x_set_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	/* Since the chip was reset, clear the FW sequence number */
	bp->fw_seq = 0;
	rc = bnx2x_nic_load(bp, LOAD_OPEN);