1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2012 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the contents of the bp->fp[to].napi are kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire
43 * source onto the target. Update txdata pointers and related
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
57 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi;
60 /* Move bnx2x_fastpath contents */
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
83 memcpy(&bp->bnx2x_txq[new_txdata_index],
84 &bp->bnx2x_txq[old_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
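/* Illustrative example (values assumed, not taken from the code above):
 * with 8 ETH queues and max_cos = 3, moving the FCoE fp from index 8 to 4
 * gives old_max_eth_txqs = 24 and new_max_eth_txqs = 12, so the FCoE
 * txdata entry slides from bnx2x_txq[24 + FCOE_TXQ_IDX_OFFSET] down to
 * bnx2x_txq[12 + FCOE_TXQ_IDX_OFFSET] together with its fastpath.
 */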
89 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
91 /* free skb in the packet ring at pos idx
92 * return idx of last bd freed
94 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
95 u16 idx, unsigned int *pkts_compl,
96 unsigned int *bytes_compl)
98 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
99 struct eth_tx_start_bd *tx_start_bd;
100 struct eth_tx_bd *tx_data_bd;
101 struct sk_buff *skb = tx_buf->skb;
102 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
105 /* prefetch skb end pointer to speed up dev_kfree_skb() */
108 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
109 txdata->txq_index, idx, tx_buf, skb);
112 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
113 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
114 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
117 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
118 #ifdef BNX2X_STOP_ON_ERROR
119 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
120 BNX2X_ERR("BAD nbd!\n");
124 new_cons = nbd + tx_buf->first_bd;
126 /* Get the next bd */
127 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
129 /* Skip a parse bd... */
131 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
133 /* ...and the TSO split header bd since they have no mapping */
134 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
136 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
142 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
143 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
144 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
146 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
153 (*bytes_compl) += skb->len;
156 dev_kfree_skb_any(skb);
157 tx_buf->first_bd = 0;
163 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
165 struct netdev_queue *txq;
166 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
167 unsigned int pkts_compl = 0, bytes_compl = 0;
169 #ifdef BNX2X_STOP_ON_ERROR
170 if (unlikely(bp->panic))
174 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
175 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
176 sw_cons = txdata->tx_pkt_cons;
178 while (sw_cons != hw_cons) {
181 pkt_cons = TX_BD(sw_cons);
183 DP(NETIF_MSG_TX_DONE,
184 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
185 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
187 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
188 &pkts_compl, &bytes_compl);
193 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
195 txdata->tx_pkt_cons = sw_cons;
196 txdata->tx_bd_cons = bd_cons;
198 /* Need to make the tx_bd_cons update visible to start_xmit()
199 * before checking for netif_tx_queue_stopped(). Without the
200 * memory barrier, there is a small possibility that
201 * start_xmit() will miss it and cause the queue to be stopped
203 * On the other hand we need an rmb() here to ensure the proper
204 * ordering of bit testing in the following
205 * netif_tx_queue_stopped(txq) call.
209 if (unlikely(netif_tx_queue_stopped(txq))) {
210 /* Taking tx_lock() is needed to prevent re-enabling the queue
211 * while it's empty. This could happen if rx_action() gets
212 * suspended in bnx2x_tx_int() after the condition before
213 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
215 * stops the queue->sees fresh tx_bd_cons->releases the queue->
216 * sends some packets consuming the whole queue again->
220 __netif_tx_lock(txq, smp_processor_id());
222 if ((netif_tx_queue_stopped(txq)) &&
223 (bp->state == BNX2X_STATE_OPEN) &&
224 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
225 netif_tx_wake_queue(txq);
227 __netif_tx_unlock(txq);
232 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
235 u16 last_max = fp->last_max_sge;
237 if (SUB_S16(idx, last_max) > 0)
238 fp->last_max_sge = idx;
241 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
243 struct eth_end_agg_rx_cqe *cqe)
245 struct bnx2x *bp = fp->bp;
246 u16 last_max, last_elem, first_elem;
253 /* First mark all used pages */
254 for (i = 0; i < sge_len; i++)
255 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
256 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
258 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
259 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
261 /* Here we assume that the last SGE index is the biggest */
262 prefetch((void *)(fp->sge_mask));
263 bnx2x_update_last_max_sge(fp,
264 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
266 last_max = RX_SGE(fp->last_max_sge);
267 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
268 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
270 /* If ring is not full */
271 if (last_elem + 1 != first_elem)
274 /* Now update the prod */
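/* Note: sge_mask is a bit vector with one bit per SGE; the bits were
 * cleared above as pages were consumed. The producer may only advance
 * over mask elements that have become fully zero, and each such element
 * is re-armed to all ones before the producer is moved past it.
 */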
275 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
276 if (likely(fp->sge_mask[i]))
279 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
280 delta += BIT_VEC64_ELEM_SZ;
284 fp->rx_sge_prod += delta;
285 /* clear page-end entries */
286 bnx2x_clear_sge_mask_next_elems(fp);
289 DP(NETIF_MSG_RX_STATUS,
290 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
291 fp->last_max_sge, fp->rx_sge_prod);
294 /* Set Toeplitz hash value in the skb using the value from the
295 * CQE (calculated by HW).
297 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
298 const struct eth_fast_path_rx_cqe *cqe,
301 /* Set Toeplitz hash from CQE */
302 if ((bp->dev->features & NETIF_F_RXHASH) &&
303 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
304 enum eth_rss_hash_type htype;
306 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
307 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
308 (htype == TCP_IPV6_HASH_TYPE);
309 return le32_to_cpu(cqe->rss_hash_result);
315 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
317 struct eth_fast_path_rx_cqe *cqe)
319 struct bnx2x *bp = fp->bp;
320 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
321 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
322 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
324 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
325 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
327 /* print error if current state != stop */
328 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
329 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
331 /* Try to map an empty data buffer from the aggregation info */
332 mapping = dma_map_single(&bp->pdev->dev,
333 first_buf->data + NET_SKB_PAD,
334 fp->rx_buf_size, DMA_FROM_DEVICE);
336 * ...if it fails - move the skb from the consumer to the producer
337 * and set the current aggregation state as ERROR to drop it
338 * when TPA_STOP arrives.
341 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
342 /* Move the BD from the consumer to the producer */
343 bnx2x_reuse_rx_data(fp, cons, prod);
344 tpa_info->tpa_state = BNX2X_TPA_ERROR;
348 /* move empty data from pool to prod */
349 prod_rx_buf->data = first_buf->data;
350 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
351 /* point prod_bd to new data */
352 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
353 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
355 /* move partial skb from cons to pool (don't unmap yet) */
356 *first_buf = *cons_rx_buf;
358 /* mark bin state as START */
359 tpa_info->parsing_flags =
360 le16_to_cpu(cqe->pars_flags.flags);
361 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
362 tpa_info->tpa_state = BNX2X_TPA_START;
363 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
364 tpa_info->placement_offset = cqe->placement_offset;
365 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
366 if (fp->mode == TPA_MODE_GRO) {
367 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
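/* full_page: the SGE buffer size rounded down to a whole number of
 * gro_size-byte segments, so every filled SGE chunk carries only
 * complete GRO segments.
 */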
368 tpa_info->full_page =
369 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
370 tpa_info->gro_size = gro_size;
373 #ifdef BNX2X_STOP_ON_ERROR
374 fp->tpa_queue_used |= (1 << queue);
375 #ifdef _ASM_GENERIC_INT_L64_H
376 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
378 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
384 /* Timestamp option length allowed for TPA aggregation:
386 * nop nop kind length echo val
388 #define TPA_TSTAMP_OPT_LEN 12
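/* Breakdown of the 12 bytes: nop (1) + nop (1) + kind (1) + length (1) +
 * TS value (4) + TS echo reply (4), i.e. the TCP timestamp option padded
 * with two NOPs to a 4-byte boundary.
 */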
390 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
393 * @parsing_flags: parsing flags from the START CQE
394 * @len_on_bd: total length of the first packet for the
397 * Approximate value of the MSS for this aggregation, calculated using
398 * its first packet.
400 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
404 * TPA aggregation won't have either IP options or TCP options
405 * other than timestamp or IPv6 extension headers.
407 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
409 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
410 PRS_FLAG_OVERETH_IPV6)
411 hdrs_len += sizeof(struct ipv6hdr);
413 hdrs_len += sizeof(struct iphdr);
416 /* Check if there was a TCP timestamp; if there is, it will
417 * always be 12 bytes long: nop nop kind length echo val.
419 * Otherwise FW would close the aggregation.
421 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
422 hdrs_len += TPA_TSTAMP_OPT_LEN;
424 return len_on_bd - hdrs_len;
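/* Worked example (illustrative): for an IPv4 aggregation carrying a TCP
 * timestamp, hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr) +
 * 12 (TPA_TSTAMP_OPT_LEN) = 66, so the reported MSS is len_on_bd - 66.
 */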
427 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
428 struct bnx2x_fastpath *fp, u16 index)
430 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
431 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
432 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
435 if (unlikely(page == NULL)) {
436 BNX2X_ERR("Can't alloc sge\n");
440 mapping = dma_map_page(&bp->pdev->dev, page, 0,
441 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
442 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
443 __free_pages(page, PAGES_PER_SGE_SHIFT);
444 BNX2X_ERR("Can't map sge\n");
449 dma_unmap_addr_set(sw_buf, mapping, mapping);
451 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
452 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
457 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
458 struct bnx2x_agg_info *tpa_info,
461 struct eth_end_agg_rx_cqe *cqe,
464 struct sw_rx_page *rx_pg, old_rx_pg;
465 u32 i, frag_len, frag_size;
466 int err, j, frag_id = 0;
467 u16 len_on_bd = tpa_info->len_on_bd;
468 u16 full_page = 0, gro_size = 0;
470 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
472 if (fp->mode == TPA_MODE_GRO) {
473 gro_size = tpa_info->gro_size;
474 full_page = tpa_info->full_page;
477 /* This is needed in order to enable forwarding support */
479 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
480 tpa_info->parsing_flags, len_on_bd);
483 if (fp->mode == TPA_MODE_GRO)
484 skb_shinfo(skb)->gso_type =
485 (GET_FLAG(tpa_info->parsing_flags,
486 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
487 PRS_FLAG_OVERETH_IPV6) ?
488 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
492 #ifdef BNX2X_STOP_ON_ERROR
493 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
494 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
496 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
502 /* Run through the SGL and compose the fragmented skb */
503 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
504 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
506 /* FW gives the indices of the SGE as if the ring is an array
507 (meaning that "next" element will consume 2 indices) */
508 if (fp->mode == TPA_MODE_GRO)
509 frag_len = min_t(u32, frag_size, (u32)full_page);
511 frag_len = min_t(u32, frag_size,
512 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
514 rx_pg = &fp->rx_page_ring[sge_idx];
517 /* If we fail to allocate a substitute page, we simply stop
518 where we are and drop the whole packet */
519 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
521 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
525 /* Unmap the page as we're going to pass it to the stack */
526 dma_unmap_page(&bp->pdev->dev,
527 dma_unmap_addr(&old_rx_pg, mapping),
528 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
529 /* Add one frag and update the appropriate fields in the skb */
530 if (fp->mode == TPA_MODE_LRO)
531 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
535 for (rem = frag_len; rem > 0; rem -= gro_size) {
536 int len = rem > gro_size ? gro_size : rem;
537 skb_fill_page_desc(skb, frag_id++,
538 old_rx_pg.page, offset, len);
540 get_page(old_rx_pg.page);
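/* Account the full SGE allocation in truesize below: the skb now owns the
 * whole page block even when frag_len covers only part of it.
 */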
545 skb->data_len += frag_len;
546 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
547 skb->len += frag_len;
549 frag_size -= frag_len;
555 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
557 if (fp->rx_frag_size)
558 put_page(virt_to_head_page(data));
563 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
565 if (fp->rx_frag_size)
566 return netdev_alloc_frag(fp->rx_frag_size);
568 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
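/* Note: fp->rx_frag_size != 0 selects the page-frag allocator above; it is
 * set to 0 in bnx2x_set_rx_buf_size() when the buffer plus NET_SKB_PAD
 * would not fit in a single page, in which case plain kmalloc() is used.
 */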
572 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
573 struct bnx2x_agg_info *tpa_info,
575 struct eth_end_agg_rx_cqe *cqe,
578 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
579 u8 pad = tpa_info->placement_offset;
580 u16 len = tpa_info->len_on_bd;
581 struct sk_buff *skb = NULL;
582 u8 *new_data, *data = rx_buf->data;
583 u8 old_tpa_state = tpa_info->tpa_state;
585 tpa_info->tpa_state = BNX2X_TPA_STOP;
587 /* If there was an error during the handling of the TPA_START -
588 * drop this aggregation.
590 if (old_tpa_state == BNX2X_TPA_ERROR)
593 /* Try to allocate the new data */
594 new_data = bnx2x_frag_alloc(fp);
595 /* Unmap skb in the pool anyway, as we are going to change
596 pool entry status to BNX2X_TPA_STOP even if new skb allocation
598 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
599 fp->rx_buf_size, DMA_FROM_DEVICE);
600 if (likely(new_data))
601 skb = build_skb(data, fp->rx_frag_size);
604 #ifdef BNX2X_STOP_ON_ERROR
605 if (pad + len > fp->rx_buf_size) {
606 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
607 pad, len, fp->rx_buf_size);
613 skb_reserve(skb, pad + NET_SKB_PAD);
615 skb->rxhash = tpa_info->rxhash;
616 skb->l4_rxhash = tpa_info->l4_rxhash;
618 skb->protocol = eth_type_trans(skb, bp->dev);
619 skb->ip_summed = CHECKSUM_UNNECESSARY;
621 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
622 skb, cqe, cqe_idx)) {
623 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
624 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
625 napi_gro_receive(&fp->napi, skb);
627 DP(NETIF_MSG_RX_STATUS,
628 "Failed to allocate new pages - dropping packet!\n");
629 dev_kfree_skb_any(skb);
633 /* put new data in bin */
634 rx_buf->data = new_data;
638 bnx2x_frag_free(fp, new_data);
640 /* drop the packet and keep the buffer in the bin */
641 DP(NETIF_MSG_RX_STATUS,
642 "Failed to allocate or map a new skb - dropping packet!\n");
643 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
646 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
647 struct bnx2x_fastpath *fp, u16 index)
650 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
651 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
654 data = bnx2x_frag_alloc(fp);
655 if (unlikely(data == NULL))
658 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
661 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
662 bnx2x_frag_free(fp, data);
663 BNX2X_ERR("Can't map rx data\n");
668 dma_unmap_addr_set(rx_buf, mapping, mapping);
670 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
671 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
677 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
678 struct bnx2x_fastpath *fp,
679 struct bnx2x_eth_q_stats *qstats)
681 /* Do nothing if no L4 csum validation was done.
682 * We do not check whether IP csum was validated. For IPv4 we assume
683 * that if the card got as far as validating the L4 csum, it also
684 * validated the IP csum. IPv6 has no IP csum.
686 if (cqe->fast_path_cqe.status_flags &
687 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
690 /* If L4 validation was done, check if an error was found. */
692 if (cqe->fast_path_cqe.type_error_flags &
693 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
694 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
695 qstats->hw_csum_err++;
697 skb->ip_summed = CHECKSUM_UNNECESSARY;
700 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
702 struct bnx2x *bp = fp->bp;
703 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
704 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
707 #ifdef BNX2X_STOP_ON_ERROR
708 if (unlikely(bp->panic))
712 /* CQ "next element" is of the size of the regular element,
713 that's why it's ok here */
714 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
715 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
718 bd_cons = fp->rx_bd_cons;
719 bd_prod = fp->rx_bd_prod;
720 bd_prod_fw = bd_prod;
721 sw_comp_cons = fp->rx_comp_cons;
722 sw_comp_prod = fp->rx_comp_prod;
724 /* Memory barrier necessary as speculative reads of the rx
725 * buffer can be ahead of the index in the status block
729 DP(NETIF_MSG_RX_STATUS,
730 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
731 fp->index, hw_comp_cons, sw_comp_cons);
733 while (sw_comp_cons != hw_comp_cons) {
734 struct sw_rx_bd *rx_buf = NULL;
736 union eth_rx_cqe *cqe;
737 struct eth_fast_path_rx_cqe *cqe_fp;
739 enum eth_rx_cqe_type cqe_fp_type;
744 #ifdef BNX2X_STOP_ON_ERROR
745 if (unlikely(bp->panic))
749 comp_ring_cons = RCQ_BD(sw_comp_cons);
750 bd_prod = RX_BD(bd_prod);
751 bd_cons = RX_BD(bd_cons);
753 cqe = &fp->rx_comp_ring[comp_ring_cons];
754 cqe_fp = &cqe->fast_path_cqe;
755 cqe_fp_flags = cqe_fp->type_error_flags;
756 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
758 DP(NETIF_MSG_RX_STATUS,
759 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
760 CQE_TYPE(cqe_fp_flags),
761 cqe_fp_flags, cqe_fp->status_flags,
762 le32_to_cpu(cqe_fp->rss_hash_result),
763 le16_to_cpu(cqe_fp->vlan_tag),
764 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
766 /* is this a slowpath msg? */
767 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
768 bnx2x_sp_event(fp, cqe);
772 rx_buf = &fp->rx_buf_ring[bd_cons];
775 if (!CQE_TYPE_FAST(cqe_fp_type)) {
776 struct bnx2x_agg_info *tpa_info;
777 u16 frag_size, pages;
778 #ifdef BNX2X_STOP_ON_ERROR
780 if (fp->disable_tpa &&
781 (CQE_TYPE_START(cqe_fp_type) ||
782 CQE_TYPE_STOP(cqe_fp_type)))
783 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
784 CQE_TYPE(cqe_fp_type));
787 if (CQE_TYPE_START(cqe_fp_type)) {
788 u16 queue = cqe_fp->queue_index;
789 DP(NETIF_MSG_RX_STATUS,
790 "calling tpa_start on queue %d\n",
793 bnx2x_tpa_start(fp, queue,
800 queue = cqe->end_agg_cqe.queue_index;
801 tpa_info = &fp->tpa_info[queue];
802 DP(NETIF_MSG_RX_STATUS,
803 "calling tpa_stop on queue %d\n",
806 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
809 if (fp->mode == TPA_MODE_GRO)
810 pages = (frag_size + tpa_info->full_page - 1) /
813 pages = SGE_PAGE_ALIGN(frag_size) >>
816 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
817 &cqe->end_agg_cqe, comp_ring_cons);
818 #ifdef BNX2X_STOP_ON_ERROR
823 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
827 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
828 pad = cqe_fp->placement_offset;
829 dma_sync_single_for_cpu(&bp->pdev->dev,
830 dma_unmap_addr(rx_buf, mapping),
831 pad + RX_COPY_THRESH,
834 prefetch(data + pad); /* speed up eth_type_trans() */
835 /* is this an error packet? */
836 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
837 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
838 "ERROR flags %x rx packet %u\n",
839 cqe_fp_flags, sw_comp_cons);
840 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
844 /* Since we don't have a jumbo ring
845 * copy small packets if mtu > 1500
847 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
848 (len <= RX_COPY_THRESH)) {
849 skb = netdev_alloc_skb_ip_align(bp->dev, len);
851 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
852 "ERROR packet dropped because of alloc failure\n");
853 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
856 memcpy(skb->data, data + pad, len);
857 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
859 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
860 dma_unmap_single(&bp->pdev->dev,
861 dma_unmap_addr(rx_buf, mapping),
864 skb = build_skb(data, fp->rx_frag_size);
865 if (unlikely(!skb)) {
866 bnx2x_frag_free(fp, data);
867 bnx2x_fp_qstats(bp, fp)->
868 rx_skb_alloc_failed++;
871 skb_reserve(skb, pad);
873 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
874 "ERROR packet dropped because of alloc failure\n");
875 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
877 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
883 skb->protocol = eth_type_trans(skb, bp->dev);
885 /* Set Toeplitz hash for a non-LRO skb */
886 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
887 skb->l4_rxhash = l4_rxhash;
889 skb_checksum_none_assert(skb);
891 if (bp->dev->features & NETIF_F_RXCSUM)
892 bnx2x_csum_validate(skb, cqe, fp,
893 bnx2x_fp_qstats(bp, fp));
895 skb_record_rx_queue(skb, fp->rx_queue);
897 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
899 __vlan_hwaccel_put_tag(skb,
900 le16_to_cpu(cqe_fp->vlan_tag));
901 napi_gro_receive(&fp->napi, skb);
907 bd_cons = NEXT_RX_IDX(bd_cons);
908 bd_prod = NEXT_RX_IDX(bd_prod);
909 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
912 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
913 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
915 if (rx_pkt == budget)
919 fp->rx_bd_cons = bd_cons;
920 fp->rx_bd_prod = bd_prod_fw;
921 fp->rx_comp_cons = sw_comp_cons;
922 fp->rx_comp_prod = sw_comp_prod;
924 /* Update producers */
925 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
928 fp->rx_pkt += rx_pkt;
934 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
936 struct bnx2x_fastpath *fp = fp_cookie;
937 struct bnx2x *bp = fp->bp;
941 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
942 fp->index, fp->fw_sb_id, fp->igu_sb_id);
943 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
945 #ifdef BNX2X_STOP_ON_ERROR
946 if (unlikely(bp->panic))
950 /* Handle Rx and Tx according to MSI-X vector */
951 prefetch(fp->rx_cons_sb);
953 for_each_cos_in_tx_queue(fp, cos)
954 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
956 prefetch(&fp->sb_running_index[SM_RX_ID]);
957 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
962 /* HW Lock for shared dual port PHYs */
963 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
965 mutex_lock(&bp->port.phy_mutex);
967 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
970 void bnx2x_release_phy_lock(struct bnx2x *bp)
972 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
974 mutex_unlock(&bp->port.phy_mutex);
977 /* calculates MF speed according to current linespeed and MF configuration */
978 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
980 u16 line_speed = bp->link_vars.line_speed;
982 u16 maxCfg = bnx2x_extract_max_cfg(bp,
983 bp->mf_config[BP_VN(bp)]);
985 /* Calculate the current MAX line speed limit for the MF
989 line_speed = (line_speed * maxCfg) / 100;
991 u16 vn_max_rate = maxCfg * 100;
993 if (vn_max_rate < line_speed)
994 line_speed = vn_max_rate;
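/* Illustrative note (units inferred from the arithmetic above): in the
 * (line_speed * maxCfg) / 100 branch maxCfg acts as a percentage of the
 * physical line speed, while in the maxCfg * 100 branch it is a cap in
 * 100 Mbps units - e.g. maxCfg = 50 means 50% of line speed or a
 * 5000 Mbps limit respectively.
 */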
1002 * bnx2x_fill_report_data - fill link report data to report
1004 * @bp: driver handle
1005 * @data: link state to update
1007 * It uses non-atomic bit operations because it is called under the mutex.
1009 static void bnx2x_fill_report_data(struct bnx2x *bp,
1010 struct bnx2x_link_report_data *data)
1012 u16 line_speed = bnx2x_get_mf_speed(bp);
1014 memset(data, 0, sizeof(*data));
1016 /* Fill the report data: effective line speed */
1017 data->line_speed = line_speed;
1020 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1021 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1022 &data->link_report_flags);
1025 if (bp->link_vars.duplex == DUPLEX_FULL)
1026 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1028 /* Rx Flow Control is ON */
1029 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1030 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1032 /* Tx Flow Control is ON */
1033 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1034 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1038 * bnx2x_link_report - report link status to OS.
1040 * @bp: driver handle
1042 * Calls the __bnx2x_link_report() under the same locking scheme
1043 * as the link/PHY state managing code to ensure a consistent link
1047 void bnx2x_link_report(struct bnx2x *bp)
1049 bnx2x_acquire_phy_lock(bp);
1050 __bnx2x_link_report(bp);
1051 bnx2x_release_phy_lock(bp);
1055 * __bnx2x_link_report - report link status to OS.
1057 * @bp: driver handle
1059 * Non-atomic implementation.
1060 * Should be called under the phy_lock.
1062 void __bnx2x_link_report(struct bnx2x *bp)
1064 struct bnx2x_link_report_data cur_data;
1067 if (!CHIP_IS_E1(bp))
1068 bnx2x_read_mf_cfg(bp);
1070 /* Read the current link report info */
1071 bnx2x_fill_report_data(bp, &cur_data);
1073 /* Don't report link down or exactly the same link status twice */
1074 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1075 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1076 &bp->last_reported_link.link_report_flags) &&
1077 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1078 &cur_data.link_report_flags)))
1083 /* We are going to report new link parameters now -
1084 * remember the current data for the next time.
1086 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1088 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1089 &cur_data.link_report_flags)) {
1090 netif_carrier_off(bp->dev);
1091 netdev_err(bp->dev, "NIC Link is Down\n");
1097 netif_carrier_on(bp->dev);
1099 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1100 &cur_data.link_report_flags))
1105 /* Handle the FC at the end so that only these flags could
1106 * possibly be set. This way we may easily check if there is no FC
1109 if (cur_data.link_report_flags) {
1110 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1111 &cur_data.link_report_flags)) {
1112 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1113 &cur_data.link_report_flags))
1114 flow = "ON - receive & transmit";
1116 flow = "ON - receive";
1118 flow = "ON - transmit";
1123 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1124 cur_data.line_speed, duplex, flow);
1128 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1132 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1133 struct eth_rx_sge *sge;
1135 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1137 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1138 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1141 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1142 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1146 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1147 struct bnx2x_fastpath *fp, int last)
1151 for (i = 0; i < last; i++) {
1152 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1153 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1154 u8 *data = first_buf->data;
1157 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1160 if (tpa_info->tpa_state == BNX2X_TPA_START)
1161 dma_unmap_single(&bp->pdev->dev,
1162 dma_unmap_addr(first_buf, mapping),
1163 fp->rx_buf_size, DMA_FROM_DEVICE);
1164 bnx2x_frag_free(fp, data);
1165 first_buf->data = NULL;
1169 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1173 for_each_rx_queue_cnic(bp, j) {
1174 struct bnx2x_fastpath *fp = &bp->fp[j];
1178 /* Activate BD ring */
1180 * this will generate an interrupt (to the TSTORM)
1181 * must only be done after chip is initialized
1183 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1188 void bnx2x_init_rx_rings(struct bnx2x *bp)
1190 int func = BP_FUNC(bp);
1194 /* Allocate TPA resources */
1195 for_each_eth_queue(bp, j) {
1196 struct bnx2x_fastpath *fp = &bp->fp[j];
1199 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1201 if (!fp->disable_tpa) {
1202 /* Fill the per-aggregation pool */
1203 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1204 struct bnx2x_agg_info *tpa_info =
1206 struct sw_rx_bd *first_buf =
1207 &tpa_info->first_buf;
1209 first_buf->data = bnx2x_frag_alloc(fp);
1210 if (!first_buf->data) {
1211 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1213 bnx2x_free_tpa_pool(bp, fp, i);
1214 fp->disable_tpa = 1;
1217 dma_unmap_addr_set(first_buf, mapping, 0);
1218 tpa_info->tpa_state = BNX2X_TPA_STOP;
1221 /* "next page" elements initialization */
1222 bnx2x_set_next_page_sgl(fp);
1224 /* set SGEs bit mask */
1225 bnx2x_init_sge_ring_bit_mask(fp);
1227 /* Allocate SGEs and initialize the ring elements */
1228 for (i = 0, ring_prod = 0;
1229 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1231 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1232 BNX2X_ERR("was only able to allocate %d rx sges\n",
1234 BNX2X_ERR("disabling TPA for queue[%d]\n",
1236 /* Cleanup already allocated elements */
1237 bnx2x_free_rx_sge_range(bp, fp,
1239 bnx2x_free_tpa_pool(bp, fp,
1241 fp->disable_tpa = 1;
1245 ring_prod = NEXT_SGE_IDX(ring_prod);
1248 fp->rx_sge_prod = ring_prod;
1252 for_each_eth_queue(bp, j) {
1253 struct bnx2x_fastpath *fp = &bp->fp[j];
1257 /* Activate BD ring */
1259 * this will generate an interrupt (to the TSTORM)
1260 * must only be done after chip is initialized
1262 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1268 if (CHIP_IS_E1(bp)) {
1269 REG_WR(bp, BAR_USTRORM_INTMEM +
1270 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1271 U64_LO(fp->rx_comp_mapping));
1272 REG_WR(bp, BAR_USTRORM_INTMEM +
1273 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1274 U64_HI(fp->rx_comp_mapping));
1279 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1282 struct bnx2x *bp = fp->bp;
1284 for_each_cos_in_tx_queue(fp, cos) {
1285 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1286 unsigned pkts_compl = 0, bytes_compl = 0;
1288 u16 sw_prod = txdata->tx_pkt_prod;
1289 u16 sw_cons = txdata->tx_pkt_cons;
1291 while (sw_cons != sw_prod) {
1292 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1293 &pkts_compl, &bytes_compl);
1297 netdev_tx_reset_queue(
1298 netdev_get_tx_queue(bp->dev,
1299 txdata->txq_index));
1303 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1307 for_each_tx_queue_cnic(bp, i) {
1308 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1312 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1316 for_each_eth_queue(bp, i) {
1317 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1321 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1323 struct bnx2x *bp = fp->bp;
1326 /* ring wasn't allocated */
1327 if (fp->rx_buf_ring == NULL)
1330 for (i = 0; i < NUM_RX_BD; i++) {
1331 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1332 u8 *data = rx_buf->data;
1336 dma_unmap_single(&bp->pdev->dev,
1337 dma_unmap_addr(rx_buf, mapping),
1338 fp->rx_buf_size, DMA_FROM_DEVICE);
1340 rx_buf->data = NULL;
1341 bnx2x_frag_free(fp, data);
1345 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1349 for_each_rx_queue_cnic(bp, j) {
1350 bnx2x_free_rx_bds(&bp->fp[j]);
1354 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1358 for_each_eth_queue(bp, j) {
1359 struct bnx2x_fastpath *fp = &bp->fp[j];
1361 bnx2x_free_rx_bds(fp);
1363 if (!fp->disable_tpa)
1364 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1368 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1370 bnx2x_free_tx_skbs_cnic(bp);
1371 bnx2x_free_rx_skbs_cnic(bp);
1374 void bnx2x_free_skbs(struct bnx2x *bp)
1376 bnx2x_free_tx_skbs(bp);
1377 bnx2x_free_rx_skbs(bp);
1380 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1382 /* load old values */
1383 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1385 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1386 /* leave all but MAX value */
1387 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1389 /* set new MAX value */
1390 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1391 & FUNC_MF_CFG_MAX_BW_MASK;
1393 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1398 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1400 * @bp: driver handle
1401 * @nvecs: number of vectors to be released
1403 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1407 if (nvecs == offset)
1409 free_irq(bp->msix_table[offset].vector, bp->dev);
1410 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1411 bp->msix_table[offset].vector);
1414 if (CNIC_SUPPORT(bp)) {
1415 if (nvecs == offset)
1420 for_each_eth_queue(bp, i) {
1421 if (nvecs == offset)
1423 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1424 i, bp->msix_table[offset].vector);
1426 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1430 void bnx2x_free_irq(struct bnx2x *bp)
1432 if (bp->flags & USING_MSIX_FLAG &&
1433 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1434 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1435 CNIC_SUPPORT(bp) + 1);
1437 free_irq(bp->dev->irq, bp->dev);
1440 int bnx2x_enable_msix(struct bnx2x *bp)
1442 int msix_vec = 0, i, rc, req_cnt;
1444 bp->msix_table[msix_vec].entry = msix_vec;
1445 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1446 bp->msix_table[0].entry);
1449 /* Cnic requires an msix vector for itself */
1450 if (CNIC_SUPPORT(bp)) {
1451 bp->msix_table[msix_vec].entry = msix_vec;
1452 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1453 msix_vec, bp->msix_table[msix_vec].entry);
1457 /* We need separate vectors for ETH queues only (not FCoE) */
1458 for_each_eth_queue(bp, i) {
1459 bp->msix_table[msix_vec].entry = msix_vec;
1460 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1461 msix_vec, msix_vec, i);
1465 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
1467 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1470 * reconfigure number of tx/rx queues according to available
1473 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1474 /* how many fewer vectors will we have? */
1475 int diff = req_cnt - rc;
1477 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1479 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1482 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1486 * decrease number of queues by number of unallocated entries
1488 bp->num_ethernet_queues -= diff;
1489 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1491 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1493 } else if (rc > 0) {
1494 /* Get by with single vector */
1495 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1497 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1502 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1503 bp->flags |= USING_SINGLE_MSIX_FLAG;
1505 BNX2X_DEV_INFO("set number of queues to 1\n");
1506 bp->num_ethernet_queues = 1;
1507 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1508 } else if (rc < 0) {
1509 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1513 bp->flags |= USING_MSIX_FLAG;
1518 /* fall to INTx if not enough memory */
1520 bp->flags |= DISABLE_MSI_FLAG;
1525 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1527 int i, rc, offset = 0;
1529 rc = request_irq(bp->msix_table[offset++].vector,
1530 bnx2x_msix_sp_int, 0,
1531 bp->dev->name, bp->dev);
1533 BNX2X_ERR("request sp irq failed\n");
1537 if (CNIC_SUPPORT(bp))
1540 for_each_eth_queue(bp, i) {
1541 struct bnx2x_fastpath *fp = &bp->fp[i];
1542 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1545 rc = request_irq(bp->msix_table[offset].vector,
1546 bnx2x_msix_fp_int, 0, fp->name, fp);
1548 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1549 bp->msix_table[offset].vector, rc);
1550 bnx2x_free_msix_irqs(bp, offset);
1557 i = BNX2X_NUM_ETH_QUEUES(bp);
1558 offset = 1 + CNIC_SUPPORT(bp);
1559 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1560 bp->msix_table[0].vector,
1561 0, bp->msix_table[offset].vector,
1562 i - 1, bp->msix_table[offset + i - 1].vector);
1567 int bnx2x_enable_msi(struct bnx2x *bp)
1571 rc = pci_enable_msi(bp->pdev);
1573 BNX2X_DEV_INFO("MSI is not attainable\n");
1576 bp->flags |= USING_MSI_FLAG;
1581 static int bnx2x_req_irq(struct bnx2x *bp)
1583 unsigned long flags;
1586 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1589 flags = IRQF_SHARED;
1591 if (bp->flags & USING_MSIX_FLAG)
1592 irq = bp->msix_table[0].vector;
1594 irq = bp->pdev->irq;
1596 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1599 static int bnx2x_setup_irqs(struct bnx2x *bp)
1602 if (bp->flags & USING_MSIX_FLAG &&
1603 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1604 rc = bnx2x_req_msix_irqs(bp);
1609 rc = bnx2x_req_irq(bp);
1611 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1614 if (bp->flags & USING_MSI_FLAG) {
1615 bp->dev->irq = bp->pdev->irq;
1616 netdev_info(bp->dev, "using MSI IRQ %d\n",
1619 if (bp->flags & USING_MSIX_FLAG) {
1620 bp->dev->irq = bp->msix_table[0].vector;
1621 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1629 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1633 for_each_rx_queue_cnic(bp, i)
1634 napi_enable(&bnx2x_fp(bp, i, napi));
1637 static void bnx2x_napi_enable(struct bnx2x *bp)
1641 for_each_eth_queue(bp, i)
1642 napi_enable(&bnx2x_fp(bp, i, napi));
1645 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1649 for_each_rx_queue_cnic(bp, i)
1650 napi_disable(&bnx2x_fp(bp, i, napi));
1653 static void bnx2x_napi_disable(struct bnx2x *bp)
1657 for_each_eth_queue(bp, i)
1658 napi_disable(&bnx2x_fp(bp, i, napi));
1661 void bnx2x_netif_start(struct bnx2x *bp)
1663 if (netif_running(bp->dev)) {
1664 bnx2x_napi_enable(bp);
1665 if (CNIC_LOADED(bp))
1666 bnx2x_napi_enable_cnic(bp);
1667 bnx2x_int_enable(bp);
1668 if (bp->state == BNX2X_STATE_OPEN)
1669 netif_tx_wake_all_queues(bp->dev);
1673 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1675 bnx2x_int_disable_sync(bp, disable_hw);
1676 bnx2x_napi_disable(bp);
1677 if (CNIC_LOADED(bp))
1678 bnx2x_napi_disable_cnic(bp);
1681 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1683 struct bnx2x *bp = netdev_priv(dev);
1685 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1686 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1687 u16 ether_type = ntohs(hdr->h_proto);
1689 /* Skip VLAN tag if present */
1690 if (ether_type == ETH_P_8021Q) {
1691 struct vlan_ethhdr *vhdr =
1692 (struct vlan_ethhdr *)skb->data;
1694 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1697 /* If ethertype is FCoE or FIP - use FCoE ring */
1698 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1699 return bnx2x_fcoe_tx(bp, txq_index);
1702 /* select a non-FCoE queue */
1703 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1707 void bnx2x_set_num_queues(struct bnx2x *bp)
1710 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1712 /* override in STORAGE SD modes */
1713 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1714 bp->num_ethernet_queues = 1;
1716 /* Add special queues */
1717 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1718 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1720 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1724 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1726 * @bp: Driver handle
1728 * We currently support at most 16 Tx queues for each CoS, thus we will
1729 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1732 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1733 * index after all ETH L2 indices.
1735 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1736 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1737 * 16..31,...) with indices that are not coupled with any real Tx queue.
1739 * The proper configuration of skb->queue_mapping is handled by
1740 * bnx2x_select_queue() and __skb_tx_hash().
1742 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1743 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1745 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1749 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1750 rx = BNX2X_NUM_ETH_QUEUES(bp);
1752 /* account for fcoe queue */
1753 if (include_cnic && !NO_FCOE(bp)) {
1758 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1760 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1763 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1765 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1769 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1775 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1779 for_each_queue(bp, i) {
1780 struct bnx2x_fastpath *fp = &bp->fp[i];
1783 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1786 * Although there are no IP frames expected to arrive to
1787 * this ring we still want to add an
1788 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1791 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1794 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1795 IP_HEADER_ALIGNMENT_PADDING +
1798 BNX2X_FW_RX_ALIGN_END;
1799 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
1800 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1801 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1803 fp->rx_frag_size = 0;
1807 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1810 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1812 /* Prepare the initial contents of the indirection table if RSS is
1815 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1816 bp->rss_conf_obj.ind_table[i] =
1818 ethtool_rxfh_indir_default(i, num_eth_queues);
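/* ethtool_rxfh_indir_default(i, n) spreads the table entries round-robin
 * (i % n) across the n ETH queues.
 */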
1821 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1822 * per-port, so if explicit configuration is needed, do it only
1825 * For 57712 and newer on the other hand it's a per-function
1828 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1831 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1834 struct bnx2x_config_rss_params params = {NULL};
1836 /* Although RSS is meaningless when there is a single HW queue we
1837 * still need it enabled in order to have HW Rx hash generated.
1839 * if (!is_eth_multi(bp))
1840 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1843 params.rss_obj = rss_obj;
1845 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1847 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1849 /* RSS configuration */
1850 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1851 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1852 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1853 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1854 if (rss_obj->udp_rss_v4)
1855 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1856 if (rss_obj->udp_rss_v6)
1857 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
1860 params.rss_result_mask = MULTI_MASK;
1862 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1866 prandom_bytes(params.rss_key, sizeof(params.rss_key));
1867 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1870 return bnx2x_config_rss(bp, &params);
1873 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1875 struct bnx2x_func_state_params func_params = {NULL};
1877 /* Prepare parameters for function state transitions */
1878 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1880 func_params.f_obj = &bp->func_obj;
1881 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1883 func_params.params.hw_init.load_phase = load_code;
1885 return bnx2x_func_state_change(bp, &func_params);
1889 * Cleans the objects that have internal lists without sending
1890 * ramrods. Should be run when interrupts are disabled.
1892 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1895 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1896 struct bnx2x_mcast_ramrod_params rparam = {NULL};
1897 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1899 /***************** Cleanup MACs' object first *************************/
1901 /* Wait for completion of requested */
1902 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1903 /* Perform a dry cleanup */
1904 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1906 /* Clean ETH primary MAC */
1907 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1908 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1911 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1913 /* Cleanup UC list */
1915 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1916 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1919 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1921 /***************** Now clean mcast object *****************************/
1922 rparam.mcast_obj = &bp->mcast_obj;
1923 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1925 /* Add a DEL command... */
1926 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1928 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1931 /* ...and wait until all pending commands are cleared */
1932 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1935 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1940 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1944 #ifndef BNX2X_STOP_ON_ERROR
1945 #define LOAD_ERROR_EXIT(bp, label) \
1947 (bp)->state = BNX2X_STATE_ERROR; \
1951 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1953 bp->cnic_loaded = false; \
1956 #else /*BNX2X_STOP_ON_ERROR*/
1957 #define LOAD_ERROR_EXIT(bp, label) \
1959 (bp)->state = BNX2X_STATE_ERROR; \
1963 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1965 bp->cnic_loaded = false; \
1969 #endif /*BNX2X_STOP_ON_ERROR*/
1971 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1973 /* build FW version dword */
1974 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1975 (BCM_5710_FW_MINOR_VERSION << 8) +
1976 (BCM_5710_FW_REVISION_VERSION << 16) +
1977 (BCM_5710_FW_ENGINEERING_VERSION << 24);
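/* Illustrative example (version numbers assumed): FW 7.8.2.0 would be
 * packed as 0x00020807 - major in the low byte, then minor, revision and
 * engineering version in successively higher bytes.
 */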
1979 /* read loaded FW from chip */
1980 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1982 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1984 if (loaded_fw != my_fw) {
1986 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1995 * bnx2x_bz_fp - zero content of the fastpath structure.
1997 * @bp: driver handle
1998 * @index: fastpath index to be zeroed
2000 * Makes sure the contents of the bp->fp[index].napi are kept
2003 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2005 struct bnx2x_fastpath *fp = &bp->fp[index];
2006 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2009 struct napi_struct orig_napi = fp->napi;
2010 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2011 /* bzero bnx2x_fastpath contents */
2012 if (bp->stats_init) {
2013 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2014 memset(fp, 0, sizeof(*fp));
2016 /* Keep Queue statistics */
2017 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2018 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2020 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2022 if (tmp_eth_q_stats)
2023 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
2024 sizeof(struct bnx2x_eth_q_stats));
2026 tmp_eth_q_stats_old =
2027 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2029 if (tmp_eth_q_stats_old)
2030 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
2031 sizeof(struct bnx2x_eth_q_stats_old));
2033 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2034 memset(fp, 0, sizeof(*fp));
2036 if (tmp_eth_q_stats) {
2037 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2038 sizeof(struct bnx2x_eth_q_stats));
2039 kfree(tmp_eth_q_stats);
2042 if (tmp_eth_q_stats_old) {
2043 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
2044 sizeof(struct bnx2x_eth_q_stats_old));
2045 kfree(tmp_eth_q_stats_old);
2050 /* Restore the NAPI object as it has been already initialized */
2051 fp->napi = orig_napi;
2052 fp->tpa_info = orig_tpa_info;
2056 fp->max_cos = bp->max_cos;
2058 /* Special queues support only one CoS */
2061 /* Init txdata pointers */
2063 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2065 for_each_cos_in_tx_queue(fp, cos)
2066 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2067 BNX2X_NUM_ETH_QUEUES(bp) + index];
2070 * set the tpa flag for each queue. The tpa flag determines the queue
2071 * minimal size so it must be set prior to queue memory allocation
2073 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2074 (bp->flags & GRO_ENABLE_FLAG &&
2075 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2076 if (bp->flags & TPA_ENABLE_FLAG)
2077 fp->mode = TPA_MODE_LRO;
2078 else if (bp->flags & GRO_ENABLE_FLAG)
2079 fp->mode = TPA_MODE_GRO;
2081 /* We don't want TPA on an FCoE L2 ring */
2083 fp->disable_tpa = 1;
2086 int bnx2x_load_cnic(struct bnx2x *bp)
2088 int i, rc, port = BP_PORT(bp);
2090 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2092 mutex_init(&bp->cnic_mutex);
2094 rc = bnx2x_alloc_mem_cnic(bp);
2096 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2097 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2100 rc = bnx2x_alloc_fp_mem_cnic(bp);
2102 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2103 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2106 /* Update the number of queues with the cnic queues */
2107 rc = bnx2x_set_real_num_queues(bp, 1);
2109 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2110 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2113 /* Add all CNIC NAPI objects */
2114 bnx2x_add_all_napi_cnic(bp);
2115 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2116 bnx2x_napi_enable_cnic(bp);
2118 rc = bnx2x_init_hw_func_cnic(bp);
2120 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2122 bnx2x_nic_init_cnic(bp);
2124 /* Enable Timer scan */
2125 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2127 for_each_cnic_queue(bp, i) {
2128 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2130 BNX2X_ERR("Queue setup failed\n");
2131 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2135 /* Initialize Rx filter. */
2136 netif_addr_lock_bh(bp->dev);
2137 bnx2x_set_rx_mode(bp->dev);
2138 netif_addr_unlock_bh(bp->dev);
2140 /* re-read iscsi info */
2141 bnx2x_get_iscsi_info(bp);
2142 bnx2x_setup_cnic_irq_info(bp);
2143 bnx2x_setup_cnic_info(bp);
2144 bp->cnic_loaded = true;
2145 if (bp->state == BNX2X_STATE_OPEN)
2146 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2149 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2153 #ifndef BNX2X_STOP_ON_ERROR
2155 /* Disable Timer scan */
2156 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2159 bnx2x_napi_disable_cnic(bp);
2160 /* Update the number of queues without the cnic queues */
2161 rc = bnx2x_set_real_num_queues(bp, 0);
2163 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2165 BNX2X_ERR("CNIC-related load failed\n");
2166 bnx2x_free_fp_mem_cnic(bp);
2167 bnx2x_free_mem_cnic(bp);
2169 #endif /* ! BNX2X_STOP_ON_ERROR */
2173 /* must be called with rtnl_lock */
2174 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2176 int port = BP_PORT(bp);
2180 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2182 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2184 #ifdef BNX2X_STOP_ON_ERROR
2185 if (unlikely(bp->panic)) {
2186 BNX2X_ERR("Can't load NIC when there is panic\n");
2191 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2193 /* Set the initial link reported state to link down */
2194 bnx2x_acquire_phy_lock(bp);
2195 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2196 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2197 &bp->last_reported_link.link_report_flags);
2198 bnx2x_release_phy_lock(bp);
2200 /* must be called before memory allocation and HW init */
2201 bnx2x_ilt_set_info(bp);
2204 * Zero fastpath structures preserving invariants like napi, which are
2205 * allocated only once, fp index, max_cos, bp pointer.
2206 * Also set fp->disable_tpa and txdata_ptr.
2208 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2209 for_each_queue(bp, i)
2211 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2212 bp->num_cnic_queues) *
2213 sizeof(struct bnx2x_fp_txdata));
2215 bp->fcoe_init = false;
2217 /* Set the receive queues buffer size */
2218 bnx2x_set_rx_buf_size(bp);
2220 if (bnx2x_alloc_mem(bp))
2223 /* As long as bnx2x_alloc_mem() may possibly update
2224 * bp->num_queues, bnx2x_set_real_num_queues() should always
2225 * come after it. At this stage cnic queues are not counted.
2227 rc = bnx2x_set_real_num_queues(bp, 0);
2229 BNX2X_ERR("Unable to set real_num_queues\n");
2230 LOAD_ERROR_EXIT(bp, load_error0);
2233 /* configure multi cos mappings in kernel.
2234 * this configuration may be overridden by a multi class queue discipline
2235 * or by a dcbx negotiation result.
2237 bnx2x_setup_tc(bp->dev, bp->max_cos);
2239 /* Add all NAPI objects */
2240 bnx2x_add_all_napi(bp);
2241 DP(NETIF_MSG_IFUP, "napi added\n");
2242 bnx2x_napi_enable(bp);
2244 /* set pf load just before approaching the MCP */
2245 bnx2x_set_pf_load(bp);
2247 /* Send LOAD_REQUEST command to MCP
2248 * Returns the type of LOAD command:
2249 * if it is the first port to be initialized
2250 * common blocks should be initialized, otherwise - not
2252 if (!BP_NOMCP(bp)) {
2255 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2256 DRV_MSG_SEQ_NUMBER_MASK);
2257 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2259 /* Get current FW pulse sequence */
2260 bp->fw_drv_pulse_wr_seq =
2261 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2262 DRV_PULSE_SEQ_MASK);
2263 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2265 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2266 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2268 BNX2X_ERR("MCP response failure, aborting\n");
2270 LOAD_ERROR_EXIT(bp, load_error1);
2272 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2273 BNX2X_ERR("Driver load refused\n");
2274 rc = -EBUSY; /* other port in diagnostic mode */
2275 LOAD_ERROR_EXIT(bp, load_error1);
2277 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2278 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2279 /* abort nic load if version mismatch */
2280 if (!bnx2x_test_firmware_version(bp, true)) {
2282 LOAD_ERROR_EXIT(bp, load_error2);
2287 int path = BP_PATH(bp);
2289 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2290 path, load_count[path][0], load_count[path][1],
2291 load_count[path][2]);
2292 load_count[path][0]++;
2293 load_count[path][1 + port]++;
2294 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2295 path, load_count[path][0], load_count[path][1],
2296 load_count[path][2]);
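/* Without an MCP, derive the load type locally: the first function to load
 * on this path does the COMMON init, the first one on this port does the
 * PORT init, and any later function does a FUNCTION-only init.
 */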
2297 if (load_count[path][0] == 1)
2298 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2299 else if (load_count[path][1 + port] == 1)
2300 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2302 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2305 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2306 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2307 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2310 * We need the barrier to ensure the ordering between the
2311 * writing to bp->port.pmf here and reading it from the
2312 * bnx2x_periodic_task().
2318 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2320 /* Init Function state controlling object */
2321 bnx2x__init_func_obj(bp);
2324 rc = bnx2x_init_hw(bp, load_code);
2326 BNX2X_ERR("HW init failed, aborting\n");
2327 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2328 LOAD_ERROR_EXIT(bp, load_error2);
2331 /* Connect to IRQs */
2332 rc = bnx2x_setup_irqs(bp);
2334 BNX2X_ERR("IRQs setup failed\n");
2335 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2336 LOAD_ERROR_EXIT(bp, load_error2);
2339 /* Setup NIC internals and enable interrupts */
2340 bnx2x_nic_init(bp, load_code);
2342 /* Init per-function objects */
2343 bnx2x_init_bp_objs(bp);
2345 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2346 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2347 (bp->common.shmem2_base)) {
2348 if (SHMEM2_HAS(bp, dcc_support))
2349 SHMEM2_WR(bp, dcc_support,
2350 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2351 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2352 if (SHMEM2_HAS(bp, afex_driver_support))
2353 SHMEM2_WR(bp, afex_driver_support,
2354 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2357 /* Set AFEX default VLAN tag to an invalid value */
2358 bp->afex_def_vlan_tag = -1;
2360 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2361 rc = bnx2x_func_start(bp);
2363 BNX2X_ERR("Function start failed!\n");
2364 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2365 LOAD_ERROR_EXIT(bp, load_error3);
2368 /* Send LOAD_DONE command to MCP */
2369 if (!BP_NOMCP(bp)) {
2370 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2372 BNX2X_ERR("MCP response failure, aborting\n");
2374 LOAD_ERROR_EXIT(bp, load_error3);
2378 rc = bnx2x_setup_leading(bp);
2380 BNX2X_ERR("Setup leading failed!\n");
2381 LOAD_ERROR_EXIT(bp, load_error3);
2384 for_each_nondefault_eth_queue(bp, i) {
2385 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2387 BNX2X_ERR("Queue setup failed\n");
2388 LOAD_ERROR_EXIT(bp, load_error3);
2392 rc = bnx2x_init_rss_pf(bp);
2394 BNX2X_ERR("PF RSS init failed\n");
2395 LOAD_ERROR_EXIT(bp, load_error3);
2398 /* Now that the clients are configured we are ready to work */
2399 bp->state = BNX2X_STATE_OPEN;
2401 /* Configure a ucast MAC */
2402 rc = bnx2x_set_eth_mac(bp, true);
2404 BNX2X_ERR("Setting Ethernet MAC failed\n");
2405 LOAD_ERROR_EXIT(bp, load_error3);
2408 if (bp->pending_max) {
2409 bnx2x_update_max_mf_config(bp, bp->pending_max);
2410 bp->pending_max = 0;
2414 bnx2x_initial_phy_init(bp, load_mode);
2415 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2417 /* Start fast path */
2419 /* Initialize Rx filter. */
2420 netif_addr_lock_bh(bp->dev);
2421 bnx2x_set_rx_mode(bp->dev);
2422 netif_addr_unlock_bh(bp->dev);
2425 switch (load_mode) {
2427 /* Tx queues should only be re-enabled */
2428 netif_tx_wake_all_queues(bp->dev);
2432 netif_tx_start_all_queues(bp->dev);
2433 smp_mb__after_clear_bit();
2437 case LOAD_LOOPBACK_EXT:
2438 bp->state = BNX2X_STATE_DIAG;
2446 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2448 bnx2x__link_status_update(bp);
2450 /* start the timer */
2451 mod_timer(&bp->timer, jiffies + bp->current_interval);
2453 if (CNIC_ENABLED(bp))
2454 bnx2x_load_cnic(bp);
2456 /* mark driver is loaded in shmem2 */
2457 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2459 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2460 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2461 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2462 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2465 /* Wait for all pending SP commands to complete */
2466 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2467 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2468 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2472 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2473 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2474 bnx2x_dcbx_init(bp, false);
2476 DP(NETIF_MSG_IFUP, "Ending NIC load successfully\n");
2480 #ifndef BNX2X_STOP_ON_ERROR
2482 bnx2x_int_disable_sync(bp, 1);
2484 /* Clean queueable objects */
2485 bnx2x_squeeze_objects(bp);
2487 /* Free SKBs, SGEs, TPA pool and driver internals */
2488 bnx2x_free_skbs(bp);
2489 for_each_rx_queue(bp, i)
2490 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2495 if (!BP_NOMCP(bp)) {
2496 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2497 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2502 bnx2x_napi_disable(bp);
2503 /* clear pf_load status, as it was already set */
2504 bnx2x_clear_pf_load(bp);
2509 #endif /* ! BNX2X_STOP_ON_ERROR */
2512 /* must be called with rtnl_lock */
2513 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2516 bool global = false;
2518 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2520 /* mark driver is unloaded in shmem2 */
2521 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2523 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2524 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2525 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2528 if ((bp->state == BNX2X_STATE_CLOSED) ||
2529 (bp->state == BNX2X_STATE_ERROR)) {
2530 /* We can get here if the driver has been unloaded
2531 * during parity error recovery and is either waiting for a
2532 * leader to complete or for other functions to unload and
2533 * then ifdown has been issued. In this case we want to
2534 * unload and let other functions complete a recovery
2537 bp->recovery_state = BNX2X_RECOVERY_DONE;
2539 bnx2x_release_leader_lock(bp);
2542 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2543 BNX2X_ERR("Can't unload in closed or error state\n");
2548 * It's important to set bp->state to a value different from
2549 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2550 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2552 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2555 if (CNIC_LOADED(bp))
2556 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2559 bnx2x_tx_disable(bp);
2560 netdev_reset_tc(bp->dev);
2562 bp->rx_mode = BNX2X_RX_MODE_NONE;
2564 del_timer_sync(&bp->timer);
2566 /* Set ALWAYS_ALIVE bit in shmem */
2567 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2569 bnx2x_drv_pulse(bp);
2571 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2572 bnx2x_save_statistics(bp);
2574 /* Cleanup the chip if needed */
2575 if (unload_mode != UNLOAD_RECOVERY)
2576 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2578 /* Send the UNLOAD_REQUEST to the MCP */
2579 bnx2x_send_unload_req(bp, unload_mode);
2582 * Prevent transactions to host from the functions on the
2583 * engine that doesn't reset global blocks in case of global
2584 * attention once global blocks are reset and gates are opened
2585 * (the engine whose leader will perform the recovery
2588 if (!CHIP_IS_E1x(bp))
2589 bnx2x_pf_disable(bp);
2591 /* Disable HW interrupts, NAPI */
2592 bnx2x_netif_stop(bp, 1);
2593 /* Delete all NAPI objects */
2594 bnx2x_del_all_napi(bp);
2595 if (CNIC_LOADED(bp))
2596 bnx2x_del_all_napi_cnic(bp);
2600 /* Report UNLOAD_DONE to MCP */
2601 bnx2x_send_unload_done(bp, false);
2605 * At this stage no more interrupts will arrive so we may safely clean
2606 * the queueable objects here in case they failed to get cleaned so far.
2608 bnx2x_squeeze_objects(bp);
2610 /* There should be no more pending SP commands at this stage */
2615 /* Free SKBs, SGEs, TPA pool and driver internals */
2616 bnx2x_free_skbs(bp);
2617 if (CNIC_LOADED(bp))
2618 bnx2x_free_skbs_cnic(bp);
2619 for_each_rx_queue(bp, i)
2620 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2622 if (CNIC_LOADED(bp)) {
2623 bnx2x_free_fp_mem_cnic(bp);
2624 bnx2x_free_mem_cnic(bp);
2628 bp->state = BNX2X_STATE_CLOSED;
2629 bp->cnic_loaded = false;
2631 /* Check if there are pending parity attentions. If there are - set
2632 * RECOVERY_IN_PROGRESS.
2634 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2635 bnx2x_set_reset_in_progress(bp);
2637 /* Set RESET_IS_GLOBAL if needed */
2639 bnx2x_set_reset_global(bp);
2643 /* The last driver must disable a "close the gate" if there is no
2644 * parity attention or "process kill" pending.
2646 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2647 bnx2x_disable_close_the_gate(bp);
2649 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2654 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2658 /* If there is no power capability, silently succeed */
2660 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2664 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2668 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2669 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2670 PCI_PM_CTRL_PME_STATUS));
2672 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2673 /* delay required during transition out of D3hot */
2678 /* If there are other clients above, don't
2679 shut down the power */
2680 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2682 /* Don't shut down the power for emulation and FPGA */
2683 if (CHIP_REV_IS_SLOW(bp))
2686 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2690 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2692 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2695 /* No more memory access after this point until
2696 * device is brought back to D0.
2701 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2708 * net_device service functions
2710 int bnx2x_poll(struct napi_struct *napi, int budget)
2714 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2716 struct bnx2x *bp = fp->bp;
2719 #ifdef BNX2X_STOP_ON_ERROR
2720 if (unlikely(bp->panic)) {
2721 napi_complete(napi);
2726 for_each_cos_in_tx_queue(fp, cos)
2727 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2728 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
2731 if (bnx2x_has_rx_work(fp)) {
2732 work_done += bnx2x_rx_int(fp, budget - work_done);
2734 /* must not complete if we consumed full budget */
2735 if (work_done >= budget)
2739 /* Fall out from the NAPI loop if needed */
2740 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2742 /* No need to update SB for FCoE L2 ring as long as
2743 * it's connected to the default SB and the SB
2744 * has been updated when NAPI was scheduled.
2746 if (IS_FCOE_FP(fp)) {
2747 napi_complete(napi);
2750 bnx2x_update_fpsb_idx(fp);
2751 /* bnx2x_has_rx_work() reads the status block,
2752 * thus we need to ensure that status block indices
2753 * have been actually read (bnx2x_update_fpsb_idx)
2754 * prior to this check (bnx2x_has_rx_work) so that
2755 * we won't write the "newer" value of the status block
2756 * to IGU (if there was a DMA right after
2757 * bnx2x_has_rx_work and if there is no rmb, the memory
2758 * reading (bnx2x_update_fpsb_idx) may be postponed
2759 * to right before bnx2x_ack_sb). In this case there
2760 * will never be another interrupt until there is
2761 * another update of the status block, while there
2762 * is still unhandled work.
2766 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2767 napi_complete(napi);
2768 /* Re-enable interrupts */
2769 DP(NETIF_MSG_RX_STATUS,
2770 "Update index to %d\n", fp->fp_hc_idx);
2771 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2772 le16_to_cpu(fp->fp_hc_idx),
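/* For reference, the completion logic above follows the standard NAPI
 * contract.  A minimal, hypothetical poll routine (illustrative sketch
 * only; foo_clean_rx() and foo_enable_irq() are made-up helpers) would be:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = foo_clean_rx(napi, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			foo_enable_irq(napi);
 *		}
 *		return done;
 *	}
 *
 * bnx2x adds one twist: before completing it re-reads the status block
 * under rmb() and acks it via bnx2x_ack_sb(), so an update that raced
 * with the poll is never lost.
 */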
2782 /* we split the first BD into a headers BD and data BDs
2783 * to ease the pain of our fellow microcode engineers;
2784 * we use one mapping for both BDs
2786 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2787 struct bnx2x_fp_txdata *txdata,
2788 struct sw_tx_bd *tx_buf,
2789 struct eth_tx_start_bd **tx_bd, u16 hlen,
2790 u16 bd_prod, int nbd)
2792 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2793 struct eth_tx_bd *d_tx_bd;
2795 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2797 /* first fix first BD */
2798 h_tx_bd->nbd = cpu_to_le16(nbd);
2799 h_tx_bd->nbytes = cpu_to_le16(hlen);
2801 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2802 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2804 /* now get a new data BD
2805 * (after the pbd) and fill it */
2806 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2807 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2809 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2810 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2812 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2813 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2814 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2816 /* this marks the BD as one that has no individual mapping */
2817 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2819 DP(NETIF_MSG_TX_QUEUED,
2820 "TSO split data size is %d (%x:%x)\n",
2821 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2824 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2829 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2832 csum = (u16) ~csum_fold(csum_sub(csum,
2833 csum_partial(t_header - fix, fix, 0)));
2836 csum = (u16) ~csum_fold(csum_add(csum,
2837 csum_partial(t_header, -fix, 0)));
2839 return swab16(csum);
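/* Illustrative note (added): bnx2x_csum_fix() re-bases a partial checksum
 * so that it covers exactly the bytes starting at the transport header.
 * For a positive fix it subtracts the sum of the "fix" bytes preceding
 * t_header; for a negative fix it adds the missing bytes back in.
 * Conceptually, for the fix > 0 case:
 *
 *	__wsum extra = csum_partial(t_header - fix, fix, 0);
 *	u16 adjusted = swab16((u16)~csum_fold(csum_sub(csum, extra)));
 *
 * The final swab16() mirrors the byte swapping used elsewhere when
 * filling tcp_pseudo_csum in the parsing BD.
 */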
2842 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2846 if (skb->ip_summed != CHECKSUM_PARTIAL)
2850 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2852 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2853 rc |= XMIT_CSUM_TCP;
2857 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2858 rc |= XMIT_CSUM_TCP;
2862 if (skb_is_gso_v6(skb))
2863 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2864 else if (skb_is_gso(skb))
2865 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
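/* Example (added for illustration): a CHECKSUM_PARTIAL TCP/IPv4 GSO skb
 * leaves the function above with XMIT_GSO_V4 | XMIT_CSUM_V4 |
 * XMIT_CSUM_TCP set, while the TCP/IPv6 equivalent carries
 * XMIT_GSO_V6 | XMIT_CSUM_V6 | XMIT_CSUM_TCP instead.
 */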
2870 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2871 /* check if packet requires linearization (packet is too fragmented)
2872 no need to check fragmentation if page size > 8K (there will be no
2873 violation of FW restrictions) */
2874 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2879 int first_bd_sz = 0;
2881 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2882 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2884 if (xmit_type & XMIT_GSO) {
2885 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2886 /* Check if LSO packet needs to be copied:
2887 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2888 int wnd_size = MAX_FETCH_BD - 3;
2889 /* Number of windows to check */
2890 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2895 /* Headers length */
2896 hlen = (int)(skb_transport_header(skb) - skb->data) +
2899 /* Amount of data (w/o headers) in the linear part of the SKB */
2900 first_bd_sz = skb_headlen(skb) - hlen;
2902 wnd_sum = first_bd_sz;
2904 /* Calculate the first sum - it's special */
2905 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2907 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2909 /* If there was data in the linear part of the skb - check it */
2910 if (first_bd_sz > 0) {
2911 if (unlikely(wnd_sum < lso_mss)) {
2916 wnd_sum -= first_bd_sz;
2919 /* Others are easier: run through the frag list and
2920 check all windows */
2921 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2923 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2925 if (unlikely(wnd_sum < lso_mss)) {
2930 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2933 /* in the non-LSO case a too fragmented packet should always
2940 if (unlikely(to_copy))
2941 DP(NETIF_MSG_TX_QUEUED,
2942 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
2943 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2944 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
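/* Worked example (added; the constant's value is assumed here, purely for
 * illustration): with MAX_FETCH_BD == 13 the window size is 13 - 3 = 10,
 * so every run of 10 consecutive BDs (linear part plus frags) must carry
 * at least gso_size bytes.  For gso_size == 1460, ten 100-byte frags sum
 * to 1000 < 1460 and force skb_linearize(), while ten 200-byte frags
 * pass.  Non-LSO packets are linearized whenever they simply need more
 * BDs than the chip can fetch for a single packet.
 */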
2950 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2953 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2954 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2955 ETH_TX_PARSE_BD_E2_LSO_MSS;
2956 if ((xmit_type & XMIT_GSO_V6) &&
2957 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2958 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2962 * bnx2x_set_pbd_gso - update PBD in GSO case.
2966 * @xmit_type: xmit flags
2968 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2969 struct eth_tx_parse_bd_e1x *pbd,
2972 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2973 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2974 pbd->tcp_flags = pbd_tcp_flags(skb);
2976 if (xmit_type & XMIT_GSO_V4) {
2977 pbd->ip_id = swab16(ip_hdr(skb)->id);
2978 pbd->tcp_pseudo_csum =
2979 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2981 0, IPPROTO_TCP, 0));
2984 pbd->tcp_pseudo_csum =
2985 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2986 &ipv6_hdr(skb)->daddr,
2987 0, IPPROTO_TCP, 0));
2989 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2993 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2995 * @bp: driver handle
2997 * @parsing_data: data to be updated
2998 * @xmit_type: xmit flags
3002 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3003 u32 *parsing_data, u32 xmit_type)
3006 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3007 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3008 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
3010 if (xmit_type & XMIT_CSUM_TCP) {
3011 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3012 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3013 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3015 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3017 /* We support checksum offload for TCP and UDP only.
3018 * No need to pass the UDP header length - it's a constant.
3020 return skb_transport_header(skb) +
3021 sizeof(struct udphdr) - skb->data;
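/* Example (added for illustration): the value returned above is the full
 * header length in bytes counted from skb->data.  For an untagged
 * TCP/IPv4 frame with no IP or TCP options that is typically
 * 14 (Ethernet) + 20 (IPv4) + 20 (TCP) = 54 bytes; for UDP the fixed
 * 8-byte header is used instead of tcp_hdrlen().
 */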
3024 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3025 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3027 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3029 if (xmit_type & XMIT_CSUM_V4)
3030 tx_start_bd->bd_flags.as_bitfield |=
3031 ETH_TX_BD_FLAGS_IP_CSUM;
3033 tx_start_bd->bd_flags.as_bitfield |=
3034 ETH_TX_BD_FLAGS_IPV6;
3036 if (!(xmit_type & XMIT_CSUM_TCP))
3037 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3041 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3043 * @bp: driver handle
3045 * @pbd: parse BD to be updated
3046 * @xmit_type: xmit flags
3048 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3049 struct eth_tx_parse_bd_e1x *pbd,
3052 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3054 /* for now NS flag is not used in Linux */
3056 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3057 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3059 pbd->ip_hlen_w = (skb_transport_header(skb) -
3060 skb_network_header(skb)) >> 1;
3062 hlen += pbd->ip_hlen_w;
3064 /* We support checksum offload for TCP and UDP only */
3065 if (xmit_type & XMIT_CSUM_TCP)
3066 hlen += tcp_hdrlen(skb) / 2;
3068 hlen += sizeof(struct udphdr) / 2;
3070 pbd->total_hlen_w = cpu_to_le16(hlen);
3073 if (xmit_type & XMIT_CSUM_TCP) {
3074 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3077 s8 fix = SKB_CS_OFF(skb); /* signed! */
3079 DP(NETIF_MSG_TX_QUEUED,
3080 "hlen %d fix %d csum before fix %x\n",
3081 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3083 /* HW bug: fixup the CSUM */
3084 pbd->tcp_pseudo_csum =
3085 bnx2x_csum_fix(skb_transport_header(skb),
3088 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3089 pbd->tcp_pseudo_csum);
3095 /* called with netif_tx_lock
3096 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3097 * netif_wake_queue()
3099 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3101 struct bnx2x *bp = netdev_priv(dev);
3103 struct netdev_queue *txq;
3104 struct bnx2x_fp_txdata *txdata;
3105 struct sw_tx_bd *tx_buf;
3106 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3107 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3108 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3109 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3110 u32 pbd_e2_parsing_data = 0;
3111 u16 pkt_prod, bd_prod;
3114 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3117 __le16 pkt_size = 0;
3119 u8 mac_type = UNICAST_ADDRESS;
3121 #ifdef BNX2X_STOP_ON_ERROR
3122 if (unlikely(bp->panic))
3123 return NETDEV_TX_BUSY;
3126 txq_index = skb_get_queue_mapping(skb);
3127 txq = netdev_get_tx_queue(dev, txq_index);
3129 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3131 txdata = &bp->bnx2x_txq[txq_index];
3133 /* enable this debug print to view the transmission queue being used
3134 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3135 txq_index, fp_index, txdata_index); */
3137 /* enable this debug print to view the transmission details
3138 DP(NETIF_MSG_TX_QUEUED,
3139 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3140 txdata->cid, fp_index, txdata_index, txdata, fp); */
3142 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3143 skb_shinfo(skb)->nr_frags +
3145 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3146 /* Handle special storage cases separately */
3147 if (txdata->tx_ring_size == 0) {
3148 struct bnx2x_eth_q_stats *q_stats =
3149 bnx2x_fp_qstats(bp, txdata->parent_fp);
3150 q_stats->driver_filtered_tx_pkt++;
3152 return NETDEV_TX_OK;
3154 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3155 netif_tx_stop_queue(txq);
3156 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3158 return NETDEV_TX_BUSY;
3161 DP(NETIF_MSG_TX_QUEUED,
3162 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
3163 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3164 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3166 eth = (struct ethhdr *)skb->data;
3168 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3169 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3170 if (is_broadcast_ether_addr(eth->h_dest))
3171 mac_type = BROADCAST_ADDRESS;
3173 mac_type = MULTICAST_ADDRESS;
3176 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3177 /* First, check if we need to linearize the skb (due to FW
3178 restrictions). No need to check fragmentation if page size > 8K
3179 (there will be no violation of FW restrictions) */
3180 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3181 /* Statistics of linearization */
3183 if (skb_linearize(skb) != 0) {
3184 DP(NETIF_MSG_TX_QUEUED,
3185 "SKB linearization failed - silently dropping this SKB\n");
3186 dev_kfree_skb_any(skb);
3187 return NETDEV_TX_OK;
3191 /* Map skb linear data for DMA */
3192 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3193 skb_headlen(skb), DMA_TO_DEVICE);
3194 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3195 DP(NETIF_MSG_TX_QUEUED,
3196 "SKB mapping failed - silently dropping this SKB\n");
3197 dev_kfree_skb_any(skb);
3198 return NETDEV_TX_OK;
3201 Please read carefully. First we use one BD which we mark as start,
3202 then we have a parsing info BD (used for TSO or xsum),
3203 and only then we have the rest of the TSO BDs.
3204 (don't forget to mark the last one as last,
3205 and to unmap only AFTER you write to the BD ...)
3206 And above all, all PBD sizes are in words - NOT DWORDS!
3209 /* get current pkt produced now - advance it just before sending packet
3210 * since mapping of pages may fail and cause packet to be dropped
3212 pkt_prod = txdata->tx_pkt_prod;
3213 bd_prod = TX_BD(txdata->tx_bd_prod);
3215 /* get a tx_buf and first BD
3216 * tx_start_bd may be changed during SPLIT,
3217 * but first_bd will always stay first
3219 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3220 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3221 first_bd = tx_start_bd;
3223 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3224 SET_FLAG(tx_start_bd->general_data,
3225 ETH_TX_START_BD_PARSE_NBDS,
3229 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
3231 /* remember the first BD of the packet */
3232 tx_buf->first_bd = txdata->tx_bd_prod;
3236 DP(NETIF_MSG_TX_QUEUED,
3237 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3238 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3240 if (vlan_tx_tag_present(skb)) {
3241 tx_start_bd->vlan_or_ethertype =
3242 cpu_to_le16(vlan_tx_tag_get(skb));
3243 tx_start_bd->bd_flags.as_bitfield |=
3244 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3246 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3248 /* turn on parsing and get a BD */
3249 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3251 if (xmit_type & XMIT_CSUM)
3252 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3254 if (!CHIP_IS_E1x(bp)) {
3255 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3256 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3257 /* Set PBD in checksum offload case */
3258 if (xmit_type & XMIT_CSUM)
3259 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3260 &pbd_e2_parsing_data,
3264 * fill in the MAC addresses in the PBD - for local
3267 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3268 &pbd_e2->src_mac_addr_mid,
3269 &pbd_e2->src_mac_addr_lo,
3271 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3272 &pbd_e2->dst_mac_addr_mid,
3273 &pbd_e2->dst_mac_addr_lo,
3277 SET_FLAG(pbd_e2_parsing_data,
3278 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3280 u16 global_data = 0;
3281 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3282 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3283 /* Set PBD in checksum offload case */
3284 if (xmit_type & XMIT_CSUM)
3285 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3287 SET_FLAG(global_data,
3288 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3289 pbd_e1x->global_data |= cpu_to_le16(global_data);
3292 /* Setup the data pointer of the first BD of the packet */
3293 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3294 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3295 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3296 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3297 pkt_size = tx_start_bd->nbytes;
3299 DP(NETIF_MSG_TX_QUEUED,
3300 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3301 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3302 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3303 tx_start_bd->bd_flags.as_bitfield,
3304 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3306 if (xmit_type & XMIT_GSO) {
3308 DP(NETIF_MSG_TX_QUEUED,
3309 "TSO packet len %d hlen %d total len %d tso size %d\n",
3310 skb->len, hlen, skb_headlen(skb),
3311 skb_shinfo(skb)->gso_size);
3313 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3315 if (unlikely(skb_headlen(skb) > hlen))
3316 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3319 if (!CHIP_IS_E1x(bp))
3320 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3323 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3326 /* Set the PBD's parsing_data field if not zero
3327 * (for the chips newer than 57711).
3329 if (pbd_e2_parsing_data)
3330 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3332 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3334 /* Handle fragmented skb */
3335 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3336 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3338 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3339 skb_frag_size(frag), DMA_TO_DEVICE);
3340 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3341 unsigned int pkts_compl = 0, bytes_compl = 0;
3343 DP(NETIF_MSG_TX_QUEUED,
3344 "Unable to map page - dropping packet...\n");
3346 /* we need to unmap all buffers already mapped
3348 * first_bd->nbd needs to be properly updated
3349 * before the call to bnx2x_free_tx_pkt
3351 first_bd->nbd = cpu_to_le16(nbd);
3352 bnx2x_free_tx_pkt(bp, txdata,
3353 TX_BD(txdata->tx_pkt_prod),
3354 &pkts_compl, &bytes_compl);
3355 return NETDEV_TX_OK;
3358 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3359 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3360 if (total_pkt_bd == NULL)
3361 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3363 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3364 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3365 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3366 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3369 DP(NETIF_MSG_TX_QUEUED,
3370 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3371 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3372 le16_to_cpu(tx_data_bd->nbytes));
3375 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3377 /* update with actual num BDs */
3378 first_bd->nbd = cpu_to_le16(nbd);
3380 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3382 /* now send a tx doorbell, counting the next BD
3383 * if the packet contains or ends with it
3385 if (TX_BD_POFF(bd_prod) < nbd)
3388 /* total_pkt_bytes should be set on the first data BD if
3389 * it's not an LSO packet and there is more than one
3390 * data BD. In this case pkt_size is limited by an MTU value.
3391 * However we prefer to set it for an LSO packet (while we don't
3392 * have to) in order to save some CPU cycles in a non-LSO
3393 * case, where we care about them much more.
3395 if (total_pkt_bd != NULL)
3396 total_pkt_bd->total_pkt_bytes = pkt_size;
3399 DP(NETIF_MSG_TX_QUEUED,
3400 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3401 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3402 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3403 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3404 le16_to_cpu(pbd_e1x->total_hlen_w));
3406 DP(NETIF_MSG_TX_QUEUED,
3407 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3408 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3409 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3410 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3411 pbd_e2->parsing_data);
3412 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3414 netdev_tx_sent_queue(txq, skb->len);
3416 skb_tx_timestamp(skb);
3418 txdata->tx_pkt_prod++;
3420 * Make sure that the BD data is updated before updating the producer
3421 * since FW might read the BD right after the producer is updated.
3422 * This is only applicable for weak-ordered memory model archs such
3423 * as IA-64. The following barrier is also mandatory since FW will
3424 * assume packets must have BDs.
3428 txdata->tx_db.data.prod += nbd;
3431 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3435 txdata->tx_bd_prod += nbd;
3437 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3438 netif_tx_stop_queue(txq);
3440 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3441 * ordering of set_bit() in netif_tx_stop_queue() and read of
3445 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3446 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3447 netif_tx_wake_queue(txq);
3451 return NETDEV_TX_OK;
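/* Note (added): the tail of bnx2x_start_xmit() above implements the usual
 * stop/re-check/wake idiom for a nearly full ring.  In generic form
 * (illustrative sketch only; ring_space() stands in for bnx2x_tx_avail()):
 *
 *	if (ring_space() < MAX_DESC_PER_TX_PKT) {
 *		netif_tx_stop_queue(txq);
 *		smp_mb();	// pairs with the barrier in bnx2x_tx_int()
 *		if (ring_space() >= MAX_DESC_PER_TX_PKT)
 *			netif_tx_wake_queue(txq);
 *	}
 *
 * The barrier closes the race where the completion path frees descriptors
 * between the availability check and the queue stop.
 */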
3455 * bnx2x_setup_tc - routine to configure net_device for multi tc
3457 * @dev: net device to configure
3458 * @num_tc: number of traffic classes to enable
3460 * callback connected to the ndo_setup_tc function pointer
3462 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3464 int cos, prio, count, offset;
3465 struct bnx2x *bp = netdev_priv(dev);
3467 /* setup tc must be called under rtnl lock */
3470 /* no traffic classes requested. aborting */
3472 netdev_reset_tc(dev);
3476 /* requested to support too many traffic classes */
3477 if (num_tc > bp->max_cos) {
3478 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3479 num_tc, bp->max_cos);
3483 /* declare amount of supported traffic classes */
3484 if (netdev_set_num_tc(dev, num_tc)) {
3485 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3489 /* configure priority to traffic class mapping */
3490 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3491 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3492 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3493 "mapping priority %d to tc %d\n",
3494 prio, bp->prio_to_cos[prio]);
3498 /* Use this configuration to differentiate tc0 from other COSes
3499 This can be used for ets or pfc, and save the effort of setting
3500 up a multi class queue disc or negotiating DCBX with a switch
3501 netdev_set_prio_tc_map(dev, 0, 0);
3502 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3503 for (prio = 1; prio < 16; prio++) {
3504 netdev_set_prio_tc_map(dev, prio, 1);
3505 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3508 /* configure traffic class to transmission queue mapping */
3509 for (cos = 0; cos < bp->max_cos; cos++) {
3510 count = BNX2X_NUM_ETH_QUEUES(bp);
3511 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3512 netdev_set_tc_queue(dev, cos, count, offset);
3513 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3514 "mapping tc %d to offset %d count %d\n",
3515 cos, offset, count);
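/* Worked example (added; the queue counts are hypothetical): with 4
 * ethernet queues, BNX2X_NUM_NON_CNIC_QUEUES(bp) == 4 and num_tc == 2,
 * the loop above programs
 *	tc 0 -> offset 0, count 4  (tx queues 0..3)
 *	tc 1 -> offset 4, count 4  (tx queues 4..7)
 * so each traffic class owns its own contiguous block of transmission
 * queues, one block per configured CoS.
 */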
3521 /* called with rtnl_lock */
3522 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3524 struct sockaddr *addr = p;
3525 struct bnx2x *bp = netdev_priv(dev);
3528 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3529 BNX2X_ERR("Requested MAC address is not valid\n");
3533 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3534 !is_zero_ether_addr(addr->sa_data)) {
3535 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3539 if (netif_running(dev)) {
3540 rc = bnx2x_set_eth_mac(bp, false);
3545 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3546 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3548 if (netif_running(dev))
3549 rc = bnx2x_set_eth_mac(bp, true);
3554 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3556 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3557 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3562 if (IS_FCOE_IDX(fp_index)) {
3563 memset(sb, 0, sizeof(union host_hc_status_block));
3564 fp->status_blk_mapping = 0;
3567 if (!CHIP_IS_E1x(bp))
3568 BNX2X_PCI_FREE(sb->e2_sb,
3569 bnx2x_fp(bp, fp_index,
3570 status_blk_mapping),
3571 sizeof(struct host_hc_status_block_e2));
3573 BNX2X_PCI_FREE(sb->e1x_sb,
3574 bnx2x_fp(bp, fp_index,
3575 status_blk_mapping),
3576 sizeof(struct host_hc_status_block_e1x));
3580 if (!skip_rx_queue(bp, fp_index)) {
3581 bnx2x_free_rx_bds(fp);
3583 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3584 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3585 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3586 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3587 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3589 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3590 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3591 sizeof(struct eth_fast_path_rx_cqe) *
3595 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3596 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3597 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3598 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3602 if (!skip_tx_queue(bp, fp_index)) {
3603 /* fastpath tx rings: tx_buf tx_desc */
3604 for_each_cos_in_tx_queue(fp, cos) {
3605 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3607 DP(NETIF_MSG_IFDOWN,
3608 "freeing tx memory of fp %d cos %d cid %d\n",
3609 fp_index, cos, txdata->cid);
3611 BNX2X_FREE(txdata->tx_buf_ring);
3612 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3613 txdata->tx_desc_mapping,
3614 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3617 /* end of fastpath */
3620 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3623 for_each_cnic_queue(bp, i)
3624 bnx2x_free_fp_mem_at(bp, i);
3627 void bnx2x_free_fp_mem(struct bnx2x *bp)
3630 for_each_eth_queue(bp, i)
3631 bnx2x_free_fp_mem_at(bp, i);
3634 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3636 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3637 if (!CHIP_IS_E1x(bp)) {
3638 bnx2x_fp(bp, index, sb_index_values) =
3639 (__le16 *)status_blk.e2_sb->sb.index_values;
3640 bnx2x_fp(bp, index, sb_running_index) =
3641 (__le16 *)status_blk.e2_sb->sb.running_index;
3643 bnx2x_fp(bp, index, sb_index_values) =
3644 (__le16 *)status_blk.e1x_sb->sb.index_values;
3645 bnx2x_fp(bp, index, sb_running_index) =
3646 (__le16 *)status_blk.e1x_sb->sb.running_index;
3650 /* Returns the number of actually allocated BDs */
3651 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3654 struct bnx2x *bp = fp->bp;
3655 u16 ring_prod, cqe_ring_prod;
3656 int i, failure_cnt = 0;
3658 fp->rx_comp_cons = 0;
3659 cqe_ring_prod = ring_prod = 0;
3661 /* This routine is called only during init, so
3662 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3664 for (i = 0; i < rx_ring_size; i++) {
3665 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3669 ring_prod = NEXT_RX_IDX(ring_prod);
3670 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3671 WARN_ON(ring_prod <= (i - failure_cnt));
3675 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3676 i - failure_cnt, fp->index);
3678 fp->rx_bd_prod = ring_prod;
3679 /* Limit the CQE producer by the CQE ring size */
3680 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3682 fp->rx_pkt = fp->rx_calls = 0;
3684 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3686 return i - failure_cnt;
3689 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3693 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3694 struct eth_rx_cqe_next_page *nextpg;
3696 nextpg = (struct eth_rx_cqe_next_page *)
3697 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3699 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3700 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3702 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3703 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3707 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3709 union host_hc_status_block *sb;
3710 struct bnx2x_fastpath *fp = &bp->fp[index];
3713 int rx_ring_size = 0;
3715 if (!bp->rx_ring_size &&
3716 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3717 rx_ring_size = MIN_RX_SIZE_NONTPA;
3718 bp->rx_ring_size = rx_ring_size;
3719 } else if (!bp->rx_ring_size) {
3720 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3722 if (CHIP_IS_E3(bp)) {
3723 u32 cfg = SHMEM_RD(bp,
3724 dev_info.port_hw_config[BP_PORT(bp)].
3727 /* Decrease ring size for 1G functions */
3728 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3729 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3733 /* allocate at least the number of buffers required by FW */
3734 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3735 MIN_RX_SIZE_TPA, rx_ring_size);
3737 bp->rx_ring_size = rx_ring_size;
3738 } else /* if rx_ring_size specified - use it */
3739 rx_ring_size = bp->rx_ring_size;
3742 sb = &bnx2x_fp(bp, index, status_blk);
3744 if (!IS_FCOE_IDX(index)) {
3746 if (!CHIP_IS_E1x(bp))
3747 BNX2X_PCI_ALLOC(sb->e2_sb,
3748 &bnx2x_fp(bp, index, status_blk_mapping),
3749 sizeof(struct host_hc_status_block_e2));
3751 BNX2X_PCI_ALLOC(sb->e1x_sb,
3752 &bnx2x_fp(bp, index, status_blk_mapping),
3753 sizeof(struct host_hc_status_block_e1x));
3756 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3757 * set shortcuts for it.
3759 if (!IS_FCOE_IDX(index))
3760 set_sb_shortcuts(bp, index);
3763 if (!skip_tx_queue(bp, index)) {
3764 /* fastpath tx rings: tx_buf tx_desc */
3765 for_each_cos_in_tx_queue(fp, cos) {
3766 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3769 "allocating tx memory of fp %d cos %d\n",
3772 BNX2X_ALLOC(txdata->tx_buf_ring,
3773 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3774 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3775 &txdata->tx_desc_mapping,
3776 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3781 if (!skip_rx_queue(bp, index)) {
3782 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3783 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3784 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3785 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3786 &bnx2x_fp(bp, index, rx_desc_mapping),
3787 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3789 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3790 &bnx2x_fp(bp, index, rx_comp_mapping),
3791 sizeof(struct eth_fast_path_rx_cqe) *
3795 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3796 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3797 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3798 &bnx2x_fp(bp, index, rx_sge_mapping),
3799 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3801 bnx2x_set_next_page_rx_bd(fp);
3804 bnx2x_set_next_page_rx_cq(fp);
3807 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3808 if (ring_size < rx_ring_size)
3814 /* handles low memory cases */
3816 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3818 /* FW will drop all packets if the queue is not big enough.
3819 * In these cases we disable the queue.
3820 * The minimum size is different for OOO, TPA and non-TPA queues.
3822 if (ring_size < (fp->disable_tpa ?
3823 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3824 /* release memory allocated for this queue */
3825 bnx2x_free_fp_mem_at(bp, index);
3831 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
3835 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3836 /* we will fail load process instead of mark
3844 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3848 /* 1. Allocate FP for leading - fatal if error
3849 * 2. Allocate RSS - fix number of queues if error
3853 if (bnx2x_alloc_fp_mem_at(bp, 0))
3857 for_each_nondefault_eth_queue(bp, i)
3858 if (bnx2x_alloc_fp_mem_at(bp, i))
3861 /* handle memory failures */
3862 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3863 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3866 if (CNIC_SUPPORT(bp))
3867 /* move non-eth FPs next to the last eth FP;
3868 * must be done in that order:
3869 * FCOE_IDX < FWD_IDX < OOO_IDX
3872 /* move the FCoE fp even if NO_FCOE_FLAG is on */
3873 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3874 bp->num_ethernet_queues -= delta;
3875 bp->num_queues = bp->num_ethernet_queues +
3876 bp->num_cnic_queues;
3877 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3878 bp->num_queues + delta, bp->num_queues);
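/* Example (added for illustration): if 8 ethernet queues were requested
 * but allocation succeeded only for the first 5, delta is 3; the
 * CNIC/FCoE fastpath is shifted down by bnx2x_move_fp() so it sits right
 * after eth queue 4, and num_ethernet_queues/num_queues shrink by the
 * same delta.
 */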
3884 void bnx2x_free_mem_bp(struct bnx2x *bp)
3886 kfree(bp->fp->tpa_info);
3889 kfree(bp->fp_stats);
3890 kfree(bp->bnx2x_txq);
3891 kfree(bp->msix_table);
3895 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
3897 struct bnx2x_fastpath *fp;
3898 struct msix_entry *tbl;
3899 struct bnx2x_ilt *ilt;
3900 int msix_table_size = 0;
3901 int fp_array_size, txq_array_size;
3905 * The biggest MSI-X table we might need is the maximum number of fast
3906 * path IGU SBs plus the default SB (for the PF).
3908 msix_table_size = bp->igu_sb_cnt + 1;
3910 /* fp array: RSS plus CNIC related L2 queues */
3911 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
3912 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3914 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
3917 for (i = 0; i < fp_array_size; i++) {
3919 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3920 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3921 if (!(fp[i].tpa_info))
3927 /* allocate sp objs */
3928 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3933 /* allocate fp_stats */
3934 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3939 /* Allocate memory for the transmission queues array */
3941 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
3942 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
3944 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
3950 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3953 bp->msix_table = tbl;
3956 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3963 bnx2x_free_mem_bp(bp);
3968 int bnx2x_reload_if_running(struct net_device *dev)
3970 struct bnx2x *bp = netdev_priv(dev);
3972 if (unlikely(!netif_running(dev)))
3975 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
3976 return bnx2x_nic_load(bp, LOAD_NORMAL);
3979 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3981 u32 sel_phy_idx = 0;
3982 if (bp->link_params.num_phys <= 1)
3985 if (bp->link_vars.link_up) {
3986 sel_phy_idx = EXT_PHY1;
3987 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3988 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3989 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3990 sel_phy_idx = EXT_PHY2;
3993 switch (bnx2x_phy_selection(&bp->link_params)) {
3994 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3995 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3996 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3997 sel_phy_idx = EXT_PHY1;
3999 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4000 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4001 sel_phy_idx = EXT_PHY2;
4009 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4011 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4013 * The selected active PHY is always the one after swapping (in case PHY
4014 * swapping is enabled). So when swapping is enabled, we need to reverse
4018 if (bp->link_params.multi_phy_config &
4019 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4020 if (sel_phy_idx == EXT_PHY1)
4021 sel_phy_idx = EXT_PHY2;
4022 else if (sel_phy_idx == EXT_PHY2)
4023 sel_phy_idx = EXT_PHY1;
4025 return LINK_CONFIG_IDX(sel_phy_idx);
4028 #ifdef NETDEV_FCOE_WWNN
4029 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4031 struct bnx2x *bp = netdev_priv(dev);
4032 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4035 case NETDEV_FCOE_WWNN:
4036 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4037 cp->fcoe_wwn_node_name_lo);
4039 case NETDEV_FCOE_WWPN:
4040 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4041 cp->fcoe_wwn_port_name_lo);
4044 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4052 /* called with rtnl_lock */
4053 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4055 struct bnx2x *bp = netdev_priv(dev);
4057 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4058 BNX2X_ERR("Can't change MTU during parity recovery\n");
4062 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4063 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4064 BNX2X_ERR("Can't support requested MTU size\n");
4068 /* This does not race with packet allocation
4069 * because the actual alloc size is
4070 * only updated as part of load
4074 return bnx2x_reload_if_running(dev);
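/* Example (added; the limit macros live in bnx2x.h and their exact values
 * are only assumed here): with ETH_HLEN == 14 and a 60-byte minimum frame
 * size, any new_mtu below 46 is rejected, as is anything above
 * ETH_MAX_JUMBO_PACKET_SIZE.  A valid change on a running interface
 * triggers a full unload/load cycle via bnx2x_reload_if_running().
 */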
4077 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4078 netdev_features_t features)
4080 struct bnx2x *bp = netdev_priv(dev);
4082 /* TPA requires Rx CSUM offloading */
4083 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4084 features &= ~NETIF_F_LRO;
4085 features &= ~NETIF_F_GRO;
4091 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4093 struct bnx2x *bp = netdev_priv(dev);
4094 u32 flags = bp->flags;
4095 bool bnx2x_reload = false;
4097 if (features & NETIF_F_LRO)
4098 flags |= TPA_ENABLE_FLAG;
4100 flags &= ~TPA_ENABLE_FLAG;
4102 if (features & NETIF_F_GRO)
4103 flags |= GRO_ENABLE_FLAG;
4105 flags &= ~GRO_ENABLE_FLAG;
4107 if (features & NETIF_F_LOOPBACK) {
4108 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4109 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4110 bnx2x_reload = true;
4113 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4114 bp->link_params.loopback_mode = LOOPBACK_NONE;
4115 bnx2x_reload = true;
4119 if (flags ^ bp->flags) {
4121 bnx2x_reload = true;
4125 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4126 return bnx2x_reload_if_running(dev);
4127 /* else: bnx2x_nic_load() will be called at end of recovery */
4133 void bnx2x_tx_timeout(struct net_device *dev)
4135 struct bnx2x *bp = netdev_priv(dev);
4137 #ifdef BNX2X_STOP_ON_ERROR
4142 smp_mb__before_clear_bit();
4143 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4144 smp_mb__after_clear_bit();
4146 /* This allows the netif to be shutdown gracefully before resetting */
4147 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4150 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4152 struct net_device *dev = pci_get_drvdata(pdev);
4156 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4159 bp = netdev_priv(dev);
4163 pci_save_state(pdev);
4165 if (!netif_running(dev)) {
4170 netif_device_detach(dev);
4172 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4174 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4181 int bnx2x_resume(struct pci_dev *pdev)
4183 struct net_device *dev = pci_get_drvdata(pdev);
4188 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4191 bp = netdev_priv(dev);
4193 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4194 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4200 pci_restore_state(pdev);
4202 if (!netif_running(dev)) {
4207 bnx2x_set_power_state(bp, PCI_D0);
4208 netif_device_attach(dev);
4210 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4218 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4221 /* ustorm cxt validation */
4222 cxt->ustorm_ag_context.cdu_usage =
4223 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4224 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4225 /* xcontext validation */
4226 cxt->xstorm_ag_context.cdu_reserved =
4227 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4228 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4231 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4232 u8 fw_sb_id, u8 sb_index,
4236 u32 addr = BAR_CSTRORM_INTMEM +
4237 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4238 REG_WR8(bp, addr, ticks);
4240 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4241 port, fw_sb_id, sb_index, ticks);
4244 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4245 u16 fw_sb_id, u8 sb_index,
4248 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4249 u32 addr = BAR_CSTRORM_INTMEM +
4250 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4251 u16 flags = REG_RD16(bp, addr);
4253 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4254 flags |= enable_flag;
4255 REG_WR16(bp, addr, flags);
4257 "port %x fw_sb_id %d sb_index %d disable %d\n",
4258 port, fw_sb_id, sb_index, disable);
4261 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4262 u8 sb_index, u8 disable, u16 usec)
4264 int port = BP_PORT(bp);
4265 u8 ticks = usec / BNX2X_BTR;
4267 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4269 disable = disable ? 1 : (usec ? 0 : 1);
4270 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
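/* Worked example (added; the value of BNX2X_BTR is assumed here): the
 * helper above converts a microsecond interval to controller ticks with
 * ticks = usec / BNX2X_BTR.  If BNX2X_BTR were 4, a request for 50 usec
 * of coalescing would program 12 ticks, and passing usec == 0 disables
 * host coalescing on that status-block index altogether.
 */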