}
}
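+/*
+ * Unmap the skb head and the first 'frag' page fragments from the unmap
+ * array, starting at 'index'. Returns the index following the last entry
+ * unmapped.
+ */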
+static u32
+bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
+ u32 index, u32 depth, struct sk_buff *skb, u32 frag)
+{
+ int j;
+ array[index].skb = NULL;
+
+ dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_addr_set(&array[index], dma_addr, 0);
+ BNA_QE_INDX_ADD(index, 1, depth);
+
+ for (j = 0; j < frag; j++) {
+ dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
+ skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
+ dma_unmap_addr_set(&array[index], dma_addr, 0);
+ BNA_QE_INDX_ADD(index, 1, depth);
+ }
+
+ return index;
+}
+
/*
* Frees all pending Tx Bufs
* At this point no activity is expected on the Q,
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb = NULL;
- int i;
+ int q;
unmap_array = unmap_q->unmap_array;
- unmap_cons = 0;
- while (unmap_cons < unmap_q->q_depth) {
- skb = unmap_array[unmap_cons].skb;
- if (!skb) {
- unmap_cons++;
+ for (q = 0; q < unmap_q->q_depth; q++) {
+ skb = unmap_array[q].skb;
+ if (!skb)
continue;
- }
- unmap_array[unmap_cons].skb = NULL;
-
- dma_unmap_single(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr), skb_headlen(skb),
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
- if (++unmap_cons >= unmap_q->q_depth)
- break;
+ unmap_cons = q;
+ unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
+ unmap_cons, unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- dma_unmap_page(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr),
- skb_shinfo(skb)->frags[i].size,
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
- 0);
- if (++unmap_cons >= unmap_q->q_depth)
- break;
- }
dev_kfree_skb_any(skb);
}
}
bnad_free_txbufs(struct bnad *bnad,
struct bna_tcb *tcb)
{
- u32 sent_packets = 0, sent_bytes = 0;
- u16 wis, unmap_cons, updated_hw_cons;
+ u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
+ u16 wis, updated_hw_cons;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
- int i;
/*
* Just return if TX is stopped. This check is useful
while (wis) {
skb = unmap_array[unmap_cons].skb;
- unmap_array[unmap_cons].skb = NULL;
-
sent_packets++;
sent_bytes += skb->len;
wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
- dma_unmap_single(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr), skb_headlen(skb),
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
- BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
-
- prefetch(&unmap_array[unmap_cons + 1]);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- prefetch(&unmap_array[unmap_cons + 1]);
-
- dma_unmap_page(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr),
- skb_shinfo(skb)->frags[i].size,
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
- 0);
- BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
- }
+ unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
+ unmap_cons, unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
+
dev_kfree_skb_any(skb);
}
BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
while (to_alloc--) {
- if (!wi_range) {
+ if (!wi_range)
BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
wi_range);
- }
skb = netdev_alloc_skb_ip_align(bnad->netdev,
rcb->rxq->buffer_size);
if (unlikely(!skb)) {
BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
+ rcb->rxq->rxbuf_alloc_failed++;
goto finishing;
}
unmap_array[unmap_prod].skb = skb;
BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
- if (likely(ccb)) {
- if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
- bna_ib_ack(ccb->i_dbell, packets);
- bnad_refill_rxq(bnad, ccb->rcb[0]);
- if (ccb->rcb[1])
- bnad_refill_rxq(bnad, ccb->rcb[1]);
- } else {
- if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
- bna_ib_ack(ccb->i_dbell, 0);
- }
+ if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+ bna_ib_ack_disable_irq(ccb->i_dbell, packets);
+
+ bnad_refill_rxq(bnad, ccb->rcb[0]);
+ if (ccb->rcb[1])
+ bnad_refill_rxq(bnad, ccb->rcb[1]);
clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
return packets;
}
-static void
-bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
-{
- if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
- return;
-
- bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
- bna_ib_ack(ccb->i_dbell, 0);
-}
-
-static void
-bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
-{
- unsigned long flags;
-
- /* Because of polling context */
- spin_lock_irqsave(&bnad->bna_lock, flags);
- bnad_enable_rx_irq_unsafe(ccb);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
-}
-
static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
struct napi_struct *napi = &rx_ctrl->napi;
if (likely(napi_schedule_prep(napi))) {
- bnad_disable_rx_irq(bnad, ccb);
__napi_schedule(napi);
+ rx_ctrl->rx_schedule++;
}
- BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}
/* MSIX Rx Path Handler */
bnad_msix_rx(int irq, void *data)
{
struct bna_ccb *ccb = (struct bna_ccb *)data;
- struct bnad *bnad = ccb->bnad;
- bnad_netif_rx_schedule_poll(bnad, ccb);
+ if (ccb) {
+ ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
+ bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
+ }
return IRQ_HANDLED;
}
unsigned long flags;
struct bnad *bnad = (struct bnad *)data;
- if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
- return IRQ_HANDLED;
-
spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return IRQ_HANDLED;
+ }
bna_intr_status_get(&bnad->bna, intr_status);
struct bnad_rx_ctrl *rx_ctrl;
struct bna_tcb *tcb = NULL;
- if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
return IRQ_NONE;
+ }
bna_intr_status_get(&bnad->bna, intr_status);
- if (unlikely(!intr_status))
+ if (unlikely(!intr_status)) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
return IRQ_NONE;
-
- spin_lock_irqsave(&bnad->bna_lock, flags);
+ }
if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
bna_mbox_handler(&bnad->bna, intr_status);
mdelay(BNAD_TXRX_SYNC_MDELAY);
- for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
+ for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
rx_ctrl = &rx_info->rx_ctrl[i];
ccb = rx_ctrl->ccb;
if (!ccb)
int i;
int j;
- for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
+ for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
rx_ctrl = &rx_info->rx_ctrl[i];
ccb = rx_ctrl->ccb;
if (!ccb)
{
struct bnad_rx_ctrl *rx_ctrl =
container_of(napi, struct bnad_rx_ctrl, napi);
- struct bna_ccb *ccb;
- struct bnad *bnad;
+ struct bnad *bnad = rx_ctrl->bnad;
int rcvd = 0;
- ccb = rx_ctrl->ccb;
-
- bnad = ccb->bnad;
+ rx_ctrl->rx_poll_ctr++;
if (!netif_carrier_ok(bnad->netdev))
goto poll_exit;
- rcvd = bnad_poll_cq(bnad, ccb, budget);
- if (rcvd == budget)
+ rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
+ if (rcvd >= budget)
return rcvd;
poll_exit:
- napi_complete((napi));
+ napi_complete(napi);
+
+ rx_ctrl->rx_complete++;
- BNAD_UPDATE_CTR(bnad, netif_rx_complete);
+ if (rx_ctrl->ccb)
+ bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
- bnad_enable_rx_irq(bnad, ccb);
return rcvd;
}
+#define BNAD_NAPI_POLL_QUOTA 64
static void
-bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+bnad_napi_init(struct bnad *bnad, u32 rx_id)
{
struct bnad_rx_ctrl *rx_ctrl;
int i;
-	/* Initialize & enable NAPI */
+	/* Initialize NAPI */
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
-
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
- bnad_napi_poll_rx, 64);
+ bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
+ }
+}
+
+static void
+bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+{
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+
+	/* Enable NAPI */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
napi_enable(&rx_ctrl->napi);
}
bnad_tx_msix_unregister(bnad, tx_info,
bnad->num_txq_per_tx);
+ if (0 == tx_id)
+ tasklet_kill(&bnad->tx_free_tasklet);
+
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_tx_destroy(tx_info->tx);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
tx_info->tx = NULL;
tx_info->tx_id = 0;
- if (0 == tx_id)
- tasklet_kill(&bnad->tx_free_tasklet);
-
bnad_tx_res_free(bnad, res_info);
}
rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}
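+/* Set the bnad back-pointer in every rx_ctrl of the given Rx */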
+static void
+bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ int i;
+
+ for (i = 0; i < bnad->num_rxp_per_rx; i++)
+ rx_info->rx_ctrl[i].bnad = bnad;
+}
+
/* Called with mutex_lock(&bnad->conf_mutex) held */
void
bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
unsigned long flags;
- int dim_timer_del = 0;
+ int to_del = 0;
if (!rx_info->rx)
return;
if (0 == rx_id) {
spin_lock_irqsave(&bnad->bna_lock, flags);
- dim_timer_del = bnad_dim_timer_running(bnad);
- if (dim_timer_del)
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+ test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+ to_del = 1;
+ }
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- if (dim_timer_del)
+ if (to_del)
del_timer_sync(&bnad->dim_timer);
}
- bnad_napi_disable(bnad, rx_id);
-
init_completion(&bnad->bnad_completions.rx_comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
+ bnad_napi_disable(bnad, rx_id);
+
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_destroy(rx_info->rx);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
rx_info->rx = NULL;
+ rx_info->rx_id = 0;
bnad_rx_res_free(bnad, res_info);
}
if (err)
return err;
+ bnad_rx_ctrl_init(bnad, rx_id);
+
/* Ask BNA to create one Rx object, supplying required resources */
spin_lock_irqsave(&bnad->bna_lock, flags);
rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
rx_info);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- if (!rx)
+ if (!rx) {
+ err = -ENOMEM;
goto err_return;
+ }
rx_info->rx = rx;
+ /*
+	 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
+	 * and hence the IRQ handler cannot schedule NAPI at this point.
+ */
+ bnad_napi_init(bnad, rx_id);
+
/* Register ISR for the Rx object */
if (intr_info->intr_type == BNA_INTR_T_MSIX) {
err = bnad_rx_msix_register(bnad, rx_info, rx_id,
goto err_return;
}
- /* Enable NAPI */
- bnad_napi_enable(bnad, rx_id);
-
spin_lock_irqsave(&bnad->bna_lock, flags);
if (0 == rx_id) {
/* Set up Dynamic Interrupt Moderation Vector */
bna_rx_enable(rx);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ /* Enable scheduling of NAPI */
+ bnad_napi_enable(bnad, rx_id);
+
return 0;
err_return:
/*
* Called with bnad->bna_lock held
*/
-static int
+int
bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
{
int ret;
}
/* Should be called with conf_lock held */
-static int
+int
bnad_enable_default_bcast(struct bnad *bnad)
{
struct bnad_rx_info *rx_info = &bnad->rx_info[0];
return 0;
}
-/* Called with bnad_conf_lock() held */
-static void
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
u16 vid;
unsigned long flags;
- BUG_ON(!(VLAN_N_VID == BFI_ENET_VLAN_ID_MAX));
-
for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
{
int err;
- /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
- BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err) {
} else {
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
- BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
ipv6h->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
int rxps;
rxps = min((uint)num_online_cpus(),
- (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
+ (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
if (!(bnad->cfg_flags & BNAD_CF_MSIX))
rxps = 1; /* INTx */
ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
if (ret > 0) {
/* Not enough MSI-X vectors. */
+ pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
+ ret, bnad->msix_num);
spin_lock_irqsave(&bnad->bna_lock, flags);
/* ret = #of vectors that we got */
- bnad_q_num_adjust(bnad, ret, 0);
+ bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
+ (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
- + (bnad->num_rx
- * bnad->num_rxp_per_rx) +
+ bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
BNAD_MAILBOX_MSIX_VECTORS;
if (bnad->msix_num > ret)
return;
intx_mode:
+ pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
kfree(bnad->msix_table);
bnad->msix_table = NULL;
u32 unmap_prod, wis, wis_used, wi_range;
u32 vectors, vect_id, i, acked;
int err;
+ unsigned int len;
+ u32 gso_size;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
dma_addr_t dma_addr;
struct bna_txq_entry *txqent;
u16 flags;
- if (unlikely
- (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
+ if (unlikely(skb->len <= ETH_HLEN)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely(skb_headlen(skb) == 0)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
/*
* Takes care of the Tx that is scheduled between clearing the flag
- * and the netif_stop_all_queue() call.
+ * and the netif_tx_stop_all_queues() call.
*/
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
vectors = 1 + skb_shinfo(skb)->nr_frags;
- if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
+ if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
return NETDEV_TX_OK;
}
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
}
unmap_prod = unmap_q->producer_index;
- wis_used = 1;
- vect_id = 0;
flags = 0;
txq_prod = tcb->producer_index;
BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
- BUG_ON(!(wi_range <= tcb->q_depth));
txqent->hdr.wi.reserved = 0;
txqent->hdr.wi.num_vectors = vectors;
- txqent->hdr.wi.opcode =
- htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
- BNA_TXQ_WI_SEND));
if (vlan_tx_tag_present(skb)) {
vlan_tag = (u16) vlan_tx_tag_get(skb);
txqent->hdr.wi.vlan_tag = htons(vlan_tag);
if (skb_is_gso(skb)) {
+ gso_size = skb_shinfo(skb)->gso_size;
+
+ if (unlikely(gso_size > netdev->mtu)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
+ return NETDEV_TX_OK;
+ }
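+		/* TCP payload fits in a single segment - send without LSO */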
+ if (unlikely((gso_size + skb_transport_offset(skb) +
+ tcp_hdrlen(skb)) >= skb->len)) {
+ txqent->hdr.wi.opcode =
+ __constant_htons(BNA_TXQ_WI_SEND);
+ txqent->hdr.wi.lso_mss = 0;
+ BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
+ } else {
+ txqent->hdr.wi.opcode =
+ __constant_htons(BNA_TXQ_WI_SEND_LSO);
+ txqent->hdr.wi.lso_mss = htons(gso_size);
+ }
+
err = bnad_tso_prepare(bnad, skb);
- if (err) {
+ if (unlikely(err)) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
return NETDEV_TX_OK;
}
- txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
txqent->hdr.wi.l4_hdr_size_n_offset =
htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
(tcp_hdrlen(skb) >> 2,
skb_transport_offset(skb)));
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 proto = 0;
-
+ } else {
+ txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
- if (skb->protocol == htons(ETH_P_IP))
- proto = ip_hdr(skb)->protocol;
- else if (skb->protocol == htons(ETH_P_IPV6)) {
- /* nexthdr may not be TCP immediately. */
- proto = ipv6_hdr(skb)->nexthdr;
+ if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
+ return NETDEV_TX_OK;
}
- if (proto == IPPROTO_TCP) {
- flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
- txqent->hdr.wi.l4_hdr_size_n_offset =
- htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
- (0, skb_transport_offset(skb)));
- BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 proto = 0;
- BUG_ON(!(skb_headlen(skb) >=
- skb_transport_offset(skb) + tcp_hdrlen(skb)));
-
- } else if (proto == IPPROTO_UDP) {
- flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
- txqent->hdr.wi.l4_hdr_size_n_offset =
- htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
- (0, skb_transport_offset(skb)));
-
- BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+ if (skb->protocol == __constant_htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol ==
+ __constant_htons(ETH_P_IPV6)) {
+ /* nexthdr may not be TCP immediately. */
+ proto = ipv6_hdr(skb)->nexthdr;
+ }
+ if (proto == IPPROTO_TCP) {
+ flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+
+ if (unlikely(skb_headlen(skb) <
+ skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
+ return NETDEV_TX_OK;
+ }
- BUG_ON(!(skb_headlen(skb) >=
- skb_transport_offset(skb) +
- sizeof(struct udphdr)));
- } else {
- err = skb_checksum_help(skb);
- BNAD_UPDATE_CTR(bnad, csum_help);
- if (err) {
+ } else if (proto == IPPROTO_UDP) {
+ flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+ if (unlikely(skb_headlen(skb) <
+ skb_transport_offset(skb) +
+ sizeof(struct udphdr))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
+ return NETDEV_TX_OK;
+ }
+ } else {
dev_kfree_skb(skb);
- BNAD_UPDATE_CTR(bnad, csum_help_err);
+ BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
return NETDEV_TX_OK;
}
+ } else {
+ txqent->hdr.wi.l4_hdr_size_n_offset = 0;
}
- } else {
- txqent->hdr.wi.lso_mss = 0;
- txqent->hdr.wi.l4_hdr_size_n_offset = 0;
}
txqent->hdr.wi.flags = htons(flags);
txqent->hdr.wi.frame_length = htonl(skb->len);
unmap_q->unmap_array[unmap_prod].skb = skb;
- BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
- txqent->vector[vect_id].length = htons(skb_headlen(skb));
+ len = skb_headlen(skb);
+ txqent->vector[0].length = htons(len);
dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
- BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+ vect_id = 0;
+ wis_used = 1;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
u16 size = frag->size;
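+		/* Zero-length fragment: unmap what has been mapped so far and drop */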
+ if (unlikely(size == 0)) {
+ unmap_prod = unmap_q->producer_index;
+
+ unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
+ unmap_q->unmap_array,
+ unmap_prod, unmap_q->q_depth, skb,
+ i);
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
+ return NETDEV_TX_OK;
+ }
+
+ len += size;
+
if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
vect_id = 0;
if (--wi_range)
wis_used = 0;
BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
txqent, wi_range);
- BUG_ON(!(wi_range <= tcb->q_depth));
}
wis_used++;
- txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+ txqent->hdr.wi_ext.opcode =
+ __constant_htons(BNA_TXQ_WI_EXTENSION);
}
BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
}
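+	/* Drop the skb if the mapped vector lengths do not add up to skb->len */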
+ if (unlikely(len != skb->len)) {
+ unmap_prod = unmap_q->producer_index;
+
+ unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
+ unmap_q->unmap_array, unmap_prod,
+ unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
+ return NETDEV_TX_OK;
+ }
+
unmap_q->producer_index = unmap_prod;
BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
tcb->producer_index = txq_prod;
return NETDEV_TX_OK;
bna_txq_prod_indx_doorbell(tcb);
+ smp_mb();
if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
tasklet_schedule(&bnad->tx_free_tasklet);
return stats;
}
-static void
+void
bnad_set_rx_mode(struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
}
}
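+	/* Nothing to do if the Rx object has not been created yet */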
+ if (bnad->rx_info[0].rx == NULL)
+ goto unlock;
+
bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
if (!netdev_mc_empty(netdev)) {
bnad_isr(bnad->pcidev->irq, netdev);
bna_intx_enable(&bnad->bna, curr_mask);
} else {
+ /*
+ * Tx processing may happen in sending context, so no need
+ * to explicitly process completions here
+ */
+
+ /* Rx processing */
for (i = 0; i < bnad->num_rx; i++) {
rx_info = &bnad->rx_info[i];
if (!rx_info->rx)
continue;
for (j = 0; j < bnad->num_rxp_per_rx; j++) {
rx_ctrl = &rx_info->rx_ctrl[j];
- if (rx_ctrl->ccb) {
- bnad_disable_rx_irq(bnad,
- rx_ctrl->ccb);
+ if (rx_ctrl->ccb)
bnad_netif_rx_schedule_poll(bnad,
rx_ctrl->ccb);
- }
}
}
}
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
- bool using_dac = false;
+ bool using_dac;
int err;
struct bnad *bnad;
struct bna *bna;
bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
err = -EIO;
}
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (err)
+ goto disable_ioceth;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
- if (err)
+ if (err) {
+ err = -EIO;
goto disable_ioceth;
+ }
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
bnad_set_netdev_perm_addr(bnad);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ mutex_unlock(&bnad->conf_mutex);
+
/* Finally, reguister with net_device layer */
err = register_netdev(netdev);
if (err) {
}
set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
+ return 0;
+
probe_success:
mutex_unlock(&bnad->conf_mutex);
return 0;