git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
mlx4_en: Byte Queue Limit support
authorYevgeny Petrilin <yevgenyp@mellanox.co.il>
Mon, 23 Apr 2012 02:18:50 +0000 (02:18 +0000)
committerDavid S. Miller <davem@davemloft.net>
Tue, 24 Apr 2012 02:34:02 +0000 (22:34 -0400)
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h

index 35ad0971939d4efc55604d5d383b9085f4ca5209..eaa8fadf19c05b00a7616973201d9cf811a3fb5c 100644 (file)
@@ -667,6 +667,7 @@ int mlx4_en_start_port(struct net_device *dev)
                        mlx4_en_deactivate_cq(priv, cq);
                        goto tx_err;
                }
+               tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
 
                /* Arm CQ for TX completions */
                mlx4_en_arm_cq(priv, cq);
@@ -812,12 +813,15 @@ static void mlx4_en_restart(struct work_struct *work)
                                                 watchdog_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;
+       int i;
 
        en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
 
        mutex_lock(&mdev->state_lock);
        if (priv->port_up) {
                mlx4_en_stop_port(dev);
+               for (i = 0; i < priv->tx_ring_num; i++)
+                       netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
                if (mlx4_en_start_port(dev))
                        en_err(priv, "Failed restarting port %d\n", priv->port);
        }
index 2d493420e1a698ec21b8133ca52e1a6c57308f3c..9a38483feb920246c88b7b827c40ba3649ece036 100644 (file)
@@ -315,6 +315,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
        int size = cq->size;
        u32 size_mask = ring->size_mask;
        struct mlx4_cqe *buf = cq->buf;
+       u32 packets = 0;
+       u32 bytes = 0;
 
        if (!priv->port_up)
                return;
@@ -343,6 +345,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
                                        priv, ring, ring_index,
                                        !!((ring->cons + txbbs_skipped) &
                                                        ring->size));
+                       packets++;
+                       bytes += ring->tx_info[ring_index].nr_bytes;
                } while (ring_index != new_index);
 
                ++cons_index;
@@ -359,13 +363,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
        mlx4_cq_set_ci(mcq);
        wmb();
        ring->cons += txbbs_skipped;
+       netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
 
        /* Wakeup Tx queue if this ring stopped it */
        if (unlikely(ring->blocked)) {
                if ((u32) (ring->prod - ring->cons) <=
                     ring->size - HEADROOM - MAX_DESC_TXBBS) {
                        ring->blocked = 0;
-                       netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
+                       netif_tx_wake_queue(ring->tx_queue);
                        priv->port_stats.wake_queue++;
                }
        }
@@ -583,7 +588,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(((int)(ring->prod - ring->cons)) >
                     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                /* every full Tx ring stops queue */
-               netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
+               netif_tx_stop_queue(ring->tx_queue);
                ring->blocked = 1;
                priv->port_stats.queue_stopped++;
 
@@ -649,7 +654,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                priv->port_stats.tso_packets++;
                i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
                        !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
-               ring->bytes += skb->len + (i - 1) * lso_header_size;
+               tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
                ring->packets += i;
        } else {
                /* Normal (Non LSO) packet */
@@ -657,10 +662,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                        ((ring->prod & ring->size) ?
                         cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
                data = &tx_desc->data;
-               ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
+               tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                ring->packets++;
 
        }
+       ring->bytes += tx_info->nr_bytes;
+       netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
        AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
 
index 0feafc5344b3361d92e3b9a3fee626906e14bbcc..5d876375a132b72b481bae4f78c2ece6ec4dfd76 100644 (file)
@@ -200,6 +200,7 @@ enum cq_type {
 struct mlx4_en_tx_info {
        struct sk_buff *skb;
        u32 nr_txbb;
+       u32 nr_bytes;
        u8 linear;
        u8 data_offset;
        u8 inl;
@@ -257,6 +258,7 @@ struct mlx4_en_tx_ring {
        unsigned long tx_csum;
        struct mlx4_bf bf;
        bool bf_enabled;
+       struct netdev_queue *tx_queue;
 };
 
 struct mlx4_en_rx_desc {