hv_netvsc: Use the xmit_more skb flag to optimize signaling the host
author K. Y. Srinivasan <kys@microsoft.com>
Mon, 11 May 2015 22:39:46 +0000 (15:39 -0700)
committer David S. Miller <davem@davemloft.net>
Wed, 13 May 2015 03:10:43 +0000 (23:10 -0400)
Based on the information given to this driver (via the xmit_more skb flag),
we can defer signaling the host if more packets are on the way. This helps
make the host more efficient, since it can potentially process a larger batch
of packets per wakeup. Implement this optimization.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
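
The decision logic this patch adds can be sketched outside the driver as
follows. This is a minimal, hedged illustration: should_signal_host() and the
watermark constant are stand-ins for this sketch only; the real driver feeds
the result into the last argument of vmbus_sendpacket_ctl() /
vmbus_sendpacket_pagebuffer_ctl() as shown in the diff below.

/* Standalone C sketch (not kernel code) of the host-signaling decision.
 * RING_AVAIL_PERCENT_LOWATER mirrors the driver's low watermark; the
 * value used here is only illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define RING_AVAIL_PERCENT_LOWATER 10  /* stand-in for the driver's constant */

/* ring_avail: free ring-buffer space in percent, sampled before the send.
 * xmit_more:  the stack has more packets queued for this device. */
static bool should_signal_host(unsigned int ring_avail, bool xmit_more)
{
	/* If this send may drop us below the low watermark, the queue may be
	 * stopped afterwards, so signal unconditionally. Being imprecise here
	 * only costs an unnecessary host notification. */
	if (ring_avail < RING_AVAIL_PERCENT_LOWATER + 1)
		return true;

	/* Otherwise defer the signal while more packets are on the way,
	 * letting the host process a larger batch at once. */
	return !xmit_more;
}

int main(void)
{
	printf("%d\n", should_signal_host(50, true));  /* 0: defer, more coming */
	printf("%d\n", should_signal_host(50, false)); /* 1: last packet, kick  */
	printf("%d\n", should_signal_host(5, true));   /* 1: near the watermark */
	return 0;
}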
drivers/net/hyperv/netvsc.c

index 2d9ef533cc4837c5bd7b46c96958f9f1b9eda323..1c4f265f4e7cd74660f5123f973e161eaffe91aa 100644
@@ -743,6 +743,7 @@ static inline int netvsc_send_pkt(
        u64 req_id;
        int ret;
        struct hv_page_buffer *pgbuf;
+       u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
 
        nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
        if (packet->is_data_pkt) {
@@ -769,32 +770,42 @@ static inline int netvsc_send_pkt(
        if (out_channel->rescind)
                return -ENODEV;
 
+       /*
+        * It is possible that once we successfully place this packet
+        * on the ringbuffer, we may stop the queue. In that case, we want
+        * to notify the host independent of the xmit_more flag. We don't
+        * need to be precise here; in the worst case we may signal the host
+        * unnecessarily.
+        */
+       if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
+               packet->xmit_more = false;
+
        if (packet->page_buf_cnt) {
                pgbuf = packet->cp_partial ? packet->page_buf +
                        packet->rmsg_pgcnt : packet->page_buf;
-               ret = vmbus_sendpacket_pagebuffer(out_channel,
-                                                 pgbuf,
-                                                 packet->page_buf_cnt,
-                                                 &nvmsg,
-                                                 sizeof(struct nvsp_message),
-                                                 req_id);
+               ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
+                                                     pgbuf,
+                                                     packet->page_buf_cnt,
+                                                     &nvmsg,
+                                                     sizeof(struct nvsp_message),
+                                                     req_id,
+                                                     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+                                                     !packet->xmit_more);
        } else {
-               ret = vmbus_sendpacket(
-                               out_channel, &nvmsg,
-                               sizeof(struct nvsp_message),
-                               req_id,
-                               VM_PKT_DATA_INBAND,
-                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+               ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
+                                          sizeof(struct nvsp_message),
+                                          req_id,
+                                          VM_PKT_DATA_INBAND,
+                                          VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+                                          !packet->xmit_more);
        }
 
        if (ret == 0) {
                atomic_inc(&net_device->num_outstanding_sends);
                atomic_inc(&net_device->queue_sends[q_idx]);
 
-               if (hv_ringbuf_avail_percent(&out_channel->outbound) <
-                       RING_AVAIL_PERCENT_LOWATER) {
-                       netif_tx_stop_queue(netdev_get_tx_queue(
-                                           ndev, q_idx));
+               if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+                       netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
 
                        if (atomic_read(&net_device->
                                queue_sends[q_idx]) < 1)