git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - drivers/net/hyperv/netvsc.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[karo-tx-linux.git] / drivers / net / hyperv / netvsc.c
index 86e5749226ef4cf65d6070bca1ab0d4be35bf2e0..fd6ebbefd919344e0c7dab94316488a43bc3e2bc 100644 (file)
@@ -67,14 +67,8 @@ static struct netvsc_device *alloc_net_device(void)
        if (!net_device)
                return NULL;
 
-       net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
-       if (!net_device->cb_buffer) {
-               kfree(net_device);
-               return NULL;
-       }
-
-       net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX *
-                                        sizeof(struct recv_comp_data));
+       net_device->chan_table[0].mrc.buf
+               = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
 
        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
@@ -91,35 +85,28 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
        int i;
 
        for (i = 0; i < VRSS_CHANNEL_MAX; i++)
-               vfree(nvdev->mrc[i].buf);
+               vfree(nvdev->chan_table[i].mrc.buf);
 
-       kfree(nvdev->cb_buffer);
        kfree(nvdev);
 }
 
-static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
-{
-       struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
 
-       if (net_device && net_device->destroy)
-               net_device = NULL;
+static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
+                                      u16 q_idx)
+{
+       const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
 
-       return net_device;
+       return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
+               atomic_read(&nvchan->queue_sends) == 0;
 }
 
-static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
+static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 {
        struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
 
-       if (!net_device)
-               goto get_in_err;
-
-       if (net_device->destroy &&
-           atomic_read(&net_device->num_outstanding_sends) == 0 &&
-           atomic_read(&net_device->num_outstanding_recvs) == 0)
+       if (net_device && net_device->destroy)
                net_device = NULL;
 
-get_in_err:
        return net_device;
 }
 
@@ -584,7 +571,6 @@ void netvsc_device_remove(struct hv_device *device)
        vmbus_close(device->channel);
 
        /* Release all resources */
-       vfree(net_device->sub_cb_buf);
        free_netvsc_device(net_device);
 }
 
@@ -620,29 +606,35 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct vmbus_channel *channel = device->channel;
-       int num_outstanding_sends;
        u16 q_idx = 0;
        int queue_sends;
 
        /* Notify the layer above us */
        if (likely(skb)) {
-               struct hv_netvsc_packet *nvsc_packet
+               const struct hv_netvsc_packet *packet
                        = (struct hv_netvsc_packet *)skb->cb;
-               u32 send_index = nvsc_packet->send_buf_index;
+               u32 send_index = packet->send_buf_index;
+               struct netvsc_stats *tx_stats;
 
                if (send_index != NETVSC_INVALID_INDEX)
                        netvsc_free_send_slot(net_device, send_index);
-               q_idx = nvsc_packet->q_idx;
+               q_idx = packet->q_idx;
                channel = incoming_channel;
 
+               tx_stats = &net_device->chan_table[q_idx].tx_stats;
+
+               u64_stats_update_begin(&tx_stats->syncp);
+               tx_stats->packets += packet->total_packets;
+               tx_stats->bytes += packet->total_bytes;
+               u64_stats_update_end(&tx_stats->syncp);
+
                dev_consume_skb_any(skb);
        }
 
-       num_outstanding_sends =
-               atomic_dec_return(&net_device->num_outstanding_sends);
-       queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]);
+       queue_sends =
+               atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
 
-       if (net_device->destroy && num_outstanding_sends == 0)
+       if (net_device->destroy && queue_sends == 0)
                wake_up(&net_device->wait_drain);
 
        if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
@@ -688,27 +680,15 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
 
 static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 {
-       unsigned long index;
-       u32 max_words = net_device->map_words;
-       unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
-       u32 section_cnt = net_device->send_section_cnt;
-       int ret_val = NETVSC_INVALID_INDEX;
-       int i;
-       int prev_val;
-
-       for (i = 0; i < max_words; i++) {
-               if (!~(map_addr[i]))
-                       continue;
-               index = ffz(map_addr[i]);
-               prev_val = sync_test_and_set_bit(index, &map_addr[i]);
-               if (prev_val)
-                       continue;
-               if ((index + (i * BITS_PER_LONG)) >= section_cnt)
-                       break;
-               ret_val = (index + (i * BITS_PER_LONG));
-               break;
+       unsigned long *map_addr = net_device->send_section_map;
+       unsigned int i;
+
+       for_each_clear_bit(i, map_addr, net_device->map_words) {
+               if (sync_test_and_set_bit(i, map_addr) == 0)
+                       return i;
        }
-       return ret_val;
+
+       return NETVSC_INVALID_INDEX;
 }
 
 static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
@@ -765,9 +745,11 @@ static inline int netvsc_send_pkt(
        struct sk_buff *skb)
 {
        struct nvsp_message nvmsg;
-       u16 q_idx = packet->q_idx;
-       struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
+       struct netvsc_channel *nvchan
+               = &net_device->chan_table[packet->q_idx];
+       struct vmbus_channel *out_channel = nvchan->channel;
        struct net_device *ndev = hv_get_drvdata(device);
+       struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
        u64 req_id;
        int ret;
        struct hv_page_buffer *pgbuf;
@@ -827,23 +809,14 @@ static inline int netvsc_send_pkt(
        }
 
        if (ret == 0) {
-               atomic_inc(&net_device->num_outstanding_sends);
-               atomic_inc(&net_device->queue_sends[q_idx]);
-
-               if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
-                       netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
+               atomic_inc_return(&nvchan->queue_sends);
 
-                       if (atomic_read(&net_device->
-                               queue_sends[q_idx]) < 1)
-                               netif_tx_wake_queue(netdev_get_tx_queue(
-                                                   ndev, q_idx));
-               }
+               if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
+                       netif_tx_stop_queue(txq);
        } else if (ret == -EAGAIN) {
-               netif_tx_stop_queue(netdev_get_tx_queue(
-                                   ndev, q_idx));
-               if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
-                       netif_tx_wake_queue(netdev_get_tx_queue(
-                                           ndev, q_idx));
+               netif_tx_stop_queue(txq);
+               if (atomic_read(&nvchan->queue_sends) < 1) {
+                       netif_tx_wake_queue(txq);
                        ret = -ENOSPC;
                }
        } else {
@@ -874,8 +847,7 @@ int netvsc_send(struct hv_device *device,
 {
        struct netvsc_device *net_device;
        int ret = 0;
-       struct vmbus_channel *out_channel;
-       u16 q_idx = packet->q_idx;
+       struct netvsc_channel *nvchan;
        u32 pktlen = packet->total_data_buflen, msd_len = 0;
        unsigned int section_index = NETVSC_INVALID_INDEX;
        struct multi_send_data *msdp;
@@ -895,8 +867,7 @@ int netvsc_send(struct hv_device *device,
        if (!net_device->send_section_map)
                return -EAGAIN;
 
-       out_channel = net_device->chn_table[q_idx];
-
+       nvchan = &net_device->chan_table[packet->q_idx];
        packet->send_buf_index = NETVSC_INVALID_INDEX;
        packet->cp_partial = false;
 
@@ -908,9 +879,8 @@ int netvsc_send(struct hv_device *device,
                goto send_now;
        }
 
-       msdp = &net_device->msd[q_idx];
-
        /* batch packets in send buffer if possible */
+       msdp = &nvchan->msd;
        if (msdp->pkt)
                msd_len = msdp->pkt->total_data_buflen;
 
@@ -950,6 +920,11 @@ int netvsc_send(struct hv_device *device,
                        packet->total_data_buflen += msd_len;
                }
 
+               if (msdp->pkt) {
+                       packet->total_packets += msdp->pkt->total_packets;
+                       packet->total_bytes += msdp->pkt->total_bytes;
+               }
+
                if (msdp->skb)
                        dev_consume_skb_any(msdp->skb);
 
@@ -1011,8 +986,9 @@ static int netvsc_send_recv_completion(struct vmbus_channel *channel,
 static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
                                        u32 *filled, u32 *avail)
 {
-       u32 first = nvdev->mrc[q_idx].first;
-       u32 next = nvdev->mrc[q_idx].next;
+       struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
+       u32 first = mrc->first;
+       u32 next = mrc->next;
 
        *filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
                  next - first;
@@ -1024,26 +1000,26 @@ static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
 static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
                                                         *nvdev, u16 q_idx)
 {
+       struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        u32 filled, avail;
 
-       if (!nvdev->mrc[q_idx].buf)
+       if (unlikely(!mrc->buf))
                return NULL;
 
        count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
        if (!filled)
                return NULL;
 
-       return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first *
-              sizeof(struct recv_comp_data);
+       return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
 }
 
 /* Put the first filled slot back to available pool */
 static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
 {
+       struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        int num_recv;
 
-       nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) %
-                                 NETVSC_RECVSLOT_MAX;
+       mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;
 
        num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);
 
@@ -1078,13 +1054,14 @@ static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
 static inline struct recv_comp_data *get_recv_comp_slot(
        struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
 {
+       struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        u32 filled, avail, next;
        struct recv_comp_data *rcd;
 
-       if (!nvdev->recv_section)
+       if (unlikely(!nvdev->recv_section))
                return NULL;
 
-       if (!nvdev->mrc[q_idx].buf)
+       if (unlikely(!mrc->buf))
                return NULL;
 
        if (atomic_read(&nvdev->num_outstanding_recvs) >
@@ -1095,60 +1072,44 @@ static inline struct recv_comp_data *get_recv_comp_slot(
        if (!avail)
                return NULL;
 
-       next = nvdev->mrc[q_idx].next;
-       rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data);
-       nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX;
+       next = mrc->next;
+       rcd = mrc->buf + next * sizeof(struct recv_comp_data);
+       mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;
 
        atomic_inc(&nvdev->num_outstanding_recvs);
 
        return rcd;
 }
 
-static void netvsc_receive(struct netvsc_device *net_device,
-                       struct vmbus_channel *channel,
-                       struct hv_device *device,
-                       struct vmpacket_descriptor *packet)
+static void netvsc_receive(struct net_device *ndev,
+                  struct netvsc_device *net_device,
+                  struct net_device_context *net_device_ctx,
+                  struct hv_device *device,
+                  struct vmbus_channel *channel,
+                  struct vmtransfer_page_packet_header *vmxferpage_packet,
+                  struct nvsp_message *nvsp)
 {
-       struct vmtransfer_page_packet_header *vmxferpage_packet;
-       struct nvsp_message *nvsp_packet;
-       struct hv_netvsc_packet nv_pkt;
-       struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
+       char *recv_buf = net_device->recv_buf;
        u32 status = NVSP_STAT_SUCCESS;
        int i;
        int count = 0;
-       struct net_device *ndev = hv_get_drvdata(device);
-       void *data;
        int ret;
        struct recv_comp_data *rcd;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
 
-       /*
-        * All inbound packets other than send completion should be xfer page
-        * packet
-        */
-       if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
-               netdev_err(ndev, "Unknown packet type received - %d\n",
-                          packet->type);
-               return;
-       }
-
-       nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
-                       (packet->offset8 << 3));
-
        /* Make sure this is a valid nvsp packet */
-       if (nvsp_packet->hdr.msg_type !=
-           NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
-               netdev_err(ndev, "Unknown nvsp packet type received-"
-                       " %d\n", nvsp_packet->hdr.msg_type);
+       if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
+               netif_err(net_device_ctx, rx_err, ndev,
+                         "Unknown nvsp packet type received %u\n",
+                         nvsp->hdr.msg_type);
                return;
        }
 
-       vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
-
-       if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
-               netdev_err(ndev, "Invalid xfer page set id - "
-                          "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
-                          vmxferpage_packet->xfer_pageset_id);
+       if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
+               netif_err(net_device_ctx, rx_err, ndev,
+                         "Invalid xfer page set id - expecting %x got %x\n",
+                         NETVSC_RECEIVE_BUFFER_ID,
+                         vmxferpage_packet->xfer_pageset_id);
                return;
        }
 
@@ -1156,18 +1117,16 @@ static void netvsc_receive(struct netvsc_device *net_device,
 
        /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
        for (i = 0; i < count; i++) {
-               /* Initialize the netvsc packet */
-               data = (void *)((unsigned long)net_device->
-                       recv_buf + vmxferpage_packet->ranges[i].byte_offset);
-               netvsc_packet->total_data_buflen =
-                                       vmxferpage_packet->ranges[i].byte_count;
+               void *data = recv_buf
+                       + vmxferpage_packet->ranges[i].byte_offset;
+               u32 buflen = vmxferpage_packet->ranges[i].byte_count;
 
                /* Pass it to the upper layer */
-               status = rndis_filter_receive(device, netvsc_packet, &data,
-                                             channel);
+               status = rndis_filter_receive(ndev, net_device, device,
+                                             channel, data, buflen);
        }
 
-       if (!net_device->mrc[q_idx].buf) {
+       if (!net_device->chan_table[q_idx].mrc.buf) {
                ret = netvsc_send_recv_completion(channel,
                                                  vmxferpage_packet->d.trans_id,
                                                  status);
@@ -1243,11 +1202,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
                                   u64 request_id,
                                   struct vmpacket_descriptor *desc)
 {
-       struct nvsp_message *nvmsg;
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
-
-       nvmsg = (struct nvsp_message *)((unsigned long)
-               desc + (desc->offset8 << 3));
+       struct nvsp_message *nvmsg
+               = (struct nvsp_message *)((unsigned long)desc
+                                         + (desc->offset8 << 3));
 
        switch (desc->type) {
        case VM_PKT_COMP:
@@ -1255,7 +1213,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
                break;
 
        case VM_PKT_DATA_USING_XFER_PAGES:
-               netvsc_receive(net_device, channel, device, desc);
+               netvsc_receive(ndev, net_device, net_device_ctx,
+                              device, channel,
+                              (struct vmtransfer_page_packet_header *)desc,
+                              nvmsg);
                break;
 
        case VM_PKT_DATA_INBAND:
@@ -1271,16 +1232,11 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
 
 void netvsc_channel_cb(void *context)
 {
-       int ret;
-       struct vmbus_channel *channel = (struct vmbus_channel *)context;
+       struct vmbus_channel *channel = context;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        struct hv_device *device;
        struct netvsc_device *net_device;
-       u32 bytes_recvd;
-       u64 request_id;
        struct vmpacket_descriptor *desc;
-       unsigned char *buffer;
-       int bufferlen = NETVSC_PACKET_SIZE;
        struct net_device *ndev;
        bool need_to_commit = false;
 
@@ -1289,74 +1245,28 @@ void netvsc_channel_cb(void *context)
        else
                device = channel->device_obj;
 
-       net_device = get_inbound_net_device(device);
-       if (!net_device)
-               return;
        ndev = hv_get_drvdata(device);
-       buffer = get_per_channel_state(channel);
+       if (unlikely(!ndev))
+               return;
+
+       net_device = net_device_to_netvsc_device(ndev);
+       if (unlikely(net_device->destroy) &&
+           netvsc_channel_idle(net_device, q_idx))
+               return;
 
        /* commit_rd_index() -> hv_signal_on_read() needs this. */
        init_cached_read_index(channel);
 
-       do {
-               desc = get_next_pkt_raw(channel);
-               if (desc != NULL) {
-                       netvsc_process_raw_pkt(device,
-                                              channel,
-                                              net_device,
-                                              ndev,
-                                              desc->trans_id,
-                                              desc);
-
-                       put_pkt_raw(channel, desc);
-                       need_to_commit = true;
-                       continue;
-               }
-               if (need_to_commit) {
-                       need_to_commit = false;
-                       commit_rd_index(channel);
-               }
-
-               ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
-                                          &bytes_recvd, &request_id);
-               if (ret == 0) {
-                       if (bytes_recvd > 0) {
-                               desc = (struct vmpacket_descriptor *)buffer;
-                               netvsc_process_raw_pkt(device,
-                                                      channel,
-                                                      net_device,
-                                                      ndev,
-                                                      request_id,
-                                                      desc);
-                       } else {
-                               /*
-                                * We are done for this pass.
-                                */
-                               break;
-                       }
-
-               } else if (ret == -ENOBUFS) {
-                       if (bufferlen > NETVSC_PACKET_SIZE)
-                               kfree(buffer);
-                       /* Handle large packet */
-                       buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
-                       if (buffer == NULL) {
-                               /* Try again next time around */
-                               netdev_err(ndev,
-                                          "unable to allocate buffer of size "
-                                          "(%d)!!\n", bytes_recvd);
-                               break;
-                       }
-
-                       bufferlen = bytes_recvd;
-               }
-
-               init_cached_read_index(channel);
+       while ((desc = get_next_pkt_raw(channel)) != NULL) {
+               netvsc_process_raw_pkt(device, channel, net_device,
+                                      ndev, desc->trans_id, desc);
 
-       } while (1);
+               put_pkt_raw(channel, desc);
+               need_to_commit = true;
+       }
 
-       if (bufferlen > NETVSC_PACKET_SIZE)
-               kfree(buffer);
+       if (need_to_commit)
+               commit_rd_index(channel);
 
        netvsc_chk_recv_comp(net_device, channel, q_idx);
 }
@@ -1365,11 +1275,11 @@ void netvsc_channel_cb(void *context)
  * netvsc_device_add - Callback when the device belonging to this
  * driver is added
  */
-int netvsc_device_add(struct hv_device *device, void *additional_info)
+int netvsc_device_add(struct hv_device *device,
+                     const struct netvsc_device_info *device_info)
 {
        int i, ret = 0;
-       int ring_size =
-       ((struct netvsc_device_info *)additional_info)->ring_size;
+       int ring_size = device_info->ring_size;
        struct netvsc_device *net_device;
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1380,8 +1290,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
 
        net_device->ring_size = ring_size;
 
-       set_per_channel_state(device->channel, net_device->cb_buffer);
-
        /* Open the channel */
        ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
                         ring_size * PAGE_SIZE, NULL, 0,
@@ -1400,7 +1308,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
         * opened.
         */
        for (i = 0; i < VRSS_CHANNEL_MAX; i++)
-               net_device->chn_table[i] = device->channel;
+               net_device->chan_table[i].channel = device->channel;
 
        /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
         * populated.