netvsc: implement NAPI
[karo-tx-linux.git] / drivers / net / hyperv / netvsc.c
1 /*
2  * Copyright (c) 2009, Microsoft Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, see <http://www.gnu.org/licenses/>.
15  *
16  * Authors:
17  *   Haiyang Zhang <haiyangz@microsoft.com>
18  *   Hank Janssen  <hjanssen@microsoft.com>
19  */
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/wait.h>
25 #include <linux/mm.h>
26 #include <linux/delay.h>
27 #include <linux/io.h>
28 #include <linux/slab.h>
29 #include <linux/netdevice.h>
30 #include <linux/if_ether.h>
31 #include <linux/vmalloc.h>
32 #include <asm/sync_bitops.h>
33
34 #include "hyperv_net.h"
35
36 /*
37  * Switch the data path from the synthetic interface to the VF
38  * interface.
39  */
40 void netvsc_switch_datapath(struct net_device *ndev, bool vf)
41 {
42         struct net_device_context *net_device_ctx = netdev_priv(ndev);
43         struct hv_device *dev = net_device_ctx->device_ctx;
44         struct netvsc_device *nv_dev = net_device_ctx->nvdev;
45         struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
46
47         memset(init_pkt, 0, sizeof(struct nvsp_message));
48         init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
49         if (vf)
50                 init_pkt->msg.v4_msg.active_dp.active_datapath =
51                         NVSP_DATAPATH_VF;
52         else
53                 init_pkt->msg.v4_msg.active_dp.active_datapath =
54                         NVSP_DATAPATH_SYNTHETIC;
55
56         vmbus_sendpacket(dev->channel, init_pkt,
57                                sizeof(struct nvsp_message),
58                                (unsigned long)init_pkt,
59                                VM_PKT_DATA_INBAND, 0);
60 }
61
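/* Allocate and initialize the per-device netvsc state, including the
 * receive completion buffer used by the primary channel.
 */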
62 static struct netvsc_device *alloc_net_device(void)
63 {
64         struct netvsc_device *net_device;
65
66         net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
67         if (!net_device)
68                 return NULL;
69
70         net_device->chan_table[0].mrc.buf
71                 = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
72
73         init_waitqueue_head(&net_device->wait_drain);
74         net_device->destroy = false;
75         atomic_set(&net_device->open_cnt, 0);
76         net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
77         net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
78         init_completion(&net_device->channel_init_wait);
79
80         return net_device;
81 }
82
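/* Free the per-channel receive completion buffers and the device state. */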
83 static void free_netvsc_device(struct netvsc_device *nvdev)
84 {
85         int i;
86
87         for (i = 0; i < VRSS_CHANNEL_MAX; i++)
88                 vfree(nvdev->chan_table[i].mrc.buf);
89
90         kfree(nvdev);
91 }
92
93
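/* A channel is idle when the device has no outstanding receive
 * completions and the channel has no sends in flight.
 */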
94 static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
95                                        u16 q_idx)
96 {
97         const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
98
99         return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
100                 atomic_read(&nvchan->queue_sends) == 0;
101 }
102
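/* Get the netvsc device, or NULL if it is being destroyed. */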
103 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
104 {
105         struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
106
107         if (net_device && net_device->destroy)
108                 net_device = NULL;
109
110         return net_device;
111 }
112
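/* Undo netvsc_init_buf(): revoke the receive and send buffers from the
 * host, tear down their GPADLs and free the memory.
 */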
113 static void netvsc_destroy_buf(struct hv_device *device)
114 {
115         struct nvsp_message *revoke_packet;
116         struct net_device *ndev = hv_get_drvdata(device);
117         struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
118         int ret;
119
120         /*
121          * If we got a section count, it means we received a
122          * SendReceiveBufferComplete msg (i.e. we sent a
123          * NvspMessage1TypeSendReceiveBuffer msg), so we need
124          * to send a revoke msg here
125          */
126         if (net_device->recv_section_cnt) {
127                 /* Send the revoke receive buffer */
128                 revoke_packet = &net_device->revoke_packet;
129                 memset(revoke_packet, 0, sizeof(struct nvsp_message));
130
131                 revoke_packet->hdr.msg_type =
132                         NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
133                 revoke_packet->msg.v1_msg.revoke_recv_buf.id =
134                         NETVSC_RECEIVE_BUFFER_ID;
135
136                 ret = vmbus_sendpacket(device->channel,
137                                        revoke_packet,
138                                        sizeof(struct nvsp_message),
139                                        (unsigned long)revoke_packet,
140                                        VM_PKT_DATA_INBAND, 0);
141                 /*
142                  * If we failed here, we might as well return and
143                  * have a leak rather than continue and a bugchk
144                  */
145                 if (ret != 0) {
146                         netdev_err(ndev, "unable to send "
147                                 "revoke receive buffer to netvsp\n");
148                         return;
149                 }
150         }
151
152         /* Teardown the gpadl on the vsp end */
153         if (net_device->recv_buf_gpadl_handle) {
154                 ret = vmbus_teardown_gpadl(device->channel,
155                                            net_device->recv_buf_gpadl_handle);
156
157                 /* If we failed here, we might as well return and have a leak
158                  * rather than continue and a bugchk
159                  */
160                 if (ret != 0) {
161                         netdev_err(ndev,
162                                    "unable to teardown receive buffer's gpadl\n");
163                         return;
164                 }
165                 net_device->recv_buf_gpadl_handle = 0;
166         }
167
168         if (net_device->recv_buf) {
169                 /* Free up the receive buffer */
170                 vfree(net_device->recv_buf);
171                 net_device->recv_buf = NULL;
172         }
173
174         if (net_device->recv_section) {
175                 net_device->recv_section_cnt = 0;
176                 kfree(net_device->recv_section);
177                 net_device->recv_section = NULL;
178         }
179
180         /* Deal with the send buffer we may have set up.
181          * If we got a send section size, it means we received a
182          * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
183          * NVSP_MSG1_TYPE_SEND_SEND_BUF msg), so we need
184          * to send a revoke msg here
185          */
186         if (net_device->send_section_size) {
187                 /* Send the revoke send buffer */
188                 revoke_packet = &net_device->revoke_packet;
189                 memset(revoke_packet, 0, sizeof(struct nvsp_message));
190
191                 revoke_packet->hdr.msg_type =
192                         NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
193                 revoke_packet->msg.v1_msg.revoke_send_buf.id =
194                         NETVSC_SEND_BUFFER_ID;
195
196                 ret = vmbus_sendpacket(device->channel,
197                                        revoke_packet,
198                                        sizeof(struct nvsp_message),
199                                        (unsigned long)revoke_packet,
200                                        VM_PKT_DATA_INBAND, 0);
201                 /* If we failed here, we might as well return and
202                  * have a leak rather than continue and a bugchk
203                  */
204                 if (ret != 0) {
205                         netdev_err(ndev, "unable to send "
206                                    "revoke send buffer to netvsp\n");
207                         return;
208                 }
209         }
210         /* Teardown the gpadl on the vsp end */
211         if (net_device->send_buf_gpadl_handle) {
212                 ret = vmbus_teardown_gpadl(device->channel,
213                                            net_device->send_buf_gpadl_handle);
214
215                 /* If we failed here, we might as well return and have a leak
216                  * rather than continue and a bugchk
217                  */
218                 if (ret != 0) {
219                         netdev_err(ndev,
220                                    "unable to teardown send buffer's gpadl\n");
221                         return;
222                 }
223                 net_device->send_buf_gpadl_handle = 0;
224         }
225         if (net_device->send_buf) {
226                 /* Free up the send buffer */
227                 vfree(net_device->send_buf);
228                 net_device->send_buf = NULL;
229         }
230         kfree(net_device->send_section_map);
231 }
232
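/* Allocate the receive and send buffers, establish a GPADL for each so
 * the host can access them, notify the NetVSP, and set up the bitmap
 * used to track send buffer sections.
 */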
233 static int netvsc_init_buf(struct hv_device *device)
234 {
235         int ret = 0;
236         struct netvsc_device *net_device;
237         struct nvsp_message *init_packet;
238         struct net_device *ndev;
239         int node;
240
241         net_device = get_outbound_net_device(device);
242         if (!net_device)
243                 return -ENODEV;
244         ndev = hv_get_drvdata(device);
245
246         node = cpu_to_node(device->channel->target_cpu);
247         net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
248         if (!net_device->recv_buf)
249                 net_device->recv_buf = vzalloc(net_device->recv_buf_size);
250
251         if (!net_device->recv_buf) {
252                 netdev_err(ndev, "unable to allocate receive "
253                         "buffer of size %d\n", net_device->recv_buf_size);
254                 ret = -ENOMEM;
255                 goto cleanup;
256         }
257
258         /*
259          * Establish the gpadl handle for this buffer on this
260          * channel.  Note: This call uses the vmbus connection rather
261          * than the channel to establish the gpadl handle.
262          */
263         ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
264                                     net_device->recv_buf_size,
265                                     &net_device->recv_buf_gpadl_handle);
266         if (ret != 0) {
267                 netdev_err(ndev,
268                         "unable to establish receive buffer's gpadl\n");
269                 goto cleanup;
270         }
271
272         /* Notify the NetVsp of the gpadl handle */
273         init_packet = &net_device->channel_init_pkt;
274
275         memset(init_packet, 0, sizeof(struct nvsp_message));
276
277         init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
278         init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
279                 net_device->recv_buf_gpadl_handle;
280         init_packet->msg.v1_msg.send_recv_buf.id =
281                 NETVSC_RECEIVE_BUFFER_ID;
282
283         /* Send the gpadl notification request */
284         ret = vmbus_sendpacket(device->channel, init_packet,
285                                sizeof(struct nvsp_message),
286                                (unsigned long)init_packet,
287                                VM_PKT_DATA_INBAND,
288                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
289         if (ret != 0) {
290                 netdev_err(ndev,
291                         "unable to send receive buffer's gpadl to netvsp\n");
292                 goto cleanup;
293         }
294
295         wait_for_completion(&net_device->channel_init_wait);
296
297         /* Check the response */
298         if (init_packet->msg.v1_msg.
299             send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
300                 netdev_err(ndev, "Unable to complete receive buffer "
301                            "initialization with NetVsp - status %d\n",
302                            init_packet->msg.v1_msg.
303                            send_recv_buf_complete.status);
304                 ret = -EINVAL;
305                 goto cleanup;
306         }
307
308         /* Parse the response */
309
310         net_device->recv_section_cnt = init_packet->msg.
311                 v1_msg.send_recv_buf_complete.num_sections;
312
313         net_device->recv_section = kmemdup(
314                 init_packet->msg.v1_msg.send_recv_buf_complete.sections,
315                 net_device->recv_section_cnt *
316                 sizeof(struct nvsp_1_receive_buffer_section),
317                 GFP_KERNEL);
318         if (net_device->recv_section == NULL) {
319                 ret = -EINVAL;
320                 goto cleanup;
321         }
322
323         /*
324          * For 1st release, there should only be 1 section that represents the
325          * entire receive buffer
326          */
327         if (net_device->recv_section_cnt != 1 ||
328             net_device->recv_section->offset != 0) {
329                 ret = -EINVAL;
330                 goto cleanup;
331         }
332
333         /* Now set up the send buffer.
334          */
335         net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
336         if (!net_device->send_buf)
337                 net_device->send_buf = vzalloc(net_device->send_buf_size);
338         if (!net_device->send_buf) {
339                 netdev_err(ndev, "unable to allocate send "
340                            "buffer of size %d\n", net_device->send_buf_size);
341                 ret = -ENOMEM;
342                 goto cleanup;
343         }
344
345         /* Establish the gpadl handle for this buffer on this
346          * channel.  Note: This call uses the vmbus connection rather
347          * than the channel to establish the gpadl handle.
348          */
349         ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
350                                     net_device->send_buf_size,
351                                     &net_device->send_buf_gpadl_handle);
352         if (ret != 0) {
353                 netdev_err(ndev,
354                            "unable to establish send buffer's gpadl\n");
355                 goto cleanup;
356         }
357
358         /* Notify the NetVsp of the gpadl handle */
359         init_packet = &net_device->channel_init_pkt;
360         memset(init_packet, 0, sizeof(struct nvsp_message));
361         init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
362         init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
363                 net_device->send_buf_gpadl_handle;
364         init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
365
366         /* Send the gpadl notification request */
367         ret = vmbus_sendpacket(device->channel, init_packet,
368                                sizeof(struct nvsp_message),
369                                (unsigned long)init_packet,
370                                VM_PKT_DATA_INBAND,
371                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
372         if (ret != 0) {
373                 netdev_err(ndev,
374                            "unable to send send buffer's gpadl to netvsp\n");
375                 goto cleanup;
376         }
377
378         wait_for_completion(&net_device->channel_init_wait);
379
380         /* Check the response */
381         if (init_packet->msg.v1_msg.
382             send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
383                 netdev_err(ndev, "Unable to complete send buffer "
384                            "initialization with NetVsp - status %d\n",
385                            init_packet->msg.v1_msg.
386                            send_send_buf_complete.status);
387                 ret = -EINVAL;
388                 goto cleanup;
389         }
390
391         /* Parse the response */
392         net_device->send_section_size = init_packet->msg.
393                                 v1_msg.send_send_buf_complete.section_size;
394
395         /* Section count is simply the size divided by the section size.
396          */
397         net_device->send_section_cnt =
398                 net_device->send_buf_size / net_device->send_section_size;
399
400         netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
401                    net_device->send_section_size, net_device->send_section_cnt);
402
403         /* Setup state for managing the send buffer. */
404         net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
405                                              BITS_PER_LONG);
406
407         net_device->send_section_map = kcalloc(net_device->map_words,
408                                                sizeof(ulong), GFP_KERNEL);
409         if (net_device->send_section_map == NULL) {
410                 ret = -ENOMEM;
411                 goto cleanup;
412         }
413
414         goto exit;
415
416 cleanup:
417         netvsc_destroy_buf(device);
418
419 exit:
420         return ret;
421 }
422
423 /* Negotiate NVSP protocol version */
424 static int negotiate_nvsp_ver(struct hv_device *device,
425                               struct netvsc_device *net_device,
426                               struct nvsp_message *init_packet,
427                               u32 nvsp_ver)
428 {
429         struct net_device *ndev = hv_get_drvdata(device);
430         int ret;
431
432         memset(init_packet, 0, sizeof(struct nvsp_message));
433         init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
434         init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
435         init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
436
437         /* Send the init request */
438         ret = vmbus_sendpacket(device->channel, init_packet,
439                                sizeof(struct nvsp_message),
440                                (unsigned long)init_packet,
441                                VM_PKT_DATA_INBAND,
442                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
443
444         if (ret != 0)
445                 return ret;
446
447         wait_for_completion(&net_device->channel_init_wait);
448
449         if (init_packet->msg.init_msg.init_complete.status !=
450             NVSP_STAT_SUCCESS)
451                 return -EINVAL;
452
453         if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
454                 return 0;
455
456         /* NVSPv2 or later: Send NDIS config */
457         memset(init_packet, 0, sizeof(struct nvsp_message));
458         init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
459         init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
460         init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
461
462         if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
463                 init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
464
465                 /* Teaming bit is needed to receive link speed updates */
466                 init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
467         }
468
469         ret = vmbus_sendpacket(device->channel, init_packet,
470                                 sizeof(struct nvsp_message),
471                                 (unsigned long)init_packet,
472                                 VM_PKT_DATA_INBAND, 0);
473
474         return ret;
475 }
476
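/* Negotiate with the NetVSP: pick the highest mutually supported NVSP
 * version, send the NDIS version, then allocate and post the receive
 * and send buffers.
 */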
477 static int netvsc_connect_vsp(struct hv_device *device)
478 {
479         int ret;
480         struct netvsc_device *net_device;
481         struct nvsp_message *init_packet;
482         int ndis_version;
483         const u32 ver_list[] = {
484                 NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
485                 NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
486         int i;
487
488         net_device = get_outbound_net_device(device);
489         if (!net_device)
490                 return -ENODEV;
491
492         init_packet = &net_device->channel_init_pkt;
493
494         /* Negotiate the latest NVSP protocol supported */
495         for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
496                 if (negotiate_nvsp_ver(device, net_device, init_packet,
497                                        ver_list[i])  == 0) {
498                         net_device->nvsp_version = ver_list[i];
499                         break;
500                 }
501
502         if (i < 0) {
503                 ret = -EPROTO;
504                 goto cleanup;
505         }
506
507         pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
508
509         /* Send the ndis version */
510         memset(init_packet, 0, sizeof(struct nvsp_message));
511
512         if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
513                 ndis_version = 0x00060001;
514         else
515                 ndis_version = 0x0006001e;
516
517         init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
518         init_packet->msg.v1_msg.
519                 send_ndis_ver.ndis_major_ver =
520                                 (ndis_version & 0xFFFF0000) >> 16;
521         init_packet->msg.v1_msg.
522                 send_ndis_ver.ndis_minor_ver =
523                                 ndis_version & 0xFFFF;
524
525         /* Send the init request */
526         ret = vmbus_sendpacket(device->channel, init_packet,
527                                 sizeof(struct nvsp_message),
528                                 (unsigned long)init_packet,
529                                 VM_PKT_DATA_INBAND, 0);
530         if (ret != 0)
531                 goto cleanup;
532
533         /* Post the big receive buffer to NetVSP */
534         if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
535                 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
536         else
537                 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
538         net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
539
540         ret = netvsc_init_buf(device);
541
542 cleanup:
543         return ret;
544 }
545
546 static void netvsc_disconnect_vsp(struct hv_device *device)
547 {
548         netvsc_destroy_buf(device);
549 }
550
551 /*
552  * netvsc_device_remove - Callback when the root bus device is removed
553  */
554 void netvsc_device_remove(struct hv_device *device)
555 {
556         struct net_device *ndev = hv_get_drvdata(device);
557         struct net_device_context *net_device_ctx = netdev_priv(ndev);
558         struct netvsc_device *net_device = net_device_ctx->nvdev;
559         int i;
560
561         netvsc_disconnect_vsp(device);
562
563         net_device_ctx->nvdev = NULL;
564
565         /*
566          * At this point, no one should be accessing net_device
567          * except in here
568          */
569         netdev_dbg(ndev, "net device safe to remove\n");
570
571         /* Now, we can close the channel safely */
572         vmbus_close(device->channel);
573
574         for (i = 0; i < VRSS_CHANNEL_MAX; i++)
575                 napi_disable(&net_device->chan_table[i].napi);
576
577         /* Release all resources */
578         free_netvsc_device(net_device);
579 }
580
581 #define RING_AVAIL_PERCENT_HIWATER 20
582 #define RING_AVAIL_PERCENT_LOWATER 10
583
584 /*
585  * Get the percentage of available bytes to write in the ring.
586  * The return value is in the range 0 to 100.
587  */
588 static inline u32 hv_ringbuf_avail_percent(
589                 struct hv_ring_buffer_info *ring_info)
590 {
591         u32 avail_read, avail_write;
592
593         hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
594
595         return avail_write * 100 / ring_info->ring_datasize;
596 }
597
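/* Mark a send buffer section as free again. */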
598 static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
599                                          u32 index)
600 {
601         sync_change_bit(index, net_device->send_section_map);
602 }
603
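/* Completion handler for an RNDIS data packet: release its send buffer
 * section, update per-queue transmit statistics, free the skb, and wake
 * the transmit queue if the ring has drained enough.
 */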
604 static void netvsc_send_tx_complete(struct netvsc_device *net_device,
605                                     struct vmbus_channel *incoming_channel,
606                                     struct hv_device *device,
607                                     const struct vmpacket_descriptor *desc)
608 {
609         struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
610         struct net_device *ndev = hv_get_drvdata(device);
611         struct net_device_context *net_device_ctx = netdev_priv(ndev);
612         struct vmbus_channel *channel = device->channel;
613         u16 q_idx = 0;
614         int queue_sends;
615
616         /* Notify the layer above us */
617         if (likely(skb)) {
618                 const struct hv_netvsc_packet *packet
619                         = (struct hv_netvsc_packet *)skb->cb;
620                 u32 send_index = packet->send_buf_index;
621                 struct netvsc_stats *tx_stats;
622
623                 if (send_index != NETVSC_INVALID_INDEX)
624                         netvsc_free_send_slot(net_device, send_index);
625                 q_idx = packet->q_idx;
626                 channel = incoming_channel;
627
628                 tx_stats = &net_device->chan_table[q_idx].tx_stats;
629
630                 u64_stats_update_begin(&tx_stats->syncp);
631                 tx_stats->packets += packet->total_packets;
632                 tx_stats->bytes += packet->total_bytes;
633                 u64_stats_update_end(&tx_stats->syncp);
634
635                 dev_consume_skb_any(skb);
636         }
637
638         queue_sends =
639                 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
640
641         if (net_device->destroy && queue_sends == 0)
642                 wake_up(&net_device->wait_drain);
643
644         if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
645             !net_device_ctx->start_remove &&
646             (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
647              queue_sends < 1))
648                 netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
649 }
650
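/* Handle a completion packet from the host: init-time responses are
 * copied back and channel_init_wait is completed for the waiter;
 * RNDIS packet completions go to netvsc_send_tx_complete().
 */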
651 static void netvsc_send_completion(struct netvsc_device *net_device,
652                                    struct vmbus_channel *incoming_channel,
653                                    struct hv_device *device,
654                                    const struct vmpacket_descriptor *desc)
655 {
656         struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
657         struct net_device *ndev = hv_get_drvdata(device);
658
659         switch (nvsp_packet->hdr.msg_type) {
660         case NVSP_MSG_TYPE_INIT_COMPLETE:
661         case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
662         case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
663         case NVSP_MSG5_TYPE_SUBCHANNEL:
664                 /* Copy the response back */
665                 memcpy(&net_device->channel_init_pkt, nvsp_packet,
666                        sizeof(struct nvsp_message));
667                 complete(&net_device->channel_init_wait);
668                 break;
669
670         case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
671                 netvsc_send_tx_complete(net_device, incoming_channel,
672                                         device, desc);
673                 break;
674
675         default:
676                 netdev_err(ndev,
677                            "Unknown send completion type %d received!!\n",
678                            nvsp_packet->hdr.msg_type);
679         }
680 }
681
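/* Find and atomically claim a free send buffer section; returns
 * NETVSC_INVALID_INDEX if all sections are in use.
 */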
682 static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
683 {
684         unsigned long *map_addr = net_device->send_section_map;
685         unsigned int i;
686
687         for_each_clear_bit(i, map_addr, net_device->map_words) {
688                 if (sync_test_and_set_bit(i, map_addr) == 0)
689                         return i;
690         }
691
692         return NETVSC_INVALID_INDEX;
693 }
694
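/* Copy the packet's page fragments into its send buffer section at
 * offset @pend_size.  When batching (xmit_more) and not doing a partial
 * copy, pad the data out to the RNDIS packet alignment.
 */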
695 static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
696                                    unsigned int section_index,
697                                    u32 pend_size,
698                                    struct hv_netvsc_packet *packet,
699                                    struct rndis_message *rndis_msg,
700                                    struct hv_page_buffer **pb,
701                                    struct sk_buff *skb)
702 {
703         char *start = net_device->send_buf;
704         char *dest = start + (section_index * net_device->send_section_size)
705                      + pend_size;
706         int i;
707         u32 msg_size = 0;
708         u32 padding = 0;
709         u32 remain = packet->total_data_buflen % net_device->pkt_align;
710         u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
711                 packet->page_buf_cnt;
712
713         /* Add padding */
714         if (skb && skb->xmit_more && remain &&
715             !packet->cp_partial) {
716                 padding = net_device->pkt_align - remain;
717                 rndis_msg->msg_len += padding;
718                 packet->total_data_buflen += padding;
719         }
720
721         for (i = 0; i < page_count; i++) {
722                 char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
723                 u32 offset = (*pb)[i].offset;
724                 u32 len = (*pb)[i].len;
725
726                 memcpy(dest, (src + offset), len);
727                 msg_size += len;
728                 dest += len;
729         }
730
731         if (padding) {
732                 memset(dest, 0, padding);
733                 msg_size += padding;
734         }
735
736         return msg_size;
737 }
738
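/* Send one NVSP_MSG1_TYPE_SEND_RNDIS_PKT message over the channel,
 * either with page buffers attached or inband, and stop the transmit
 * queue when the outbound ring runs low on space.
 */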
739 static inline int netvsc_send_pkt(
740         struct hv_device *device,
741         struct hv_netvsc_packet *packet,
742         struct netvsc_device *net_device,
743         struct hv_page_buffer **pb,
744         struct sk_buff *skb)
745 {
746         struct nvsp_message nvmsg;
747         struct netvsc_channel *nvchan
748                 = &net_device->chan_table[packet->q_idx];
749         struct vmbus_channel *out_channel = nvchan->channel;
750         struct net_device *ndev = hv_get_drvdata(device);
751         struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
752         u64 req_id;
753         int ret;
754         struct hv_page_buffer *pgbuf;
755         u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
756
757         nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
758         if (skb != NULL) {
759                 /* 0 is RMC_DATA; */
760                 nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
761         } else {
762                 /* 1 is RMC_CONTROL; */
763                 nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
764         }
765
766         nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
767                 packet->send_buf_index;
768         if (packet->send_buf_index == NETVSC_INVALID_INDEX)
769                 nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
770         else
771                 nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
772                         packet->total_data_buflen;
773
774         req_id = (ulong)skb;
775
776         if (out_channel->rescind)
777                 return -ENODEV;
778
779         if (packet->page_buf_cnt) {
780                 pgbuf = packet->cp_partial ? (*pb) +
781                         packet->rmsg_pgcnt : (*pb);
782                 ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
783                                                       pgbuf,
784                                                       packet->page_buf_cnt,
785                                                       &nvmsg,
786                                                       sizeof(struct nvsp_message),
787                                                       req_id,
788                                                       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
789         } else {
790                 ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
791                                            sizeof(struct nvsp_message),
792                                            req_id,
793                                            VM_PKT_DATA_INBAND,
794                                            VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
795         }
796
797         if (ret == 0) {
798                 atomic_inc_return(&nvchan->queue_sends);
799
800                 if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
801                         netif_tx_stop_queue(txq);
802         } else if (ret == -EAGAIN) {
803                 netif_tx_stop_queue(txq);
804                 if (atomic_read(&nvchan->queue_sends) < 1) {
805                         netif_tx_wake_queue(txq);
806                         ret = -ENOSPC;
807                 }
808         } else {
809                 netdev_err(ndev, "Unable to send packet %p ret %d\n",
810                            packet, ret);
811         }
812
813         return ret;
814 }
815
816 /* Move packet out of multi send data (msd), and clear msd */
817 static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
818                                 struct sk_buff **msd_skb,
819                                 struct multi_send_data *msdp)
820 {
821         *msd_skb = msdp->skb;
822         *msd_send = msdp->pkt;
823         msdp->skb = NULL;
824         msdp->pkt = NULL;
825         msdp->count = 0;
826 }
827
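/* Main transmit path.  RNDIS data packets are copied into a shared send
 * buffer section and batched while skb->xmit_more indicates more are
 * coming; control messages (skb == NULL) bypass batching and are sent
 * immediately.
 */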
828 int netvsc_send(struct hv_device *device,
829                 struct hv_netvsc_packet *packet,
830                 struct rndis_message *rndis_msg,
831                 struct hv_page_buffer **pb,
832                 struct sk_buff *skb)
833 {
834         struct netvsc_device *net_device;
835         int ret = 0;
836         struct netvsc_channel *nvchan;
837         u32 pktlen = packet->total_data_buflen, msd_len = 0;
838         unsigned int section_index = NETVSC_INVALID_INDEX;
839         struct multi_send_data *msdp;
840         struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
841         struct sk_buff *msd_skb = NULL;
842         bool try_batch;
843         bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
844
845         net_device = get_outbound_net_device(device);
846         if (!net_device)
847                 return -ENODEV;
848
849         /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
850          * here before the negotiation with the host is finished and
851          * send_section_map may not be allocated yet.
852          */
853         if (!net_device->send_section_map)
854                 return -EAGAIN;
855
856         nvchan = &net_device->chan_table[packet->q_idx];
857         packet->send_buf_index = NETVSC_INVALID_INDEX;
858         packet->cp_partial = false;
859
860         /* Send control message directly without accessing msd (Multi-Send
861          * Data) field which may be changed during data packet processing.
862          */
863         if (!skb) {
864                 cur_send = packet;
865                 goto send_now;
866         }
867
868         /* batch packets in send buffer if possible */
869         msdp = &nvchan->msd;
870         if (msdp->pkt)
871                 msd_len = msdp->pkt->total_data_buflen;
872
873         try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
874                     net_device->max_pkt;
875
876         if (try_batch && msd_len + pktlen + net_device->pkt_align <
877             net_device->send_section_size) {
878                 section_index = msdp->pkt->send_buf_index;
879
880         } else if (try_batch && msd_len + packet->rmsg_size <
881                    net_device->send_section_size) {
882                 section_index = msdp->pkt->send_buf_index;
883                 packet->cp_partial = true;
884
885         } else if ((skb != NULL) && pktlen + net_device->pkt_align <
886                    net_device->send_section_size) {
887                 section_index = netvsc_get_next_send_section(net_device);
888                 if (section_index != NETVSC_INVALID_INDEX) {
889                         move_pkt_msd(&msd_send, &msd_skb, msdp);
890                         msd_len = 0;
891                 }
892         }
893
894         if (section_index != NETVSC_INVALID_INDEX) {
895                 netvsc_copy_to_send_buf(net_device,
896                                         section_index, msd_len,
897                                         packet, rndis_msg, pb, skb);
898
899                 packet->send_buf_index = section_index;
900
901                 if (packet->cp_partial) {
902                         packet->page_buf_cnt -= packet->rmsg_pgcnt;
903                         packet->total_data_buflen = msd_len + packet->rmsg_size;
904                 } else {
905                         packet->page_buf_cnt = 0;
906                         packet->total_data_buflen += msd_len;
907                 }
908
909                 if (msdp->pkt) {
910                         packet->total_packets += msdp->pkt->total_packets;
911                         packet->total_bytes += msdp->pkt->total_bytes;
912                 }
913
914                 if (msdp->skb)
915                         dev_consume_skb_any(msdp->skb);
916
917                 if (xmit_more && !packet->cp_partial) {
918                         msdp->skb = skb;
919                         msdp->pkt = packet;
920                         msdp->count++;
921                 } else {
922                         cur_send = packet;
923                         msdp->skb = NULL;
924                         msdp->pkt = NULL;
925                         msdp->count = 0;
926                 }
927         } else {
928                 move_pkt_msd(&msd_send, &msd_skb, msdp);
929                 cur_send = packet;
930         }
931
932         if (msd_send) {
933                 int m_ret = netvsc_send_pkt(device, msd_send, net_device,
934                                             NULL, msd_skb);
935
936                 if (m_ret != 0) {
937                         netvsc_free_send_slot(net_device,
938                                               msd_send->send_buf_index);
939                         dev_kfree_skb_any(msd_skb);
940                 }
941         }
942
943 send_now:
944         if (cur_send)
945                 ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
946
947         if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
948                 netvsc_free_send_slot(net_device, section_index);
949
950         return ret;
951 }
952
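/* Send a completion for a received packet, reporting @status to the host. */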
953 static int netvsc_send_recv_completion(struct vmbus_channel *channel,
954                                        u64 transaction_id, u32 status)
955 {
956         struct nvsp_message recvcompMessage;
957         int ret;
958
959         recvcompMessage.hdr.msg_type =
960                                 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
961
962         recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;
963
964         /* Send the completion */
965         ret = vmbus_sendpacket(channel, &recvcompMessage,
966                                sizeof(struct nvsp_message_header) + sizeof(u32),
967                                transaction_id, VM_PKT_COMP, 0);
968
969         return ret;
970 }
971
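/* Count the filled and still-available slots in the per-channel circular
 * receive completion buffer.
 */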
972 static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
973                                         u32 *filled, u32 *avail)
974 {
975         struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
976         u32 first = mrc->first;
977         u32 next = mrc->next;
978
979         *filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
980                   next - first;
981
982         *avail = NETVSC_RECVSLOT_MAX - *filled - 1;
983 }
984
985 /* Read the first filled slot, no change to index */
986 static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
987                                                          *nvdev, u16 q_idx)
988 {
989         struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
990         u32 filled, avail;
991
992         if (unlikely(!mrc->buf))
993                 return NULL;
994
995         count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
996         if (!filled)
997                 return NULL;
998
999         return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
1000 }
1001
1002 /* Put the first filled slot back to available pool */
1003 static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
1004 {
1005         struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
1006         int num_recv;
1007
1008         mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;
1009
1010         num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);
1011
1012         if (nvdev->destroy && num_recv == 0)
1013                 wake_up(&nvdev->wait_drain);
1014 }
1015
1016 /* Check and send pending recv completions */
1017 static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
1018                                  struct vmbus_channel *channel, u16 q_idx)
1019 {
1020         struct recv_comp_data *rcd;
1021         int ret;
1022
1023         while (true) {
1024                 rcd = read_recv_comp_slot(nvdev, q_idx);
1025                 if (!rcd)
1026                         break;
1027
1028                 ret = netvsc_send_recv_completion(channel, rcd->tid,
1029                                                   rcd->status);
1030                 if (ret)
1031                         break;
1032
1033                 put_recv_comp_slot(nvdev, q_idx);
1034         }
1035 }
1036
1037 #define NETVSC_RCD_WATERMARK 80
1038
1039 /* Get next available slot */
1040 static inline struct recv_comp_data *get_recv_comp_slot(
1041         struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
1042 {
1043         struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
1044         u32 filled, avail, next;
1045         struct recv_comp_data *rcd;
1046
1047         if (unlikely(!nvdev->recv_section))
1048                 return NULL;
1049
1050         if (unlikely(!mrc->buf))
1051                 return NULL;
1052
1053         if (atomic_read(&nvdev->num_outstanding_recvs) >
1054             nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100)
1055                 netvsc_chk_recv_comp(nvdev, channel, q_idx);
1056
1057         count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
1058         if (!avail)
1059                 return NULL;
1060
1061         next = mrc->next;
1062         rcd = mrc->buf + next * sizeof(struct recv_comp_data);
1063         mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;
1064
1065         atomic_inc(&nvdev->num_outstanding_recvs);
1066
1067         return rcd;
1068 }
1069
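/* Handle a transfer-page data packet from the host.  Each range is one
 * RNDIS packet inside the receive buffer; pass each one to the RNDIS
 * filter, then queue (or directly send) a receive completion.
 */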
1070 static int netvsc_receive(struct net_device *ndev,
1071                    struct netvsc_device *net_device,
1072                    struct net_device_context *net_device_ctx,
1073                    struct hv_device *device,
1074                    struct vmbus_channel *channel,
1075                    const struct vmpacket_descriptor *desc,
1076                    struct nvsp_message *nvsp)
1077 {
1078         const struct vmtransfer_page_packet_header *vmxferpage_packet
1079                 = container_of(desc, const struct vmtransfer_page_packet_header, d);
1080         u16 q_idx = channel->offermsg.offer.sub_channel_index;
1081         char *recv_buf = net_device->recv_buf;
1082         u32 status = NVSP_STAT_SUCCESS;
1083         int i;
1084         int count = 0;
1085         int ret;
1086
1087         /* Make sure this is a valid nvsp packet */
1088         if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1089                 netif_err(net_device_ctx, rx_err, ndev,
1090                           "Unknown nvsp packet type received %u\n",
1091                           nvsp->hdr.msg_type);
1092                 return 0;
1093         }
1094
1095         if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1096                 netif_err(net_device_ctx, rx_err, ndev,
1097                           "Invalid xfer page set id - expecting %x got %x\n",
1098                           NETVSC_RECEIVE_BUFFER_ID,
1099                           vmxferpage_packet->xfer_pageset_id);
1100                 return 0;
1101         }
1102
1103         count = vmxferpage_packet->range_cnt;
1104
1105         /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1106         for (i = 0; i < count; i++) {
1107                 void *data = recv_buf
1108                         + vmxferpage_packet->ranges[i].byte_offset;
1109                 u32 buflen = vmxferpage_packet->ranges[i].byte_count;
1110
1111                 /* Pass it to the upper layer */
1112                 status = rndis_filter_receive(ndev, net_device, device,
1113                                               channel, data, buflen);
1114         }
1115
1116         if (net_device->chan_table[q_idx].mrc.buf) {
1117                 struct recv_comp_data *rcd;
1118
1119                 rcd = get_recv_comp_slot(net_device, channel, q_idx);
1120                 if (rcd) {
1121                         rcd->tid = vmxferpage_packet->d.trans_id;
1122                         rcd->status = status;
1123                 } else {
1124                         netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1125                                    q_idx, vmxferpage_packet->d.trans_id);
1126                 }
1127         } else {
1128                 ret = netvsc_send_recv_completion(channel,
1129                                                   vmxferpage_packet->d.trans_id,
1130                                                   status);
1131                 if (ret)
1132                         netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
1133                                    q_idx, vmxferpage_packet->d.trans_id, ret);
1134         }
1135         return count;
1136 }
1137
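/* Copy the send indirection table from the host's message into the
 * device's send_table.
 */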
1138 static void netvsc_send_table(struct hv_device *hdev,
1139                               struct nvsp_message *nvmsg)
1140 {
1141         struct netvsc_device *nvscdev;
1142         struct net_device *ndev = hv_get_drvdata(hdev);
1143         int i;
1144         u32 count, *tab;
1145
1146         nvscdev = get_outbound_net_device(hdev);
1147         if (!nvscdev)
1148                 return;
1149
1150         count = nvmsg->msg.v5_msg.send_table.count;
1151         if (count != VRSS_SEND_TAB_SIZE) {
1152                 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1153                 return;
1154         }
1155
1156         tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
1157                       nvmsg->msg.v5_msg.send_table.offset);
1158
1159         for (i = 0; i < count; i++)
1160                 nvscdev->send_table[i] = tab[i];
1161 }
1162
1163 static void netvsc_send_vf(struct net_device_context *net_device_ctx,
1164                            struct nvsp_message *nvmsg)
1165 {
1166         net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1167         net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1168 }
1169
1170 static inline void netvsc_receive_inband(struct hv_device *hdev,
1171                                  struct net_device_context *net_device_ctx,
1172                                  struct nvsp_message *nvmsg)
1173 {
1174         switch (nvmsg->hdr.msg_type) {
1175         case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1176                 netvsc_send_table(hdev, nvmsg);
1177                 break;
1178
1179         case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1180                 netvsc_send_vf(net_device_ctx, nvmsg);
1181                 break;
1182         }
1183 }
1184
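/* Dispatch one VMBus packet by type: completions, transfer-page
 * (receive) data, or inband control messages.  Returns the number of
 * receive packets processed, 0 otherwise.
 */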
1185 static int netvsc_process_raw_pkt(struct hv_device *device,
1186                                   struct vmbus_channel *channel,
1187                                   struct netvsc_device *net_device,
1188                                   struct net_device *ndev,
1189                                   u64 request_id,
1190                                   const struct vmpacket_descriptor *desc)
1191 {
1192         struct net_device_context *net_device_ctx = netdev_priv(ndev);
1193         struct nvsp_message *nvmsg = hv_pkt_data(desc);
1194
1195         switch (desc->type) {
1196         case VM_PKT_COMP:
1197                 netvsc_send_completion(net_device, channel, device, desc);
1198                 break;
1199
1200         case VM_PKT_DATA_USING_XFER_PAGES:
1201                 return netvsc_receive(ndev, net_device, net_device_ctx,
1202                                       device, channel, desc, nvmsg);
1203                 break;
1204
1205         case VM_PKT_DATA_INBAND:
1206                 netvsc_receive_inband(device, net_device_ctx, nvmsg);
1207                 break;
1208
1209         default:
1210                 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1211                            desc->type, request_id);
1212                 break;
1213         }
1214
1215         return 0;
1216 }
1217
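/* Map a channel back to its hv_device; subchannels resolve through
 * their primary channel.
 */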
1218 static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1219 {
1220         struct vmbus_channel *primary = channel->primary_channel;
1221
1222         return primary ? primary->device_obj : channel->device_obj;
1223 }
1224
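/* NAPI poll function: process up to @budget packets from the channel's
 * ring buffer.  If the budget is not exhausted, complete NAPI and
 * re-enable host interrupts, rescheduling if more data arrived in the
 * meantime.  Finally, flush any pending receive completions.
 */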
1225 int netvsc_poll(struct napi_struct *napi, int budget)
1226 {
1227         struct netvsc_channel *nvchan
1228                 = container_of(napi, struct netvsc_channel, napi);
1229         struct vmbus_channel *channel = nvchan->channel;
1230         struct hv_device *device = netvsc_channel_to_device(channel);
1231         u16 q_idx = channel->offermsg.offer.sub_channel_index;
1232         struct net_device *ndev = hv_get_drvdata(device);
1233         struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
1234         const struct vmpacket_descriptor *desc;
1235         int work_done = 0;
1236
1237         desc = hv_pkt_iter_first(channel);
1238         while (desc) {
1239                 int count;
1240
1241                 count = netvsc_process_raw_pkt(device, channel, net_device,
1242                                                ndev, desc->trans_id, desc);
1243                 work_done += count;
1244                 desc = __hv_pkt_iter_next(channel, desc);
1245
1246                 /* If receive packet budget is exhausted, reschedule */
1247                 if (work_done >= budget) {
1248                         work_done = budget;
1249                         break;
1250                 }
1251         }
1252         hv_pkt_iter_close(channel);
1253
1254         /* If budget was not exhausted, complete NAPI; reschedule if more data arrived */
1255         if (work_done < budget &&
1256             napi_complete_done(napi, work_done) &&
1257             hv_end_read(&channel->inbound) != 0)
1258                 napi_reschedule(napi);
1259
1260         netvsc_chk_recv_comp(net_device, channel, q_idx);
1261         return work_done;
1262 }
1263
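/* Channel callback, run when the host signals the channel: mask further
 * host interrupts and schedule NAPI to do the actual ring processing.
 */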
1264 void netvsc_channel_cb(void *context)
1265 {
1266         struct vmbus_channel *channel = context;
1267         struct hv_device *device = netvsc_channel_to_device(channel);
1268         u16 q_idx = channel->offermsg.offer.sub_channel_index;
1269         struct netvsc_device *net_device;
1270         struct net_device *ndev;
1271
1272         ndev = hv_get_drvdata(device);
1273         if (unlikely(!ndev))
1274                 return;
1275
1276         net_device = net_device_to_netvsc_device(ndev);
1277         if (unlikely(net_device->destroy) &&
1278             netvsc_channel_idle(net_device, q_idx))
1279                 return;
1280
1281         /* disable interrupts from host */
1282         hv_begin_read(&channel->inbound);
1283         napi_schedule(&net_device->chan_table[q_idx].napi);
1284 }
1285
1286 /*
1287  * netvsc_device_add - Callback when the device belonging to this
1288  * driver is added
1289  */
1290 int netvsc_device_add(struct hv_device *device,
1291                       const struct netvsc_device_info *device_info)
1292 {
1293         int i, ret = 0;
1294         int ring_size = device_info->ring_size;
1295         struct netvsc_device *net_device;
1296         struct net_device *ndev = hv_get_drvdata(device);
1297         struct net_device_context *net_device_ctx = netdev_priv(ndev);
1298
1299         net_device = alloc_net_device();
1300         if (!net_device)
1301                 return -ENOMEM;
1302
1303         net_device->ring_size = ring_size;
1304
1305         /* Because the device uses NAPI, all interrupt batching and
1306          * control is done via the NET_RX softirq, not by the channel callback
1307          */
1308         set_channel_read_mode(device->channel, HV_CALL_ISR);
1309
1310         /* Open the channel */
1311         ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
1312                          ring_size * PAGE_SIZE, NULL, 0,
1313                          netvsc_channel_cb, device->channel);
1314
1315         if (ret != 0) {
1316                 netdev_err(ndev, "unable to open channel: %d\n", ret);
1317                 goto cleanup;
1318         }
1319
1320         /* Channel is opened */
1321         netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
1322
1323         /* If we're reopening the device we may have multiple queues; fill the
1324          * chan_table with the default channel so it can be used before the
1325          * subchannels are opened.
1326          */
1327         for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1328                 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1329
1330                 nvchan->channel = device->channel;
1331                 netif_napi_add(ndev, &nvchan->napi,
1332                                netvsc_poll, NAPI_POLL_WEIGHT);
1333         }
1334
1335         /* Enable NAPI handler for init callbacks */
1336         napi_enable(&net_device->chan_table[0].napi);
1337
1338         /* Writing the nvdev pointer unblocks netvsc_send(); make sure chan_table
1339          * is populated first.
1340          */
1341         wmb();
1342
1343         net_device_ctx->nvdev = net_device;
1344
1345         /* Connect with the NetVsp */
1346         ret = netvsc_connect_vsp(device);
1347         if (ret != 0) {
1348                 netdev_err(ndev,
1349                         "unable to connect to NetVSP - %d\n", ret);
1350                 goto close;
1351         }
1352
1353         return ret;
1354
1355 close:
1356         napi_disable(&net_device->chan_table[0].napi);
1357
1358         /* Now, we can close the channel safely */
1359         vmbus_close(device->channel);
1360
1361 cleanup:
1362         free_netvsc_device(net_device);
1363
1364         return ret;
1365 }