/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"
/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct hv_device *dev = net_device_ctx->device_ctx;
        struct netvsc_device *nv_dev = net_device_ctx->nvdev;
        struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

        memset(init_pkt, 0, sizeof(struct nvsp_message));
        init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
        if (vf)
                init_pkt->msg.v4_msg.active_dp.active_datapath =
                        NVSP_DATAPATH_VF;
        else
                init_pkt->msg.v4_msg.active_dp.active_datapath =
                        NVSP_DATAPATH_SYNTHETIC;

        vmbus_sendpacket(dev->channel, init_pkt,
                         sizeof(struct nvsp_message),
                         (unsigned long)init_pkt,
                         VM_PKT_DATA_INBAND, 0);
}
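
/*
 * alloc_net_device() below sets up the per-device state. Note that only
 * channel 0's receive-completion ring is allocated here; the rings for
 * any subchannels are presumably allocated later, once the negotiated
 * channel count is known.
 */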
static struct netvsc_device *alloc_net_device(void)
{
        struct netvsc_device *net_device;

        net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
        if (!net_device)
                return NULL;

        net_device->chan_table[0].mrc.buf
                = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));

        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
        atomic_set(&net_device->open_cnt, 0);
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
        init_completion(&net_device->channel_init_wait);

        return net_device;
}
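
/*
 * The device is freed via call_rcu() so that any reader still holding
 * the RCU-protected nvdev pointer (see rcu_assign_pointer() in
 * netvsc_device_add()) cannot see the memory disappear underneath it.
 */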
static void free_netvsc_device(struct rcu_head *head)
{
        struct netvsc_device *nvdev
                = container_of(head, struct netvsc_device, rcu);
        int i;

        for (i = 0; i < VRSS_CHANNEL_MAX; i++)
                vfree(nvdev->chan_table[i].mrc.buf);

        kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
        call_rcu(&nvdev->rcu, free_netvsc_device);
}
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
        struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

        if (net_device && net_device->destroy)
                net_device = NULL;

        return net_device;
}
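
/*
 * Teardown must happen in this order: first revoke the buffer with the
 * host (NetVSP), then tear down the GPADL that maps it into the host,
 * and only then free the guest memory. Freeing earlier could leave the
 * host with a mapping into memory the guest has already reused.
 */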
static void netvsc_destroy_buf(struct hv_device *device)
{
        struct nvsp_message *revoke_packet;
        struct net_device *ndev = hv_get_drvdata(device);
        struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
        int ret;

        /*
         * If we got a section count, it means we received a
         * SendReceiveBufferComplete msg (i.e. we sent a
         * NvspMessage1TypeSendReceiveBuffer msg), therefore we need
         * to send a revoke msg here
         */
        if (net_device->recv_section_cnt) {
                /* Send the revoke receive buffer */
                revoke_packet = &net_device->revoke_packet;
                memset(revoke_packet, 0, sizeof(struct nvsp_message));

                revoke_packet->hdr.msg_type =
                        NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
                revoke_packet->msg.v1_msg.
                revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

                ret = vmbus_sendpacket(device->channel,
                                       revoke_packet,
                                       sizeof(struct nvsp_message),
                                       (unsigned long)revoke_packet,
                                       VM_PKT_DATA_INBAND, 0);
                /* If the failure is because the channel is rescinded,
                 * ignore the failure since we cannot send on a rescinded
                 * channel. This allows us to clean up properly
                 * even when the channel is rescinded.
                 */
                if (device->channel->rescind)
                        ret = 0;
                /*
                 * If we failed here, we might as well return and
                 * have a leak rather than continue and hit a bugcheck.
                 */
                if (ret != 0) {
                        netdev_err(ndev, "unable to send "
                                   "revoke receive buffer to netvsp\n");
                        return;
                }
        }

        /* Teardown the gpadl on the vsp end */
        if (net_device->recv_buf_gpadl_handle) {
                ret = vmbus_teardown_gpadl(device->channel,
                                           net_device->recv_buf_gpadl_handle);

                /* If we failed here, we might as well return and have a leak
                 * rather than continue and hit a bugcheck.
                 */
                if (ret != 0) {
                        netdev_err(ndev,
                                   "unable to teardown receive buffer's gpadl\n");
                        return;
                }
                net_device->recv_buf_gpadl_handle = 0;
        }

        if (net_device->recv_buf) {
                /* Free up the receive buffer */
                vfree(net_device->recv_buf);
                net_device->recv_buf = NULL;
        }

        if (net_device->recv_section) {
                net_device->recv_section_cnt = 0;
                kfree(net_device->recv_section);
                net_device->recv_section = NULL;
        }

        /* Deal with the send buffer we may have setup.
         * If we got a send section size, it means we received a
         * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
         * NVSP_MSG1_TYPE_SEND_SEND_BUF msg), therefore we need
         * to send a revoke msg here
         */
        if (net_device->send_section_size) {
                /* Send the revoke send buffer */
                revoke_packet = &net_device->revoke_packet;
                memset(revoke_packet, 0, sizeof(struct nvsp_message));

                revoke_packet->hdr.msg_type =
                        NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
                revoke_packet->msg.v1_msg.revoke_send_buf.id =
                        NETVSC_SEND_BUFFER_ID;

                ret = vmbus_sendpacket(device->channel,
                                       revoke_packet,
                                       sizeof(struct nvsp_message),
                                       (unsigned long)revoke_packet,
                                       VM_PKT_DATA_INBAND, 0);

                /* If the failure is because the channel is rescinded,
                 * ignore the failure since we cannot send on a rescinded
                 * channel. This allows us to clean up properly
                 * even when the channel is rescinded.
                 */
                if (device->channel->rescind)
                        ret = 0;

                /* If we failed here, we might as well return and
                 * have a leak rather than continue and hit a bugcheck.
                 */
                if (ret != 0) {
                        netdev_err(ndev, "unable to send "
                                   "revoke send buffer to netvsp\n");
                        return;
                }
        }
        /* Teardown the gpadl on the vsp end */
        if (net_device->send_buf_gpadl_handle) {
                ret = vmbus_teardown_gpadl(device->channel,
                                           net_device->send_buf_gpadl_handle);

                /* If we failed here, we might as well return and have a leak
                 * rather than continue and hit a bugcheck.
                 */
                if (ret != 0) {
                        netdev_err(ndev,
                                   "unable to teardown send buffer's gpadl\n");
                        return;
                }
                net_device->send_buf_gpadl_handle = 0;
        }
        if (net_device->send_buf) {
                /* Free up the send buffer */
                vfree(net_device->send_buf);
                net_device->send_buf = NULL;
        }
        kfree(net_device->send_section_map);
}
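
/*
 * netvsc_init_buf() mirrors the teardown above: allocate the buffer,
 * establish a GPADL so the host can map it, tell NetVSP about the
 * handle, then block on channel_init_wait until the host's completion
 * message (handled in netvsc_send_completion()) posts the result.
 */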
static int netvsc_init_buf(struct hv_device *device)
{
        int ret = 0;
        struct netvsc_device *net_device;
        struct nvsp_message *init_packet;
        struct net_device *ndev;
        size_t map_words;
        int node;

        net_device = get_outbound_net_device(device);
        if (!net_device)
                return -ENODEV;
        ndev = hv_get_drvdata(device);

        node = cpu_to_node(device->channel->target_cpu);
        net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
        if (!net_device->recv_buf)
                net_device->recv_buf = vzalloc(net_device->recv_buf_size);

        if (!net_device->recv_buf) {
                netdev_err(ndev, "unable to allocate receive "
                           "buffer of size %d\n", net_device->recv_buf_size);
                ret = -ENOMEM;
                goto cleanup;
        }

        /*
         * Establish the gpadl handle for this buffer on this
         * channel.  Note: This call uses the vmbus connection rather
         * than the channel to establish the gpadl handle.
         */
        ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
                                    net_device->recv_buf_size,
                                    &net_device->recv_buf_gpadl_handle);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to establish receive buffer's gpadl\n");
                goto cleanup;
        }

        /* Notify the NetVsp of the gpadl handle */
        init_packet = &net_device->channel_init_pkt;

        memset(init_packet, 0, sizeof(struct nvsp_message));

        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
        init_packet->msg.v1_msg.send_recv_buf.
                gpadl_handle = net_device->recv_buf_gpadl_handle;
        init_packet->msg.v1_msg.
                send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

        /* Send the gpadl notification request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to send receive buffer's gpadl to netvsp\n");
                goto cleanup;
        }

        wait_for_completion(&net_device->channel_init_wait);

        /* Check the response */
        if (init_packet->msg.v1_msg.
            send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev, "Unable to complete receive buffer "
                           "initialization with NetVsp - status %d\n",
                           init_packet->msg.v1_msg.
                           send_recv_buf_complete.status);
                ret = -EINVAL;
                goto cleanup;
        }

        /* Parse the response */
        net_device->recv_section_cnt = init_packet->msg.
                v1_msg.send_recv_buf_complete.num_sections;

        net_device->recv_section = kmemdup(
                init_packet->msg.v1_msg.send_recv_buf_complete.sections,
                net_device->recv_section_cnt *
                sizeof(struct nvsp_1_receive_buffer_section),
                GFP_KERNEL);
        if (net_device->recv_section == NULL) {
                ret = -EINVAL;
                goto cleanup;
        }

        /*
         * For 1st release, there should only be 1 section that represents the
         * entire receive buffer
         */
        if (net_device->recv_section_cnt != 1 ||
            net_device->recv_section->offset != 0) {
                ret = -EINVAL;
                goto cleanup;
        }

        /* Now setup the send buffer. */
        net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
        if (!net_device->send_buf)
                net_device->send_buf = vzalloc(net_device->send_buf_size);
        if (!net_device->send_buf) {
                netdev_err(ndev, "unable to allocate send "
                           "buffer of size %d\n", net_device->send_buf_size);
                ret = -ENOMEM;
                goto cleanup;
        }

        /* Establish the gpadl handle for this buffer on this
         * channel.  Note: This call uses the vmbus connection rather
         * than the channel to establish the gpadl handle.
         */
        ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
                                    net_device->send_buf_size,
                                    &net_device->send_buf_gpadl_handle);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to establish send buffer's gpadl\n");
                goto cleanup;
        }

        /* Notify the NetVsp of the gpadl handle */
        init_packet = &net_device->channel_init_pkt;
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
        init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
                net_device->send_buf_gpadl_handle;
        init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

        /* Send the gpadl notification request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to send send buffer's gpadl to netvsp\n");
                goto cleanup;
        }

        wait_for_completion(&net_device->channel_init_wait);

        /* Check the response */
        if (init_packet->msg.v1_msg.
            send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev, "Unable to complete send buffer "
                           "initialization with NetVsp - status %d\n",
                           init_packet->msg.v1_msg.
                           send_send_buf_complete.status);
                ret = -EINVAL;
                goto cleanup;
        }

        /* Parse the response */
        net_device->send_section_size = init_packet->msg.
                v1_msg.send_send_buf_complete.section_size;

        /* Section count is simply the size divided by the section size. */
        net_device->send_section_cnt =
                net_device->send_buf_size / net_device->send_section_size;

        netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
                   net_device->send_section_size, net_device->send_section_cnt);

        /* Setup state for managing the send buffer. */
        map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

        net_device->send_section_map = kcalloc(map_words, sizeof(ulong),
                                               GFP_KERNEL);
        if (net_device->send_section_map == NULL) {
                ret = -ENOMEM;
                goto cleanup;
        }

        goto exit;

cleanup:
        netvsc_destroy_buf(device);

exit:
        return ret;
}
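
/*
 * Each negotiation probe offers exactly one version to the host
 * (min_protocol_ver == max_protocol_ver below); netvsc_connect_vsp()
 * walks its version list from newest to oldest until one is accepted.
 */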
/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
                              struct netvsc_device *net_device,
                              struct nvsp_message *init_packet,
                              u32 nvsp_ver)
{
        struct net_device *ndev = hv_get_drvdata(device);
        int ret;

        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
        init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
        init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

        /* Send the init request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0)
                return ret;

        wait_for_completion(&net_device->channel_init_wait);

        if (init_packet->msg.init_msg.init_complete.status !=
            NVSP_STAT_SUCCESS)
                return -EINVAL;

        if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
                return 0;

        /* NVSPv2 or later: Send NDIS config */
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
        init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
        init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

        if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
                init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

                /* Teaming bit is needed to receive link speed updates */
                init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
        }

        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND, 0);

        return ret;
}
static int netvsc_connect_vsp(struct hv_device *device)
{
        int ret;
        struct netvsc_device *net_device;
        struct nvsp_message *init_packet;
        int ndis_version;
        const u32 ver_list[] = {
                NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
                NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
        int i;

        net_device = get_outbound_net_device(device);
        if (!net_device)
                return -ENODEV;

        init_packet = &net_device->channel_init_pkt;

        /* Negotiate the latest NVSP protocol supported */
        for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
                if (negotiate_nvsp_ver(device, net_device, init_packet,
                                       ver_list[i]) == 0) {
                        net_device->nvsp_version = ver_list[i];
                        break;
                }

        if (i < 0) {
                ret = -EPROTO;
                goto cleanup;
        }

        pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

        /* Send the ndis version */
        memset(init_packet, 0, sizeof(struct nvsp_message));

        if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
                ndis_version = 0x00060001;
        else
                ndis_version = 0x0006001e;

        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
        init_packet->msg.v1_msg.
                send_ndis_ver.ndis_major_ver =
                        (ndis_version & 0xFFFF0000) >> 16;
        init_packet->msg.v1_msg.
                send_ndis_ver.ndis_minor_ver =
                        ndis_version & 0xFFFF;

        /* Send the init request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND, 0);
        if (ret != 0)
                goto cleanup;

        /* Post the big receive buffer to NetVSP */
        if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
                net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
        else
                net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
        net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

        ret = netvsc_init_buf(device);

cleanup:
        return ret;
}
static void netvsc_disconnect_vsp(struct hv_device *device)
{
        netvsc_destroy_buf(device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct netvsc_device *net_device = net_device_ctx->nvdev;
        int i;

        netvsc_disconnect_vsp(device);

        RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

        /*
         * At this point, no one should be accessing net_device
         * except in here
         */
        netdev_dbg(ndev, "net device safe to remove\n");

        /* Now, we can close the channel safely */
        vmbus_close(device->channel);

        /* And disassociate NAPI context from device */
        for (i = 0; i < net_device->num_chn; i++)
                netif_napi_del(&net_device->chan_table[i].napi);

        /* Release all resources */
        free_netvsc_device_rcu(net_device);
}
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
                struct hv_ring_buffer_info *ring_info)
{
        u32 avail_read, avail_write;

        hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

        return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
                                         u32 index)
{
        sync_change_bit(index, net_device->send_section_map);
}
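
/*
 * The two watermarks above implement hysteresis on the transmit queue:
 * netvsc_send_pkt() stops the queue when ring space falls below
 * RING_AVAIL_PERCENT_LOWATER, and the completion path below only wakes
 * it once space rises above RING_AVAIL_PERCENT_HIWATER, so the queue
 * does not flap around a single threshold.
 */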
static void netvsc_send_tx_complete(struct netvsc_device *net_device,
                                    struct vmbus_channel *incoming_channel,
                                    struct hv_device *device,
                                    const struct vmpacket_descriptor *desc,
                                    int budget)
{
        struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
        struct net_device *ndev = hv_get_drvdata(device);
        struct vmbus_channel *channel = device->channel;
        u16 q_idx = 0;
        int queue_sends;

        /* Notify the layer above us */
        if (likely(skb)) {
                const struct hv_netvsc_packet *packet
                        = (struct hv_netvsc_packet *)skb->cb;
                u32 send_index = packet->send_buf_index;
                struct netvsc_stats *tx_stats;

                if (send_index != NETVSC_INVALID_INDEX)
                        netvsc_free_send_slot(net_device, send_index);
                q_idx = packet->q_idx;
                channel = incoming_channel;

                tx_stats = &net_device->chan_table[q_idx].tx_stats;

                u64_stats_update_begin(&tx_stats->syncp);
                tx_stats->packets += packet->total_packets;
                tx_stats->bytes += packet->total_bytes;
                u64_stats_update_end(&tx_stats->syncp);

                napi_consume_skb(skb, budget);
        }

        queue_sends =
                atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

        if (net_device->destroy && queue_sends == 0)
                wake_up(&net_device->wait_drain);

        if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
            (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
             queue_sends < 1))
                netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
}
static void netvsc_send_completion(struct netvsc_device *net_device,
                                   struct vmbus_channel *incoming_channel,
                                   struct hv_device *device,
                                   const struct vmpacket_descriptor *desc,
                                   int budget)
{
        struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
        struct net_device *ndev = hv_get_drvdata(device);

        switch (nvsp_packet->hdr.msg_type) {
        case NVSP_MSG_TYPE_INIT_COMPLETE:
        case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
        case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
        case NVSP_MSG5_TYPE_SUBCHANNEL:
                /* Copy the response back */
                memcpy(&net_device->channel_init_pkt, nvsp_packet,
                       sizeof(struct nvsp_message));
                complete(&net_device->channel_init_wait);
                break;

        case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
                netvsc_send_tx_complete(net_device, incoming_channel,
                                        device, desc, budget);
                break;

        default:
                netdev_err(ndev,
                           "Unknown send completion type %d received!!\n",
                           nvsp_packet->hdr.msg_type);
        }
}
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
        unsigned long *map_addr = net_device->send_section_map;
        unsigned int i;

        for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
                if (sync_test_and_set_bit(i, map_addr) == 0)
                        return i;
        }

        return NETVSC_INVALID_INDEX;
}
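
/*
 * netvsc_copy_to_send_buf() linearizes a scatter-gather packet into one
 * section of the GPADL-registered send buffer so the host can consume
 * it without mapping individual guest pages. Padding keeps batched
 * packets aligned to pkt_align when more transmits are expected
 * (skb->xmit_more).
 */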
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
                                   unsigned int section_index,
                                   u32 pend_size,
                                   struct hv_netvsc_packet *packet,
                                   struct rndis_message *rndis_msg,
                                   struct hv_page_buffer **pb,
                                   struct sk_buff *skb)
{
        char *start = net_device->send_buf;
        char *dest = start + (section_index * net_device->send_section_size)
                     + pend_size;
        int i;
        u32 msg_size = 0;
        u32 padding = 0;
        u32 remain = packet->total_data_buflen % net_device->pkt_align;
        u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
                packet->page_buf_cnt;

        /* Add padding */
        if (skb->xmit_more && remain && !packet->cp_partial) {
                padding = net_device->pkt_align - remain;
                rndis_msg->msg_len += padding;
                packet->total_data_buflen += padding;
        }

        for (i = 0; i < page_count; i++) {
                char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
                u32 offset = (*pb)[i].offset;
                u32 len = (*pb)[i].len;

                memcpy(dest, (src + offset), len);
                msg_size += len;
                dest += len;
        }

        if (padding) {
                memset(dest, 0, padding);
                msg_size += padding;
        }

        return msg_size;
}
static inline int netvsc_send_pkt(
        struct hv_device *device,
        struct hv_netvsc_packet *packet,
        struct netvsc_device *net_device,
        struct hv_page_buffer **pb,
        struct sk_buff *skb)
{
        struct nvsp_message nvmsg;
        struct netvsc_channel *nvchan
                = &net_device->chan_table[packet->q_idx];
        struct vmbus_channel *out_channel = nvchan->channel;
        struct net_device *ndev = hv_get_drvdata(device);
        struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
        u64 req_id;
        int ret;
        struct hv_page_buffer *pgbuf;
        u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);

        nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
        if (skb != NULL) {
                /* 0 is RMC_DATA; */
                nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
        } else {
                /* 1 is RMC_CONTROL; */
                nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
        }

        nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
                packet->send_buf_index;
        if (packet->send_buf_index == NETVSC_INVALID_INDEX)
                nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
        else
                nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
                        packet->total_data_buflen;

        req_id = (ulong)skb;

        if (out_channel->rescind)
                return -ENODEV;

        if (packet->page_buf_cnt) {
                pgbuf = packet->cp_partial ? (*pb) +
                        packet->rmsg_pgcnt : (*pb);
                ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
                                                      pgbuf,
                                                      packet->page_buf_cnt,
                                                      &nvmsg,
                                                      sizeof(struct nvsp_message),
                                                      req_id,
                                                      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        } else {
                ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
                                           sizeof(struct nvsp_message),
                                           req_id,
                                           VM_PKT_DATA_INBAND,
                                           VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        }

        if (ret == 0) {
                atomic_inc_return(&nvchan->queue_sends);

                if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
                        netif_tx_stop_queue(txq);
        } else if (ret == -EAGAIN) {
                netif_tx_stop_queue(txq);
                if (atomic_read(&nvchan->queue_sends) < 1) {
                        netif_tx_wake_queue(txq);
                        ret = -ENOSPC;
                }
        } else {
                netdev_err(ndev, "Unable to send packet %p ret %d\n",
                           packet, ret);
        }

        return ret;
}
/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
                                struct sk_buff **msd_skb,
                                struct multi_send_data *msdp)
{
        *msd_skb = msdp->skb;
        *msd_send = msdp->pkt;
        msdp->skb = NULL;
        msdp->pkt = NULL;
        msdp->count = 0;
}
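
/*
 * netvsc_send() batches small packets into a shared send-buffer section
 * (multi-send data, "msd") while the stack signals more traffic via
 * skb->xmit_more. A batch is flushed either when the next packet cannot
 * fit in the current section or when xmit_more is false.
 */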
int netvsc_send(struct hv_device *device,
                struct hv_netvsc_packet *packet,
                struct rndis_message *rndis_msg,
                struct hv_page_buffer **pb,
                struct sk_buff *skb)
{
        struct netvsc_device *net_device;
        int ret = 0;
        struct netvsc_channel *nvchan;
        u32 pktlen = packet->total_data_buflen, msd_len = 0;
        unsigned int section_index = NETVSC_INVALID_INDEX;
        struct multi_send_data *msdp;
        struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
        struct sk_buff *msd_skb = NULL;
        bool try_batch;
        bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

        net_device = get_outbound_net_device(device);
        if (!net_device)
                return -ENODEV;

        /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
         * here before the negotiation with the host is finished and
         * send_section_map may not be allocated yet.
         */
        if (!net_device->send_section_map)
                return -EAGAIN;

        nvchan = &net_device->chan_table[packet->q_idx];
        packet->send_buf_index = NETVSC_INVALID_INDEX;
        packet->cp_partial = false;

        /* Send control message directly without accessing msd (Multi-Send
         * Data) field which may be changed during data packet processing.
         */
        if (!skb) {
                cur_send = packet;
                goto send_now;
        }

        /* batch packets in send buffer if possible */
        msdp = &nvchan->msd;
        if (msdp->pkt)
                msd_len = msdp->pkt->total_data_buflen;

        try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
        if (try_batch && msd_len + pktlen + net_device->pkt_align <
            net_device->send_section_size) {
                section_index = msdp->pkt->send_buf_index;

        } else if (try_batch && msd_len + packet->rmsg_size <
                   net_device->send_section_size) {
                section_index = msdp->pkt->send_buf_index;
                packet->cp_partial = true;

        } else if (pktlen + net_device->pkt_align <
                   net_device->send_section_size) {
                section_index = netvsc_get_next_send_section(net_device);
                if (section_index != NETVSC_INVALID_INDEX) {
                        move_pkt_msd(&msd_send, &msd_skb, msdp);
                        msd_len = 0;
                }
        }

        if (section_index != NETVSC_INVALID_INDEX) {
                netvsc_copy_to_send_buf(net_device,
                                        section_index, msd_len,
                                        packet, rndis_msg, pb, skb);

                packet->send_buf_index = section_index;

                if (packet->cp_partial) {
                        packet->page_buf_cnt -= packet->rmsg_pgcnt;
                        packet->total_data_buflen = msd_len + packet->rmsg_size;
                } else {
                        packet->page_buf_cnt = 0;
                        packet->total_data_buflen += msd_len;
                }

                if (msdp->pkt) {
                        packet->total_packets += msdp->pkt->total_packets;
                        packet->total_bytes += msdp->pkt->total_bytes;
                }

                if (msdp->skb)
                        dev_consume_skb_any(msdp->skb);

                if (xmit_more && !packet->cp_partial) {
                        msdp->skb = skb;
                        msdp->pkt = packet;
                        msdp->count++;
                } else {
                        cur_send = packet;
                        msdp->skb = NULL;
                        msdp->pkt = NULL;
                        msdp->count = 0;
                }
        } else {
                move_pkt_msd(&msd_send, &msd_skb, msdp);
                cur_send = packet;
        }

        if (msd_send) {
                int m_ret = netvsc_send_pkt(device, msd_send, net_device,
                                            NULL, msd_skb);

                if (m_ret != 0) {
                        netvsc_free_send_slot(net_device,
                                              msd_send->send_buf_index);
                        dev_kfree_skb_any(msd_skb);
                }
        }

send_now:
        if (cur_send)
                ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

        if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
                netvsc_free_send_slot(net_device, section_index);

        return ret;
}
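
/*
 * Receive completions are batched: instead of sending one completion
 * message per packet when the ring may be busy, they are staged in a
 * per-channel circular buffer (multi_recv_comp) and drained back to the
 * host by netvsc_chk_recv_comp() below.
 */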
static int netvsc_send_recv_completion(struct vmbus_channel *channel,
                                       u64 transaction_id, u32 status)
{
        struct nvsp_message recvcompMessage;
        int ret;

        recvcompMessage.hdr.msg_type =
                NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

        recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

        /* Send the completion */
        ret = vmbus_sendpacket(channel, &recvcompMessage,
                               sizeof(struct nvsp_message_header) + sizeof(u32),
                               transaction_id, VM_PKT_COMP, 0);

        return ret;
}
static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
                                        u32 *filled, u32 *avail)
{
        struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        u32 first = mrc->first;
        u32 next = mrc->next;

        *filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
                  next - first;

        *avail = NETVSC_RECVSLOT_MAX - *filled - 1;
}
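
/*
 * Worked example (hypothetical ring size for illustration only): if
 * NETVSC_RECVSLOT_MAX were 8, first = 6 and next = 2, the occupied
 * slots wrap past the end of the ring, so filled = 8 - 6 + 2 = 4 and
 * avail = 8 - 4 - 1 = 3. One slot is always kept free so that a full
 * ring can be distinguished from an empty one.
 */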
/* Read the first filled slot, no change to index */
static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
                                                         *nvdev, u16 q_idx)
{
        struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        u32 filled, avail;

        if (unlikely(!mrc->buf))
                return NULL;

        count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
        if (!filled)
                return NULL;

        return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
}
/* Put the first filled slot back to available pool */
static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
{
        struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        int num_recv;

        mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;

        num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);

        if (nvdev->destroy && num_recv == 0)
                wake_up(&nvdev->wait_drain);
}
/* Check and send pending recv completions */
static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
                                 struct vmbus_channel *channel, u16 q_idx)
{
        struct recv_comp_data *rcd;
        int ret;

        while (true) {
                rcd = read_recv_comp_slot(nvdev, q_idx);
                if (!rcd)
                        break;

                ret = netvsc_send_recv_completion(channel, rcd->tid,
                                                  rcd->status);
                if (ret)
                        break;

                put_recv_comp_slot(nvdev, q_idx);
        }
}
#define NETVSC_RCD_WATERMARK 80

/* Get next available slot */
static inline struct recv_comp_data *get_recv_comp_slot(
        struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
{
        struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        u32 filled, avail, next;
        struct recv_comp_data *rcd;

        if (unlikely(!nvdev->recv_section))
                return NULL;

        if (unlikely(!mrc->buf))
                return NULL;

        if (atomic_read(&nvdev->num_outstanding_recvs) >
            nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100)
                netvsc_chk_recv_comp(nvdev, channel, q_idx);

        count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
        if (!avail)
                return NULL;

        next = mrc->next;
        rcd = mrc->buf + next * sizeof(struct recv_comp_data);
        mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;

        atomic_inc(&nvdev->num_outstanding_recvs);

        return rcd;
}
static int netvsc_receive(struct net_device *ndev,
                          struct netvsc_device *net_device,
                          struct net_device_context *net_device_ctx,
                          struct hv_device *device,
                          struct vmbus_channel *channel,
                          const struct vmpacket_descriptor *desc,
                          struct nvsp_message *nvsp)
{
        const struct vmtransfer_page_packet_header *vmxferpage_packet
                = container_of(desc, const struct vmtransfer_page_packet_header, d);
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        char *recv_buf = net_device->recv_buf;
        u32 status = NVSP_STAT_SUCCESS;
        int i;
        int count = 0;
        int ret;

        /* Make sure this is a valid nvsp packet */
        if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
                netif_err(net_device_ctx, rx_err, ndev,
                          "Unknown nvsp packet type received %u\n",
                          nvsp->hdr.msg_type);
                return 0;
        }

        if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
                netif_err(net_device_ctx, rx_err, ndev,
                          "Invalid xfer page set id - expecting %x got %x\n",
                          NETVSC_RECEIVE_BUFFER_ID,
                          vmxferpage_packet->xfer_pageset_id);
                return 0;
        }

        count = vmxferpage_packet->range_cnt;

        /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
        for (i = 0; i < count; i++) {
                void *data = recv_buf
                        + vmxferpage_packet->ranges[i].byte_offset;
                u32 buflen = vmxferpage_packet->ranges[i].byte_count;

                /* Pass it to the upper layer */
                status = rndis_filter_receive(ndev, net_device, device,
                                              channel, data, buflen);
        }

        if (net_device->chan_table[q_idx].mrc.buf) {
                struct recv_comp_data *rcd;

                rcd = get_recv_comp_slot(net_device, channel, q_idx);
                if (rcd) {
                        rcd->tid = vmxferpage_packet->d.trans_id;
                        rcd->status = status;
                } else {
                        netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
                                   q_idx, vmxferpage_packet->d.trans_id);
                }
        } else {
                ret = netvsc_send_recv_completion(channel,
                                                  vmxferpage_packet->d.trans_id,
                                                  status);
                if (ret)
                        netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
                                   q_idx, vmxferpage_packet->d.trans_id, ret);
        }

        return count;
}
static void netvsc_send_table(struct hv_device *hdev,
                              struct nvsp_message *nvmsg)
{
        struct net_device *ndev = hv_get_drvdata(hdev);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        int i;
        u32 count, *tab;

        count = nvmsg->msg.v5_msg.send_table.count;
        if (count != VRSS_SEND_TAB_SIZE) {
                netdev_err(ndev, "Received wrong send-table size:%u\n", count);
                return;
        }

        tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
                      nvmsg->msg.v5_msg.send_table.offset);

        for (i = 0; i < count; i++)
                net_device_ctx->tx_send_table[i] = tab[i];
}
static void netvsc_send_vf(struct net_device_context *net_device_ctx,
                           struct nvsp_message *nvmsg)
{
        net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
        net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}

static inline void netvsc_receive_inband(struct hv_device *hdev,
                                         struct net_device_context *net_device_ctx,
                                         struct nvsp_message *nvmsg)
{
        switch (nvmsg->hdr.msg_type) {
        case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
                netvsc_send_table(hdev, nvmsg);
                break;

        case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
                netvsc_send_vf(net_device_ctx, nvmsg);
                break;
        }
}
static int netvsc_process_raw_pkt(struct hv_device *device,
                                  struct vmbus_channel *channel,
                                  struct netvsc_device *net_device,
                                  struct net_device *ndev,
                                  const struct vmpacket_descriptor *desc,
                                  int budget)
{
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct nvsp_message *nvmsg = hv_pkt_data(desc);

        switch (desc->type) {
        case VM_PKT_COMP:
                netvsc_send_completion(net_device, channel, device,
                                       desc, budget);
                break;

        case VM_PKT_DATA_USING_XFER_PAGES:
                return netvsc_receive(ndev, net_device, net_device_ctx,
                                      device, channel, desc, nvmsg);

        case VM_PKT_DATA_INBAND:
                netvsc_receive_inband(device, net_device_ctx, nvmsg);
                break;

        default:
                netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
                           desc->type, desc->trans_id);
                break;
        }

        return 0;
}
static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
        struct vmbus_channel *primary = channel->primary_channel;

        return primary ? primary->device_obj : channel->device_obj;
}
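
/*
 * Subchannels do not carry their own device_obj, so the owning
 * hv_device is always looked up through the primary channel when one
 * exists. Each channel gets its own NAPI context (netvsc_channel::napi),
 * so receive processing for subchannels runs independently.
 */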
/* Network processing softirq
 * Process data in incoming ring buffer from host
 * Stops when ring is empty or budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
        struct netvsc_channel *nvchan
                = container_of(napi, struct netvsc_channel, napi);
        struct vmbus_channel *channel = nvchan->channel;
        struct hv_device *device = netvsc_channel_to_device(channel);
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        struct net_device *ndev = hv_get_drvdata(device);
        struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
        int work_done = 0;

        /* If starting a new interval */
        if (!nvchan->desc)
                nvchan->desc = hv_pkt_iter_first(channel);

        while (nvchan->desc && work_done < budget) {
                work_done += netvsc_process_raw_pkt(device, channel, net_device,
                                                    ndev, nvchan->desc, budget);
                nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
        }

        /* If receive ring was exhausted
         * and not doing busy poll
         * then re-enable host interrupts
         * and reschedule if ring is not empty.
         */
        if (work_done < budget &&
            napi_complete_done(napi, work_done) &&
            hv_end_read(&channel->inbound) != 0)
                napi_reschedule(napi);

        netvsc_chk_recv_comp(net_device, channel, q_idx);

        /* Driver may overshoot since multiple packets per descriptor */
        return min(work_done, budget);
}
/* Call back when data is available in host ring buffer.
 * Processing is deferred until network softirq (NAPI)
 */
void netvsc_channel_cb(void *context)
{
        struct netvsc_channel *nvchan = context;

        if (napi_schedule_prep(&nvchan->napi)) {
                /* disable interrupts from host */
                hv_begin_read(&nvchan->channel->inbound);

                __napi_schedule(&nvchan->napi);
        }
}
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device,
                      const struct netvsc_device_info *device_info)
{
        int i, ret = 0;
        int ring_size = device_info->ring_size;
        struct netvsc_device *net_device;
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);

        net_device = alloc_net_device();
        if (!net_device)
                return -ENOMEM;

        net_device->ring_size = ring_size;

        /* Because the device uses NAPI, all the interrupt batching and
         * control is done via Net softirq, not the channel handling
         */
        set_channel_read_mode(device->channel, HV_CALL_ISR);

        /* If we're reopening the device we may have multiple queues, fill the
         * chn_table with the default channel to use it before subchannels are
         * opened.
         * Initialize the channel state before we open;
         * we can be interrupted as soon as we open the channel.
         */
        for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
                struct netvsc_channel *nvchan = &net_device->chan_table[i];

                nvchan->channel = device->channel;
        }

        /* Enable NAPI handler before init callbacks */
        netif_napi_add(ndev, &net_device->chan_table[0].napi,
                       netvsc_poll, NAPI_POLL_WEIGHT);

        /* Open the channel */
        ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
                         ring_size * PAGE_SIZE, NULL, 0,
                         netvsc_channel_cb,
                         net_device->chan_table);

        if (ret != 0) {
                netif_napi_del(&net_device->chan_table[0].napi);
                netdev_err(ndev, "unable to open channel: %d\n", ret);
                goto cleanup;
        }

        /* Channel is opened */
        netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

        napi_enable(&net_device->chan_table[0].napi);

        /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
         * populated.
         */
        rcu_assign_pointer(net_device_ctx->nvdev, net_device);

        /* Connect with the NetVsp */
        ret = netvsc_connect_vsp(device);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to connect to NetVSP - %d\n", ret);
                goto close;
        }

        return ret;

close:
        netif_napi_del(&net_device->chan_table[0].napi);

        /* Now, we can close the channel safely */
        vmbus_close(device->channel);

cleanup:
        free_netvsc_device(&net_device->rcu);

        return ret;
}