/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);

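/*
 * Device table mapping well-known offer GUIDs to device types. The
 * perf_device flag marks performance-critical devices whose channel
 * interrupts init_vp_index() spreads across NUMA nodes and CPUs;
 * non-performance devices stay bound to CPU 0.
 */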
static const struct vmbus_device vmbus_devs[] = {
	/* IDE */
	{ .dev_type = HV_IDE,
	  HV_IDE_GUID,
	  .perf_device = true,
	},

	/* SCSI */
	{ .dev_type = HV_SCSI,
	  HV_SCSI_GUID,
	  .perf_device = true,
	},

	/* Fibre Channel */
	{ .dev_type = HV_FC,
	  HV_SYNTHFC_GUID,
	  .perf_device = true,
	},

	/* Synthetic NIC */
	{ .dev_type = HV_NIC,
	  HV_NIC_GUID,
	  .perf_device = true,
	},

	/* Network Direct */
	{ .dev_type = HV_ND,
	  HV_ND_GUID,
	  .perf_device = true,
	},

	/* PCIE */
	{ .dev_type = HV_PCIE,
	  HV_PCIE_GUID,
	  .perf_device = true,
	},

	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB,
	  HV_SYNTHVID_GUID,
	  .perf_device = false,
	},

	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD,
	  HV_KBD_GUID,
	  .perf_device = false,
	},

	/* Synthetic Mouse */
	{ .dev_type = HV_MOUSE,
	  HV_MOUSE_GUID,
	  .perf_device = false,
	},

	/* KVP */
	{ .dev_type = HV_KVP,
	  HV_KVP_GUID,
	  .perf_device = false,
	},

	/* Time Synch */
	{ .dev_type = HV_TS,
	  HV_TS_GUID,
	  .perf_device = false,
	},

	/* Heartbeat */
	{ .dev_type = HV_HB,
	  HV_HEART_BEAT_GUID,
	  .perf_device = false,
	},

	/* Shutdown */
	{ .dev_type = HV_SHUTDOWN,
	  HV_SHUTDOWN_GUID,
	  .perf_device = false,
	},

	/* File copy */
	{ .dev_type = HV_FCOPY,
	  HV_FCOPY_GUID,
	  .perf_device = false,
	},

	/* Backup */
	{ .dev_type = HV_BACKUP,
	  HV_VSS_GUID,
	  .perf_device = false,
	},

	/* Dynamic Memory */
	{ .dev_type = HV_DM,
	  HV_DM_GUID,
	  .perf_device = false,
	},

	/* Unknown GUID */
	{ .dev_type = HV_UNKNOWN,
	  .perf_device = false,
	},
};

static const struct {
	uuid_le guid;
} vmbus_unsupported_devs[] = {
	{ HV_AVMA1_GUID },
	{ HV_AVMA2_GUID },
	{ HV_RDV_GUID	},
};

/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {

		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

static bool is_unsupported_vmbus_devs(const uuid_le *guid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
		if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid))
			return true;
	return false;
}

static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
	const uuid_le *guid = &channel->offermsg.offer.if_type;
	u16 i;

	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
		return HV_UNKNOWN;

	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
			return i;
	}
	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}
/**
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework versions we can support, in decreasing order
 * @fw_vercnt: Number of entries in @fw_version
 * @srv_version: The service versions we can support, in decreasing order
 * @srv_vercnt: Number of entries in @srv_version
 * @nego_fw_version: On return, the selected framework version
 * @nego_srv_version: On return, the selected service version
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * Set up and fill in default negotiate response message.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				u8 *buf, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	icmsghdrp->icmsgsize = 0x10;
	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */
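	/*
	 * Each entry of fw_version/srv_version encodes a version as
	 * (major << 16) | minor, so e.g. framework version 3.0 is
	 * 0x00030000 and service version 1.1 is 0x00010001.
	 */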

	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		for (j = negop->icframe_vercnt;
			(j < negop->icframe_vercnt + negop->icmsg_vercnt);
			j++) {

			if ((negop->icversion_data[j].major == srv_major) &&
				(negop->icversion_data[j].minor == srv_minor)) {

				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */

fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;
	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);

/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	channel->acquire_ring_lock = true;
	spin_lock_init(&channel->inbound_lock);
	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

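	/*
	 * Each channel gets its own tasklet to process incoming events,
	 * so one channel's event handling doesn't serialize behind
	 * another's.
	 */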
	tasklet_init(&channel->callback_event,
		     vmbus_on_event, (unsigned long)channel);

	return channel;
}

/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	tasklet_kill(&channel->callback_event);
	kfree(channel);
}

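/*
 * percpu_channel_enq()/percpu_channel_deq() always run on the channel's
 * target CPU (either directly or via smp_call_function_single()), so the
 * per-cpu channel list can be modified without locking.
 */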
static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);

	list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
}

static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del(&channel->percpu_list);
}

static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
		       true);
}

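/*
 * Temporarily quiesce a channel's event tasklet. Calls must be paired:
 * hv_event_tasklet_enable() reschedules the tasklet in case events
 * arrived while it was disabled.
 */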
void hv_event_tasklet_disable(struct vmbus_channel *channel)
{
	tasklet_disable(&channel->callback_event);
}

void hv_event_tasklet_enable(struct vmbus_channel *channel)
{
	tasklet_enable(&channel->callback_event);

	/* In case there is any pending event */
	tasklet_schedule(&channel->callback_event);
}

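/*
 * Final teardown of a rescinded channel: detach it from the per-cpu event
 * list (with its tasklet quiesced, so no event processing races with the
 * removal), unlink it from the channel lists, release the relid back to
 * the host and free the channel object.
 */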
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
	unsigned long flags;
	struct vmbus_channel *primary_channel;

	BUG_ON(!channel->rescind);
	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));

	hv_event_tasklet_disable(channel);
	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}
	hv_event_tasklet_enable(channel);

	if (channel->primary_channel == NULL) {
		list_del(&channel->listentry);

		primary_channel = channel;
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		primary_channel->num_sc--;
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}

	/*
	 * Free the CPU bit so that init_vp_index() can reuse it when the
	 * sub-channel is re-created, e.g. when drivers like hv_netvsc are
	 * reloaded.
	 */
	if (channel->affinity_policy == HV_LOCALIZED)
		cpumask_clear_cpu(channel->target_cpu,
				  &primary_channel->alloced_cpus_in_node);

	vmbus_release_relid(relid);

	free_channel(channel);
}

void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
		listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);
}

/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
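/*
 * The first offer for a given if_type/if_instance pair creates a primary
 * channel and registers a child device on the bus; a repeat offer with a
 * non-zero sub_channel_index is a sub-channel and is attached to the
 * primary channel's sc_list instead.
 */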
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	unsigned long flags;
	u16 dev_type;
	int ret;

	/* Make sure this is a new offer */
	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
			newchannel->offermsg.offer.if_type) &&
			!uuid_le_cmp(channel->offermsg.offer.if_instance,
				newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	mutex_unlock(&vmbus_connection.channel_mutex);

	if (!fnew) {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			channel->num_sc++;
			spin_unlock_irqrestore(&channel->lock, flags);
		} else
			goto err_free_chan;
	}

	dev_type = hv_get_dev_type(newchannel);

	init_vp_index(newchannel, dev_type);

	hv_event_tasklet_disable(newchannel);
	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_enq,
					 newchannel, true);
	} else {
		percpu_channel_enq(newchannel);
		put_cpu();
	}
	hv_event_tasklet_enable(newchannel);

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (!fnew) {
		if (channel->sc_creation_callback != NULL)
			channel->sc_creation_callback(newchannel);
		return;
	}

	/*
	 * Start the process of binding this offer to the driver.
	 * We need to set the device_obj field before calling
	 * vmbus_device_register().
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = dev_type;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	ret = vmbus_device_register(newchannel->device_obj);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
			newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}
	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);
	list_del(&newchannel->listentry);
	mutex_unlock(&vmbus_connection.channel_mutex);

	hv_event_tasklet_disable(newchannel);
	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq, newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}
	hv_event_tasklet_enable(newchannel);

	vmbus_release_relid(newchannel->offermsg.child_relid);

err_free_chan:
	free_channel(newchannel);
}

/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to a VCPU.
 * We do this in a hierarchical fashion:
 * first distribute the primary channels across available NUMA nodes,
 * then distribute the sub-channels among the CPUs in the NUMA node
 * assigned to the primary channel.
 *
 * For pre-Win8 hosts or non-performance-critical channels we assign the
 * first CPU in the first NUMA node.
 */
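/*
 * For example, in a two-node guest successive primary channels land on
 * node 0, node 1, node 0, ... in turn, and each channel's sub-channels
 * are then spread over the CPUs of the primary's node, tracked via
 * hv_context.hv_numa_map.
 */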
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
	u32 cur_cpu;
	bool perf_chn = vmbus_devs[dev_type].perf_device;
	struct vmbus_channel *primary = channel->primary_channel;
	int next_node;
	struct cpumask available_mask;
	struct cpumask *alloced_mask;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
		channel->target_vp = hv_context.vp_index[0];
		return;
	}

	/*
	 * Based on the channel affinity policy, we will assign the NUMA
	 * nodes.
	 */

	if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
		while (true) {
			next_node = next_numa_node_id++;
			if (next_node == nr_node_ids) {
				next_node = next_numa_node_id = 0;
				continue;
			}
			if (cpumask_empty(cpumask_of_node(next_node)))
				continue;
			break;
		}
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(&available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = -1;

	if (primary->affinity_policy == HV_LOCALIZED) {
		/*
		 * Normally the Hyper-V host doesn't create more subchannels
		 * than there are VCPUs in the node, but it is possible when
		 * not all present VCPUs in the node are initialized by the
		 * guest. Clear alloced_cpus_in_node to start over.
		 */
		if (cpumask_equal(&primary->alloced_cpus_in_node,
				  cpumask_of_node(primary->numa_node)))
			cpumask_clear(&primary->alloced_cpus_in_node);
	}

	while (true) {
		cur_cpu = cpumask_next(cur_cpu, &available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(&available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		if (primary->affinity_policy == HV_LOCALIZED) {
			/*
			 * NOTE: for a sub-channel, the bit in
			 * primary->alloced_cpus_in_node is cleared in
			 * hv_process_channel_removal(), so when drivers
			 * like hv_netvsc are reloaded in an SMP guest we
			 * can re-allocate the bit here.
			 */
			if (!cpumask_test_cpu(cur_cpu,
					      &primary->alloced_cpus_in_node)) {
				cpumask_set_cpu(cur_cpu,
						&primary->alloced_cpus_in_node);
				cpumask_set_cpu(cur_cpu, alloced_mask);
				break;
			}
		} else {
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
}

static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
	 * used for initial contact or to CPU0, depending on host version.
	 * When we're crashing on a different CPU, let's hope that the IRQ
	 * handler on the CPU which receives CHANNELMSG_UNLOAD_RESPONSE is
	 * still functional and vmbus_unload_response() will complete
	 * vmbus_connection.unload_event. If not, the last thing we can do is
	 * read message pages for all CPUs directly.
	 */
	while (1) {
		if (completion_done(&vmbus_connection.unload_event))
			break;

		for_each_online_cpu(cpu) {
			struct hv_per_cpu_context *hv_cpu
				= per_cpu_ptr(hv_context.cpu_context, cpu);

			page_addr = hv_cpu->synic_message_page;
			msg = (struct hv_message *)page_addr
				+ VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			vmbus_signal_eom(msg, message_type);
		}

		mdelay(10);
	}

	/*
	 * We're crashing and already got the UNLOAD_RESPONSE; clean up any
	 * pending messages on all CPUs so that we can receive new messages
	 * after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		page_addr = hv_cpu->synic_message_page;
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}

/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wake up the waiting thread.
	 * Once we successfully unload, we can clean up the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}

void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	/*
	 * vmbus_initiate_unload() is also called on crash, and the crash can
	 * happen in interrupt context, where scheduling is impossible.
	 */
	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}

/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * By default we set up state to enable batched reading.
	 * A specific service can choose to disable this prior
	 * to opening the channel.
	 */
	newchannel->batched_reading = true;

	/*
	 * Set up state for signalling the host.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
				(ALIGN((unsigned long)
				&newchannel->sig_buf,
				HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
				offer->connection_id;
	}

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}

/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	unsigned long flags;
	struct device *dev;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);

	if (channel == NULL) {
		/*
		 * This should be impossible, because vmbus_process_offer()
		 * has already invoked vmbus_release_relid() on error.
		 */
		goto out;
	}

	spin_lock_irqsave(&channel->lock, flags);
	channel->rescind = true;
	spin_unlock_irqrestore(&channel->lock, flags);

	vmbus_rescind_cleanup(channel);

	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);
			goto out;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else {
		hv_process_channel_removal(channel,
			channel->offermsg.child_relid);
	}

out:
	mutex_unlock(&vmbus_connection.channel_mutex);
}

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	mutex_lock(&vmbus_connection.channel_mutex);

	BUG_ON(!is_hvsock_channel(channel));

	channel->rescind = true;
	vmbus_device_unregister(channel->device_obj);

	mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);

/*
 * vmbus_onoffers_delivered - This is invoked when all offers have been
 * delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}

/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we receive a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we receive a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we receive a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we receive a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			      version_response,
			      sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/* Channel message dispatch table */
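/*
 * The second field is the handler type: handlers marked 0 may block and
 * are deferred to a work queue by the message DPC (see vmbus_on_msg_dpc()
 * in vmbus_drv.c), while handlers marked 1 must not sleep and are invoked
 * directly; the latter only complete() a waiting request.
 */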
struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT] = {
	{CHANNELMSG_INVALID,			0, NULL},
	{CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer},
	{CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind},
	{CHANNELMSG_REQUESTOFFERS,		0, NULL},
	{CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered},
	{CHANNELMSG_OPENCHANNEL,		0, NULL},
	{CHANNELMSG_OPENCHANNEL_RESULT,		1, vmbus_onopen_result},
	{CHANNELMSG_CLOSECHANNEL,		0, NULL},
	{CHANNELMSG_GPADL_HEADER,		0, NULL},
	{CHANNELMSG_GPADL_BODY,			0, NULL},
	{CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created},
	{CHANNELMSG_GPADL_TEARDOWN,		0, NULL},
	{CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown},
	{CHANNELMSG_RELID_RELEASED,		0, NULL},
	{CHANNELMSG_INITIATE_CONTACT,		0, NULL},
	{CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response},
	{CHANNELMSG_UNLOAD,			0, NULL},
	{CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response},
	{CHANNELMSG_18,				0, NULL},
	{CHANNELMSG_19,				0, NULL},
	{CHANNELMSG_20,				0, NULL},
	{CHANNELMSG_TL_CONNECT_REQUEST,		0, NULL},
};

/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;
	int size;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
	size = msg->header.payload_size;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d size %d\n",
			   hdr->msgtype, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.payload, size);
		return;
	}

	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}

/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
			     true);
	if (ret != 0) {
		pr_err("Unable to request offers - %d\n", ret);

		goto cleanup;
	}

cleanup:
	kfree(msginfo);

	return ret;
}

/*
 * Retrieve the (sub-)channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally among all available channels.
 */
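/*
 * Selection order: prefer a sub-channel whose target VP matches the CPU
 * we are currently running on; otherwise pick sub-channels round-robin,
 * driven by primary->next_oc, falling back to the primary channel itself.
 */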
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_context.vp_index[get_cpu()];
	put_cpu();
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);

static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);

bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);