/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
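
/*
 * Example encodings: DYNMEM_MAKE_VERSION(2, 0) == 0x00020000 and
 * DYNMEM_MAKE_VERSION(0, 3) == 0x00000003. Note that
 * DYNMEM_MINOR_VERSION() masks only the low byte, which is enough to
 * cover all the minor numbers defined below.
 */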

enum {
        DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
        DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
        DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

        DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
        DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
        DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

        DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};

/*
 * Message Types
 */

enum dm_message_type {
        /*
         * Version 0.3
         */
        DM_ERROR                        = 0,
        DM_VERSION_REQUEST              = 1,
        DM_VERSION_RESPONSE             = 2,
        DM_CAPABILITIES_REPORT          = 3,
        DM_CAPABILITIES_RESPONSE        = 4,
        DM_STATUS_REPORT                = 5,
        DM_BALLOON_REQUEST              = 6,
        DM_BALLOON_RESPONSE             = 7,
        DM_UNBALLOON_REQUEST            = 8,
        DM_UNBALLOON_RESPONSE           = 9,
        DM_MEM_HOT_ADD_REQUEST          = 10,
        DM_MEM_HOT_ADD_RESPONSE         = 11,
        DM_VERSION_03_MAX               = 11,
        /*
         * Version 1.0.
         */
        DM_INFO_MESSAGE                 = 12,
        DM_VERSION_1_MAX                = 12
};

/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
        struct {
                __u16 minor_version;
                __u16 major_version;
        };
        __u32 version;
} __packed;

union dm_caps {
        struct {
                __u64 balloon:1;
                __u64 hot_add:1;
                /*
                 * To support guests that may have alignment
                 * limitations on hot-add, the guest can specify
                 * its alignment requirements; a value of n
                 * represents an alignment of 2^n in megabytes.
                 */
                __u64 hot_add_alignment:4;
                __u64 reservedz:58;
        } cap_bits;
        __u64 caps;
} __packed;
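
/* Example: hot_add_alignment == 7 requests 2^7 == 128 MB alignment. */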

union dm_mem_page_range {
        struct {
                /*
                 * The PFN of the first page in the range.
                 * 40 bits is the architectural limit of a PFN
                 * for AMD64.
                 */
                __u64 start_page:40;
                /*
                 * The number of pages in the range.
                 */
                __u64 page_cnt:24;
        } finfo;
        __u64  page_range;
} __packed;
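
/*
 * With 4 KiB pages, a 40-bit PFN covers up to 2^52 bytes of guest
 * physical address space, and page_cnt lets a single entry describe
 * up to 2^24 pages (64 GiB).
 */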

/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
        __u16 type;
        __u16 size;
        __u32 trans_id;
} __packed;
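
/* On the wire the header is 8 bytes: type (2), size (2), trans_id (4). */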

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
        struct dm_header hdr;
        __u8 data[]; /* enclosed message */
} __packed;

/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
        struct dm_header hdr;
        union dm_version version;
        __u32 is_last_attempt:1;
        __u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest to
 * indicate whether the host has accepted the version sent by the
 * guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should re-try with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
        struct dm_header hdr;
        union dm_caps caps;
        __u64 min_page_cnt;
        __u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *                 in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *          calculated as File Cache Page Fault Count - Page Read Count.
 *          This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
        struct dm_header hdr;
        __u64 num_avail;
        __u64 num_committed;
        __u64 page_file_size;
        __u64 zero_free;
        __u32 page_file_writes;
        __u32 io_diff;
} __packed;

/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
        struct dm_header hdr;
        __u32 num_pages;
        __u32 reservedz;
} __packed;

/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
        struct dm_header hdr;
        __u32 reservedz;
        __u32 more_pages:1;
        __u32 range_count:31;
        union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
        struct dm_header hdr;
        __u32 more_pages:1;
        __u32 reservedz:31;
        __u32 range_count;
        union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
        struct dm_header hdr;
} __packed;

/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */

struct dm_hot_add {
        struct dm_header hdr;
        union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
        struct dm_header hdr;
        __u32 page_count;
        __u32 result;
} __packed;

/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
        INFO_TYPE_MAX_PAGE_CNT = 0,
        MAX_INFO_TYPE
};

/*
 * Header for the information message.
 */

struct dm_info_header {
        enum dm_info_type type;
        __u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
        struct dm_header hdr;
        __u32 reserved;
        __u32 info_size;
        __u8  info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long covered_start_pfn;
        unsigned long covered_end_pfn;
        unsigned long ha_end_pfn;
        unsigned long end_pfn;
        /*
         * A list of gaps.
         */
        struct list_head gap_list;
};

struct hv_hotadd_gap {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};

struct balloon_state {
        __u32 num_pages;
        struct work_struct wrk;
};

struct hot_add_wrk {
        union dm_mem_page_range ha_page_range;
        union dm_mem_page_range ha_region_range;
        struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to the host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set, attempt memory hot add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);
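
/*
 * With 4 KiB pages this is a 20 KiB ring buffer; vmbus_open() below uses
 * it for both the send and the receive ring.
 */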

/*
 * Driver specific state.
 */

enum hv_dm_state {
        DM_INITIALIZING = 0,
        DM_INITIALIZED,
        DM_BALLOON_UP,
        DM_BALLOON_DOWN,
        DM_HOT_ADD,
        DM_INIT_ERROR
};

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
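
/*
 * PAGES_IN_2M: 2 MiB expressed in 4 KiB pages. HA_CHUNK: 32K pages,
 * i.e. the 128 MiB granularity used for memory hot-add.
 */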
#define PAGES_IN_2M     512
#define HA_CHUNK (32 * 1024)

struct hv_dynmem_device {
        struct hv_device *dev;
        enum hv_dm_state state;
        struct completion host_event;
        struct completion config_event;

        /*
         * Number of pages we have currently ballooned out.
         */
        unsigned int num_pages_ballooned;
        unsigned int num_pages_onlined;
        unsigned int num_pages_added;

        /*
         * State to manage the ballooning (up) operation.
         */
        struct balloon_state balloon_wrk;

        /*
         * State to execute the "hot-add" operation.
         */
        struct hot_add_wrk ha_wrk;

        /*
         * This state tracks if the host has specified a hot-add
         * region.
         */
        bool host_specified_ha_region;

        /*
         * State to synchronize hot-add.
         */
        struct completion  ol_waitevent;
        bool ha_waiting;
        /*
         * This thread handles hot-add
         * requests from the host as well as notifying
         * the host with regards to memory pressure in
         * the guest.
         */
        struct task_struct *thread;

        /*
         * Protects ha_region_list, the num_pages_onlined counter, and
         * individual regions from ha_region_list.
         */
        spinlock_t ha_lock;

        /*
         * A list of hot-add regions.
         */
        struct list_head ha_region_list;

        /*
         * We start with the highest version we can support
         * and downgrade based on the host; we save here the
         * next version to try.
         */
        __u32 next_version;

        /*
         * The negotiated version agreed by the host.
         */
        __u32 version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
                              void *v)
{
        struct memory_notify *mem = (struct memory_notify *)v;
        unsigned long flags;

        switch (val) {
        case MEM_ONLINE:
                spin_lock_irqsave(&dm_device.ha_lock, flags);
                dm_device.num_pages_onlined += mem->nr_pages;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                /* Fall through */
        case MEM_CANCEL_ONLINE:
                if (dm_device.ha_waiting) {
                        dm_device.ha_waiting = false;
                        complete(&dm_device.ol_waitevent);
                }
                break;

        case MEM_OFFLINE:
                spin_lock_irqsave(&dm_device.ha_lock, flags);
                dm_device.num_pages_onlined -= mem->nr_pages;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                break;
        case MEM_GOING_ONLINE:
        case MEM_GOING_OFFLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
        .notifier_call = hv_memory_notifier,
        .priority = 0
};

/* Check whether the page is backed and can be onlined, and if so online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
        unsigned long cur_start_pgp;
        unsigned long cur_end_pgp;
        struct hv_hotadd_gap *gap;

        cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
        cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

        /* The page is not backed. */
        if (((unsigned long)pg < cur_start_pgp) ||
            ((unsigned long)pg >= cur_end_pgp))
                return;

        /* Check for gaps. */
        list_for_each_entry(gap, &has->gap_list, list) {
                cur_start_pgp = (unsigned long)
                        pfn_to_page(gap->start_pfn);
                cur_end_pgp = (unsigned long)
                        pfn_to_page(gap->end_pfn);
                if (((unsigned long)pg >= cur_start_pgp) &&
                    ((unsigned long)pg < cur_end_pgp)) {
                        return;
                }
        }

        /* This frame is currently backed; online the page. */
        __online_page_set_limits(pg);
        __online_page_increment_counters(pg);
        __online_page_free(pg);
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
                                unsigned long start_pfn, unsigned long size)
{
        int i;

        pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
        for (i = 0; i < size; i++)
                hv_page_online_one(has, pfn_to_page(start_pfn + i));
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
                                unsigned long pfn_count,
                                struct hv_hotadd_state *has)
{
        int ret = 0;
        int i, nid;
        unsigned long start_pfn;
        unsigned long processed_pfn;
        unsigned long total_pfn = pfn_count;
        unsigned long flags;

        for (i = 0; i < (size/HA_CHUNK); i++) {
                start_pfn = start + (i * HA_CHUNK);

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                has->ha_end_pfn += HA_CHUNK;

                if (total_pfn > HA_CHUNK) {
                        processed_pfn = HA_CHUNK;
                        total_pfn -= HA_CHUNK;
                } else {
                        processed_pfn = total_pfn;
                        total_pfn = 0;
                }

                has->covered_end_pfn += processed_pfn;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);

                init_completion(&dm_device.ol_waitevent);
                dm_device.ha_waiting = !memhp_auto_online;

                nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
                ret = add_memory(nid, PFN_PHYS((start_pfn)),
                                (HA_CHUNK << PAGE_SHIFT));

                if (ret) {
                        pr_warn("hot_add memory failed error is %d\n", ret);
                        if (ret == -EEXIST) {
                                /*
                                 * This error indicates that the failure
                                 * is not transient. This is the
                                 * case where the guest's physical address map
                                 * precludes hot adding memory. Stop all further
                                 * memory hot-add.
                                 */
                                do_hot_add = false;
                        }
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                        has->ha_end_pfn -= HA_CHUNK;
                        has->covered_end_pfn -= processed_pfn;
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        break;
                }

                /*
                 * Wait for the memory block to be onlined when memory onlining
                 * is done outside of the kernel (i.e. memhp_auto_online is not
                 * set). Since the hot add has succeeded, it is ok to proceed
                 * even if the pages in the hot added region have not been
                 * "onlined" within the allowed time.
                 */
                if (dm_device.ha_waiting)
                        wait_for_completion_timeout(&dm_device.ol_waitevent,
                                                    5*HZ);
                post_status(&dm_device);
        }
}

static void hv_online_page(struct page *pg)
{
        struct hv_hotadd_state *has;
        unsigned long cur_start_pgp;
        unsigned long cur_end_pgp;
        unsigned long flags;

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                cur_start_pgp = (unsigned long)
                        pfn_to_page(has->start_pfn);
                cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);

                /* The page belongs to a different HAS. */
                if (((unsigned long)pg < cur_start_pgp) ||
                    ((unsigned long)pg >= cur_end_pgp))
                        continue;

                hv_page_online_one(has, pg);
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}

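/*
 * Returns 1 if the pfn range is covered by an existing hot-add region
 * (extending the region and recording gaps as needed), 0 if no region
 * covers it, and -ENOMEM if a gap structure could not be allocated.
 */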
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
        struct hv_hotadd_state *has;
        struct hv_hotadd_gap *gap;
        unsigned long residual, new_inc;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                /*
                 * If the current start pfn is not where the covered_end
                 * is, create a gap and update covered_end_pfn.
                 */
                if (has->covered_end_pfn != start_pfn) {
                        gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
                        if (!gap) {
                                ret = -ENOMEM;
                                break;
                        }

                        INIT_LIST_HEAD(&gap->list);
                        gap->start_pfn = has->covered_end_pfn;
                        gap->end_pfn = start_pfn;
                        list_add_tail(&gap->list, &has->gap_list);

                        has->covered_end_pfn = start_pfn;
                }

                /*
                 * If the current hot-add request extends beyond
                 * our current limit, extend it.
                 */
                if ((start_pfn + pfn_cnt) > has->end_pfn) {
                        residual = (start_pfn + pfn_cnt - has->end_pfn);
                        /*
                         * Extend the region by multiples of HA_CHUNK.
                         */
                        new_inc = (residual / HA_CHUNK) * HA_CHUNK;
                        if (residual % HA_CHUNK)
                                new_inc += HA_CHUNK;

                        has->end_pfn += new_inc;
                }

                ret = 1;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return ret;
}

static unsigned long handle_pg_range(unsigned long pg_start,
                                        unsigned long pg_count)
{
        unsigned long start_pfn = pg_start;
        unsigned long pfn_cnt = pg_count;
        unsigned long size;
        struct hv_hotadd_state *has;
        unsigned long pgs_ol = 0;
        unsigned long old_covered_state;
        unsigned long res = 0, flags;

        pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
                pg_start);

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                old_covered_state = has->covered_end_pfn;

                if (start_pfn < has->ha_end_pfn) {
                        /*
                         * This is the case where we are backing pages
                         * in an already hot added region. Bring
                         * these pages online first.
                         */
                        pgs_ol = has->ha_end_pfn - start_pfn;
                        if (pgs_ol > pfn_cnt)
                                pgs_ol = pfn_cnt;

                        has->covered_end_pfn += pgs_ol;
                        pfn_cnt -= pgs_ol;
                        /*
                         * Check if the corresponding memory block is already
                         * online by checking its last previously backed page.
                         * If it is, we need to bring the rest (which was not
                         * previously backed) online too.
                         */
                        if (start_pfn > has->start_pfn &&
                            !PageReserved(pfn_to_page(start_pfn - 1)))
                                hv_bring_pgs_online(has, start_pfn, pgs_ol);

                }

                if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
                        /*
                         * We have some residual hot add range
                         * that needs to be hot added; hot add
                         * it now. Hot add a multiple of
                         * HA_CHUNK that fully covers the pages
                         * we have.
                         */
                        size = (has->end_pfn - has->ha_end_pfn);
                        if (pfn_cnt <= size) {
                                size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
                                if (pfn_cnt % HA_CHUNK)
                                        size += HA_CHUNK;
                        } else {
                                pfn_cnt = size;
                        }
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                }
                /*
                 * If we managed to online any pages that were given to us,
                 * we declare success.
                 */
                res = has->covered_end_pfn - old_covered_state;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return res;
}

static unsigned long process_hot_add(unsigned long pg_start,
                                        unsigned long pfn_cnt,
                                        unsigned long rg_start,
                                        unsigned long rg_size)
{
        struct hv_hotadd_state *ha_region = NULL;
        int covered;
        unsigned long flags;

        if (pfn_cnt == 0)
                return 0;

        if (!dm_device.host_specified_ha_region) {
                covered = pfn_covered(pg_start, pfn_cnt);
                if (covered < 0)
                        return 0;

                if (covered)
                        goto do_pg_range;
        }

        /*
         * If the host has specified a hot-add range, deal with it first.
         */

        if (rg_size != 0) {
                ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
                if (!ha_region)
                        return 0;

                INIT_LIST_HEAD(&ha_region->list);
                INIT_LIST_HEAD(&ha_region->gap_list);

                ha_region->start_pfn = rg_start;
                ha_region->ha_end_pfn = rg_start;
                ha_region->covered_start_pfn = pg_start;
                ha_region->covered_end_pfn = pg_start;
                ha_region->end_pfn = rg_start + rg_size;

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                list_add_tail(&ha_region->list, &dm_device.ha_region_list);
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
        }

do_pg_range:
        /*
         * Process the specified page range, bringing the pages
         * online where possible.
         */
        return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
        struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
        unsigned long pg_start, pfn_cnt;
        unsigned long rg_start, rg_sz;
#endif
        struct hv_dynmem_device *dm = &dm_device;

        memset(&resp, 0, sizeof(struct dm_hot_add_response));
        resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
        resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
        pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
        pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

        rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
        rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

        if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
                unsigned long region_size;
                unsigned long region_start;

                /*
                 * The host has not specified the hot-add region.
                 * Based on the hot-add page range being specified,
                 * compute a hot-add region that can cover the pages
                 * that need to be hot-added while ensuring the alignment
                 * and size requirements of Linux as it relates to hot-add.
                 */
                region_start = pg_start;
                region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
                if (pfn_cnt % HA_CHUNK)
                        region_size += HA_CHUNK;

                region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

                rg_start = region_start;
                rg_sz = region_size;
        }

        if (do_hot_add)
                resp.page_count = process_hot_add(pg_start, pfn_cnt,
                                                rg_start, rg_sz);

        dm->num_pages_added += resp.page_count;
#endif
        /*
         * The result field of the response structure has the
         * following semantics:
         *
         * 1. If all or some pages hot-added: Guest should return success.
         *
         * 2. If no pages could be hot-added:
         *
         * If the guest returns success, then the host
         * will not attempt any further hot-add operations. This
         * signifies a permanent failure.
         *
         * If the guest returns failure, then this failure will be
         * treated as a transient failure and the host may retry the
         * hot-add operation after some delay.
         */
        if (resp.page_count > 0)
                resp.result = 1;
        else if (!do_hot_add)
                resp.result = 1;
        else
                resp.result = 0;

        if (!do_hot_add || (resp.page_count == 0))
                pr_info("Memory hot add failed\n");

        dm->state = DM_INITIALIZED;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        vmbus_sendpacket(dm->dev->channel, &resp,
                        sizeof(struct dm_hot_add_response),
                        (unsigned long)NULL,
                        VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
        struct dm_info_header *info_hdr;

        info_hdr = (struct dm_info_header *)msg->info;

        switch (info_hdr->type) {
        case INFO_TYPE_MAX_PAGE_CNT:
                if (info_hdr->data_size == sizeof(__u64)) {
                        __u64 *max_page_count = (__u64 *)&info_hdr[1];

                        pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
                                *max_page_count);
                }

                break;
        default:
                pr_info("Received Unknown type: %d\n", info_hdr->type);
        }
}

static unsigned long compute_balloon_floor(void)
{
        unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
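        /* With 4 KiB pages (PAGE_SHIFT == 12), MB2PAGES(mb) == mb * 256. */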
        /* Simple continuous piecewise linear function:
         *  max MiB -> min MiB  gradient
         *       0         0
         *      16        16
         *      32        24
         *     128        72    (1/2)
         *     512       168    (1/4)
         *    2048       360    (1/8)
         *    8192       744    (1/16)
         *   32768      1512    (1/32)
         */
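        /*
         * Worked example: at the 2048 MiB boundary both pieces agree:
         * 104 + 2048/8 == 232 + 2048/16 == 360 MiB, matching the table.
         */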
        if (totalram_pages < MB2PAGES(128))
                min_pages = MB2PAGES(8) + (totalram_pages >> 1);
        else if (totalram_pages < MB2PAGES(512))
                min_pages = MB2PAGES(40) + (totalram_pages >> 2);
        else if (totalram_pages < MB2PAGES(2048))
                min_pages = MB2PAGES(104) + (totalram_pages >> 3);
        else if (totalram_pages < MB2PAGES(8192))
                min_pages = MB2PAGES(232) + (totalram_pages >> 4);
        else
                min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
        return min_pages;
}

/*
 * Post our memory-pressure status to the host. The host expects the
 * guests to post this status periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
        struct dm_status status;
        unsigned long now = jiffies;
        unsigned long last_post = last_post_time;

        if (pressure_report_delay > 0) {
                --pressure_report_delay;
                return;
        }

        if (!time_after(now, (last_post_time + HZ)))
                return;

        memset(&status, 0, sizeof(struct dm_status));
        status.hdr.type = DM_STATUS_REPORT;
        status.hdr.size = sizeof(struct dm_status);
        status.hdr.trans_id = atomic_inc_return(&trans_id);

        /*
         * The host expects the guest to report free and committed memory.
         * Furthermore, the host expects the pressure information to include
         * the ballooned out pages. For a given amount of memory that we are
         * managing we need to compute a floor below which we should not
         * balloon. Compute this and add it to the pressure report.
         * We also need to report all offline pages (num_pages_added -
         * num_pages_onlined) as committed to the host, otherwise it can try
         * asking us to balloon them out.
         */
        status.num_avail = si_mem_available();
        status.num_committed = vm_memory_committed() +
                dm->num_pages_ballooned +
                (dm->num_pages_added > dm->num_pages_onlined ?
                 dm->num_pages_added - dm->num_pages_onlined : 0) +
                compute_balloon_floor();

        /*
         * If our transaction ID is no longer current, just don't
         * send the status. This can happen if we were interrupted
         * after we picked our transaction ID.
         */
        if (status.hdr.trans_id != atomic_read(&trans_id))
                return;

        /*
         * If the last post time that we sampled has changed,
         * we have raced, don't post the status.
         */
        if (last_post != last_post_time)
                return;

        last_post_time = jiffies;
        vmbus_sendpacket(dm->dev->channel, &status,
                                sizeof(struct dm_status),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
}

static void free_balloon_pages(struct hv_dynmem_device *dm,
                         union dm_mem_page_range *range_array)
{
        int num_pages = range_array->finfo.page_cnt;
        __u64 start_frame = range_array->finfo.start_page;
        struct page *pg;
        int i;

        for (i = 0; i < num_pages; i++) {
                pg = pfn_to_page(i + start_frame);
                __free_page(pg);
                dm->num_pages_ballooned--;
        }
}

static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
                                        unsigned int num_pages,
                                        struct dm_balloon_response *bl_resp,
                                        int alloc_unit)
{
        unsigned int i = 0;
        struct page *pg;

        if (num_pages < alloc_unit)
                return 0;

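        /*
         * The response is built in a single page: once the header plus the
         * range_array would exceed PAGE_SIZE, stop and report the pages
         * allocated so far; the transaction continues via more_pages.
         */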
        for (i = 0; (i * alloc_unit) < num_pages; i++) {
                if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
                        PAGE_SIZE)
                        return i * alloc_unit;

                /*
                 * We execute this code in a thread context. Furthermore,
                 * we don't want the kernel to try too hard.
                 */
                pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
                                __GFP_NOMEMALLOC | __GFP_NOWARN,
                                get_order(alloc_unit << PAGE_SHIFT));

                if (!pg)
                        return i * alloc_unit;

                dm->num_pages_ballooned += alloc_unit;

                /*
                 * If we allocated 2M pages, split them so we
                 * can free them in any order we get.
                 */

                if (alloc_unit != 1)
                        split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

                bl_resp->range_count++;
                bl_resp->range_array[i].finfo.start_page =
                        page_to_pfn(pg);
                bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
                bl_resp->hdr.size += sizeof(union dm_mem_page_range);

        }

        return num_pages;
}

static void balloon_up(struct work_struct *dummy)
{
        unsigned int num_pages = dm_device.balloon_wrk.num_pages;
        unsigned int num_ballooned = 0;
        struct dm_balloon_response *bl_resp;
        int alloc_unit;
        int ret;
        bool done = false;
        int i;
        long avail_pages;
        unsigned long floor;

        /* The host balloons pages in 2M granularity. */
        WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

        /*
         * We will attempt 2M allocations. However, if we fail to
         * allocate 2M chunks, we will go back to 4k allocations.
         */
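        /* 512 pages of 4 KiB each: one 2 MiB allocation per iteration. */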
        alloc_unit = 512;

        avail_pages = si_mem_available();
        floor = compute_balloon_floor();

        /* Refuse to balloon below the floor, keep the 2M granularity. */
        if (avail_pages < num_pages || avail_pages - num_pages < floor) {
                pr_warn("Balloon request will be partially fulfilled. %s\n",
                        avail_pages < num_pages ? "Not enough memory." :
                        "Balloon floor reached.");

                num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
                num_pages -= num_pages % PAGES_IN_2M;
        }

        while (!done) {
                bl_resp = (struct dm_balloon_response *)send_buffer;
                memset(send_buffer, 0, PAGE_SIZE);
                bl_resp->hdr.type = DM_BALLOON_RESPONSE;
                bl_resp->hdr.size = sizeof(struct dm_balloon_response);
                bl_resp->more_pages = 1;

                num_pages -= num_ballooned;
                num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
                                                    bl_resp, alloc_unit);

                if (alloc_unit != 1 && num_ballooned == 0) {
                        alloc_unit = 1;
                        continue;
                }

                if (num_ballooned == 0 || num_ballooned == num_pages) {
                        pr_debug("Ballooned %u out of %u requested pages.\n",
                                num_pages, dm_device.balloon_wrk.num_pages);

                        bl_resp->more_pages = 0;
                        done = true;
                        dm_device.state = DM_INITIALIZED;
                }

                /*
                 * We are pushing a lot of data through the channel;
                 * deal with transient failures caused by a lack of
                 * space in the ring buffer.
                 */

                do {
                        bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
                        ret = vmbus_sendpacket(dm_device.dev->channel,
                                                bl_resp,
                                                bl_resp->hdr.size,
                                                (unsigned long)NULL,
                                                VM_PKT_DATA_INBAND, 0);

                        if (ret == -EAGAIN)
                                msleep(20);
                        post_status(&dm_device);
                } while (ret == -EAGAIN);

                if (ret) {
                        /*
                         * Free up the memory we allocated.
                         */
                        pr_info("Balloon response failed\n");

                        for (i = 0; i < bl_resp->range_count; i++)
                                free_balloon_pages(&dm_device,
                                                 &bl_resp->range_array[i]);

                        done = true;
                }
        }
}

static void balloon_down(struct hv_dynmem_device *dm,
                        struct dm_unballoon_request *req)
{
        union dm_mem_page_range *range_array = req->range_array;
        int range_count = req->range_count;
        struct dm_unballoon_response resp;
        int i;
        unsigned int prev_pages_ballooned = dm->num_pages_ballooned;

        for (i = 0; i < range_count; i++) {
                free_balloon_pages(dm, &range_array[i]);
                complete(&dm_device.config_event);
        }

        pr_debug("Freed %u ballooned pages.\n",
                prev_pages_ballooned - dm->num_pages_ballooned);

        if (req->more_pages == 1)
                return;

        memset(&resp, 0, sizeof(struct dm_unballoon_response));
        resp.hdr.type = DM_UNBALLOON_RESPONSE;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        resp.hdr.size = sizeof(struct dm_unballoon_response);

        vmbus_sendpacket(dm_device.dev->channel, &resp,
                                sizeof(struct dm_unballoon_response),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
        struct hv_dynmem_device *dm = dm_dev;

        while (!kthread_should_stop()) {
                wait_for_completion_interruptible_timeout(
                                                &dm_device.config_event, 1*HZ);
                /*
                 * The host expects us to post information on the memory
                 * pressure every second.
                 */
                reinit_completion(&dm_device.config_event);
                post_status(dm);
        }

        return 0;
}


static void version_resp(struct hv_dynmem_device *dm,
                        struct dm_version_response *vresp)
{
        struct dm_version_request version_req;
        int ret;

        if (vresp->is_accepted) {
                /*
                 * We are done; wake up the
                 * context waiting for version
                 * negotiation.
                 */
                complete(&dm->host_event);
                return;
        }
        /*
         * If there are more versions to try, continue
         * with negotiations; if not, shut down the service
         * since we are not able to negotiate a suitable version
         * number with the host.
         */
        if (dm->next_version == 0)
                goto version_error;

        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = dm->next_version;
        dm->version = version_req.version.version;

        /*
         * Set the next version to try in case the current version fails.
         * The Win7 protocol ought to be the last one to try.
         */
        switch (version_req.version.version) {
        case DYNMEM_PROTOCOL_VERSION_WIN8:
                dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
                version_req.is_last_attempt = 0;
                break;
        default:
                dm->next_version = 0;
                version_req.is_last_attempt = 1;
        }

        ret = vmbus_sendpacket(dm->dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        if (ret)
                goto version_error;

        return;

version_error:
        dm->state = DM_INIT_ERROR;
        complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
                        struct dm_capabilities_resp_msg *cap_resp)
{
        if (!cap_resp->is_accepted) {
                pr_info("Capabilities not accepted by host\n");
                dm->state = DM_INIT_ERROR;
        }
        complete(&dm->host_event);
}
static void balloon_onchannelcallback(void *context)
{
        struct hv_device *dev = context;
        u32 recvlen;
        u64 requestid;
        struct dm_message *dm_msg;
        struct dm_header *dm_hdr;
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct dm_balloon *bal_msg;
        struct dm_hot_add *ha_msg;
        union dm_mem_page_range *ha_pg_range;
        union dm_mem_page_range *ha_region;

        memset(recv_buffer, 0, sizeof(recv_buffer));
        vmbus_recvpacket(dev->channel, recv_buffer,
                         PAGE_SIZE, &recvlen, &requestid);

        if (recvlen > 0) {
                dm_msg = (struct dm_message *)recv_buffer;
                dm_hdr = &dm_msg->hdr;

                switch (dm_hdr->type) {
                case DM_VERSION_RESPONSE:
                        version_resp(dm,
                                 (struct dm_version_response *)dm_msg);
                        break;

                case DM_CAPABILITIES_RESPONSE:
                        cap_resp(dm,
                                 (struct dm_capabilities_resp_msg *)dm_msg);
                        break;

                case DM_BALLOON_REQUEST:
                        if (dm->state == DM_BALLOON_UP)
                                pr_warn("Currently ballooning\n");
                        bal_msg = (struct dm_balloon *)recv_buffer;
                        dm->state = DM_BALLOON_UP;
                        dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
                        schedule_work(&dm_device.balloon_wrk.wrk);
                        break;

                case DM_UNBALLOON_REQUEST:
                        dm->state = DM_BALLOON_DOWN;
                        balloon_down(dm,
                                 (struct dm_unballoon_request *)recv_buffer);
                        break;

                case DM_MEM_HOT_ADD_REQUEST:
                        if (dm->state == DM_HOT_ADD)
                                pr_warn("Currently hot-adding\n");
                        dm->state = DM_HOT_ADD;
                        ha_msg = (struct dm_hot_add *)recv_buffer;
                        if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
                                /*
                                 * This is a normal hot-add request specifying
                                 * hot-add memory.
                                 */
                                dm->host_specified_ha_region = false;
                                ha_pg_range = &ha_msg->range;
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range.page_range = 0;
                        } else {
                                /*
                                 * Host is specifying that we first hot-add
                                 * a region and then partially populate this
                                 * region.
                                 */
                                dm->host_specified_ha_region = true;
                                ha_pg_range = &ha_msg->range;
                                ha_region = &ha_pg_range[1];
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range = *ha_region;
                        }
                        schedule_work(&dm_device.ha_wrk.wrk);
                        break;

                case DM_INFO_MESSAGE:
                        process_info(dm, (struct dm_info_msg *)dm_msg);
                        break;

                default:
                        pr_err("Unhandled message: type: %d\n", dm_hdr->type);

                }
        }
}

1519 static int balloon_probe(struct hv_device *dev,
1520                         const struct hv_vmbus_device_id *dev_id)
1521 {
1522         int ret;
1523         unsigned long t;
1524         struct dm_version_request version_req;
1525         struct dm_capabilities cap_msg;
1526
1527 #ifdef CONFIG_MEMORY_HOTPLUG
1528         do_hot_add = hot_add;
1529 #else
1530         do_hot_add = false;
1531 #endif
1532
1533         /*
1534          * First allocate a send buffer.
1535          */
1536
1537         send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1538         if (!send_buffer)
1539                 return -ENOMEM;
1540
        dm_device.dev = dev;
        dm_device.state = DM_INITIALIZING;
        dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
        init_completion(&dm_device.host_event);
        init_completion(&dm_device.config_event);
        INIT_LIST_HEAD(&dm_device.ha_region_list);
        spin_lock_init(&dm_device.ha_lock);
        INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
        INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
        dm_device.host_specified_ha_region = false;
        hv_set_drvdata(dev, &dm_device);

        /*
         * Open the channel only after the device state and driver data
         * are set up: the channel callback can run as soon as the channel
         * is open, and it dereferences both.
         */
        ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
                        balloon_onchannelcallback, dev);

        if (ret)
                goto probe_error0;
1557
1558         dm_device.thread =
1559                  kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1560         if (IS_ERR(dm_device.thread)) {
1561                 ret = PTR_ERR(dm_device.thread);
1562                 goto probe_error1;
1563         }
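        /*
         * dm_thread_func runs for the lifetime of the device and
         * periodically posts DM_STATUS_REPORT messages so the host can
         * track the guest's memory pressure.
         */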
1564
1565 #ifdef CONFIG_MEMORY_HOTPLUG
1566         set_online_page_callback(&hv_online_page);
1567         register_memory_notifier(&hv_memory_nb);
1568 #endif
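        /*
         * hv_online_page lets this driver bring hot-added pages online
         * itself, and the memory notifier lets the hot-add path wait for
         * newly added memory to come online.
         */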
1569
        /*
         * Initiate the handshake with the host and negotiate
         * a version that the host can support. We start with the
         * highest version number and go down if the host cannot
         * support it.
         */
1577         memset(&version_req, 0, sizeof(struct dm_version_request));
1578         version_req.hdr.type = DM_VERSION_REQUEST;
1579         version_req.hdr.size = sizeof(struct dm_version_request);
1580         version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1581         version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
1582         version_req.is_last_attempt = 0;
1583         dm_device.version = version_req.version.version;
1584
1585         ret = vmbus_sendpacket(dev->channel, &version_req,
1586                                 sizeof(struct dm_version_request),
1587                                 (unsigned long)NULL,
1588                                 VM_PKT_DATA_INBAND, 0);
1589         if (ret)
1590                 goto probe_error2;
1591
1592         t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1593         if (t == 0) {
1594                 ret = -ETIMEDOUT;
1595                 goto probe_error2;
1596         }
1597
        /*
         * If we could not negotiate a compatible version with the host,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -EPROTO;
                goto probe_error2;
        }
1606
1607         pr_info("Using Dynamic Memory protocol version %u.%u\n",
1608                 DYNMEM_MAJOR_VERSION(dm_device.version),
1609                 DYNMEM_MINOR_VERSION(dm_device.version));
1610
1611         /*
1612          * Now submit our capabilities to the host.
1613          */
1614         memset(&cap_msg, 0, sizeof(struct dm_capabilities));
1615         cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
1616         cap_msg.hdr.size = sizeof(struct dm_capabilities);
1617         cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
1618
1619         cap_msg.caps.cap_bits.balloon = 1;
1620         cap_msg.caps.cap_bits.hot_add = 1;
1621
        /*
         * Specify our alignment requirements for memory hot-add. The
         * protocol encodes the alignment as log base 2 of the number of
         * megabytes, so 7 requests 128MB (2^7 MB) alignment.
         */
        cap_msg.caps.cap_bits.hot_add_alignment = 7;
        /*
         * The host currently ignores these fields; set them to the
         * values the Windows driver uses.
         */
        cap_msg.min_page_cnt = 0;
        cap_msg.max_page_number = -1;
1635
1636         ret = vmbus_sendpacket(dev->channel, &cap_msg,
1637                                 sizeof(struct dm_capabilities),
1638                                 (unsigned long)NULL,
1639                                 VM_PKT_DATA_INBAND, 0);
1640         if (ret)
1641                 goto probe_error2;
1642
1643         t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1644         if (t == 0) {
1645                 ret = -ETIMEDOUT;
1646                 goto probe_error2;
1647         }
1648
        /*
         * If the host rejected our capabilities,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -EPROTO;
                goto probe_error2;
        }
1657
1658         dm_device.state = DM_INITIALIZED;
1659
1660         return 0;
1661
probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
        unregister_memory_notifier(&hv_memory_nb);
#endif
        kthread_stop(dm_device.thread);
1667
1668 probe_error1:
1669         vmbus_close(dev->channel);
1670 probe_error0:
1671         kfree(send_buffer);
1672         return ret;
1673 }
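/*
 * Every message sent above follows the same pattern: zero the structure,
 * fill in hdr.type/hdr.size/hdr.trans_id, send it as an in-band VMBus
 * packet, and wait up to five seconds on host_event. A compiled-out sketch
 * of that pattern as a helper; dm_send_and_wait() is hypothetical and not
 * part of this driver.
 */
#if 0
static int dm_send_and_wait(struct hv_device *dev, struct dm_header *hdr,
                            enum dm_message_type type, u32 size)
{
        int ret;

        hdr->type = type;
        hdr->size = size;
        hdr->trans_id = atomic_inc_return(&trans_id);

        ret = vmbus_sendpacket(dev->channel, hdr, size,
                               (unsigned long)NULL,
                               VM_PKT_DATA_INBAND, 0);
        if (ret)
                return ret;

        if (!wait_for_completion_timeout(&dm_device.host_event, 5*HZ))
                return -ETIMEDOUT;

        return 0;
}
#endif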
1674
1675 static int balloon_remove(struct hv_device *dev)
1676 {
1677         struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1678         struct hv_hotadd_state *has, *tmp;
1679         struct hv_hotadd_gap *gap, *tmp_gap;
1680         unsigned long flags;
1681
1682         if (dm->num_pages_ballooned != 0)
                pr_warn("Ballooned pages: %u\n", dm->num_pages_ballooned);
1684
1685         cancel_work_sync(&dm->balloon_wrk.wrk);
1686         cancel_work_sync(&dm->ha_wrk.wrk);
1687
1688         vmbus_close(dev->channel);
1689         kthread_stop(dm->thread);
1690         kfree(send_buffer);
1691 #ifdef CONFIG_MEMORY_HOTPLUG
1692         restore_online_page_callback(&hv_online_page);
1693         unregister_memory_notifier(&hv_memory_nb);
1694 #endif
        spin_lock_irqsave(&dm->ha_lock, flags);
        list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
                list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
                        list_del(&gap->list);
                        kfree(gap);
                }
                list_del(&has->list);
                kfree(has);
        }
        spin_unlock_irqrestore(&dm->ha_lock, flags);
1705
1706         return 0;
1707 }
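/*
 * Teardown mirrors probe in reverse: flush the work items so nothing
 * still references the channel, close the channel, stop the status
 * thread, free the send buffer, drop the memory-hotplug hooks, and
 * finally release the hot-add region bookkeeping under ha_lock.
 */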
1708
1709 static const struct hv_vmbus_device_id id_table[] = {
1710         /* Dynamic Memory Class ID */
1711         /* 525074DC-8985-46e2-8057-A307DC18A502 */
1712         { HV_DM_GUID, },
1713         { },
1714 };
1715
1716 MODULE_DEVICE_TABLE(vmbus, id_table);
1717
static struct hv_driver balloon_drv = {
        .name = "hv_balloon",
        .id_table = id_table,
        .probe = balloon_probe,
        .remove = balloon_remove,
};
1724
static int __init init_balloon_drv(void)
{
        return vmbus_driver_register(&balloon_drv);
}
1730
1731 module_init(init_balloon_drv);
1732
1733 MODULE_DESCRIPTION("Hyper-V Balloon");
1734 MODULE_LICENSE("GPL");