xref: /linux/drivers/hv/hv_balloon.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */

/*
 * Protocol versions. The low word is the minor version, the high word the major
 * version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xffff)

enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
};
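
/*
 * Worked example (illustrative, not part of the protocol headers):
 * DYNMEM_PROTOCOL_VERSION_2 packs major version 1 and minor version 0
 * into one 32-bit value:
 *
 *	DYNMEM_MAKE_VERSION(1, 0)	 == 0x00010000
 *	DYNMEM_MAJOR_VERSION(0x00010000) == 1
 *	DYNMEM_MINOR_VERSION(0x00010000) == 0
 */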

/*
 * Message Types
 */

enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR			= 0,
	DM_VERSION_REQUEST		= 1,
	DM_VERSION_RESPONSE		= 2,
	DM_CAPABILITIES_REPORT		= 3,
	DM_CAPABILITIES_RESPONSE	= 4,
	DM_STATUS_REPORT		= 5,
	DM_BALLOON_REQUEST		= 6,
	DM_BALLOON_RESPONSE		= 7,
	DM_UNBALLOON_REQUEST		= 8,
	DM_UNBALLOON_RESPONSE		= 9,
	DM_MEM_HOT_ADD_REQUEST		= 10,
	DM_MEM_HOT_ADD_RESPONSE		= 11,
	DM_VERSION_03_MAX		= 11,
	/*
	 * Version 1.0.
	 */
	DM_INFO_MESSAGE			= 12,
	DM_VERSION_1_MAX		= 12
};

/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;

union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		/*
		 * To support guests that may have alignment
		 * limitations on hot-add, the guest can specify
		 * its alignment requirements; a value of n
		 * represents an alignment of 2^n in megabytes.
		 */
		__u64 hot_add_alignment:4;
		__u64 reservedz:58;
	} cap_bits;
	__u64 caps;
} __packed;
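
/*
 * Example (illustrative): a hot_add_alignment of 7 advertises an
 * alignment of 2^7 = 128 MB, which is exactly what balloon_probe()
 * below reports to match Linux's 128 MB hot-add granularity.
 */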

union dm_mem_page_range {
	struct {
		/*
		 * The PFN of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * for AMD64.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64  page_range;
} __packed;
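
/*
 * Worked example (illustrative, assuming the little-endian bitfield
 * layout used on x86): a range starting at PFN 0x100000 (the 4 GB
 * boundary with 4 KiB pages) covering 512 pages packs into one __u64
 * as page_range == (512ULL << 40) | 0x100000, since start_page
 * occupies bits 0-39 and page_cnt bits 40-63.
 */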

/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;
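
/*
 * Note: in this driver trans_id values are manufactured with
 * atomic_inc_return() on a global counter and the host echoes the ID
 * back in its responses; post_status() below compares its sampled ID
 * against the counter to drop status reports that have been raced
 * past by a newer transaction.
 */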

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;

/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest and
 * indicates whether the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message indicates whether the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *		   in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *	    calculated as File Cache Page Fault Count - Page Read Count.
 *	    This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;

/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;

/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;
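
/*
 * Sizing note (illustrative, assuming a 4 KiB PAGE_SIZE): the fixed
 * part of this response is 16 bytes and each dm_mem_page_range is 8
 * bytes, so one page-sized response can carry at most
 * (4096 - 16) / 8 = 510 ranges. alloc_balloon_pages() below stops
 * filling the message at that point and relies on more_pages to
 * continue the transaction.
 */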

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;

/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */

struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;

/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};

/*
 * Header for the information message.
 */

struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8  info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn : covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
};

struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};

struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to the host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set, attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);
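
/*
 * Sizing note (illustrative): with a 4 KiB PAGE_SIZE this gives a
 * 20 KiB ring buffer in each direction for the VMBus channel opened
 * in balloon_probe().
 */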

/*
 * Driver specific state.
 */

enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M	512
#define HA_CHUNK (32 * 1024)
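
/*
 * Worked example (illustrative): HA_CHUNK is expressed in pages, so
 * with 4 KiB pages one chunk is 32 * 1024 * 4 KiB = 128 MB; this is
 * the granularity the driver hot-adds in, matching the 2^7 MB
 * alignment advertised via hot_add_alignment in balloon_probe().
 */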

struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the "hot-add" operation.
	 */
	struct hot_add_wrk ha_wrk;

	/*
	 * This state tracks if the host has specified a hot-add
	 * region.
	 */
	bool host_specified_ha_region;

	/*
	 * State to synchronize hot-add.
	 */
	struct completion ol_waitevent;
	bool ha_waiting;
	/*
	 * This thread handles hot-add
	 * requests from the host as well as notifying
	 * the host about memory pressure in
	 * the guest.
	 */
	struct task_struct *thread;

	/*
	 * A list of hot-add regions.
	 */
	struct list_head ha_region_list;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);
#ifdef CONFIG_MEMORY_HOTPLUG

static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
{
	int i;

	for (i = 0; i < size; i++) {
		struct page *pg;
		pg = pfn_to_page(start_pfn + i);
		__online_page_set_limits(pg);
		__online_page_increment_counters(pg);
		__online_page_free(pg);
	}
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
				unsigned long pfn_count,
				struct hv_hotadd_state *has)
{
	int ret = 0;
	int i, nid;
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);
		has->ha_end_pfn += HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
		} else {
			processed_pfn = total_pfn;
			total_pfn = 0;
		}

		has->covered_end_pfn += processed_pfn;

		init_completion(&dm_device.ol_waitevent);
		dm_device.ha_waiting = true;

		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				(HA_CHUNK << PAGE_SHIFT));

		if (ret) {
			pr_info("hot_add memory failed, error is %d\n", ret);
			if (ret == -EEXIST) {
				/*
				 * This error indicates that the failure
				 * is not transient. This is the
				 * case where the guest's physical address map
				 * precludes hot adding memory. Stop all further
				 * memory hot-add.
				 */
				do_hot_add = false;
			}
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			break;
		}

		/*
		 * Wait for the memory block to be onlined.
		 * Since the hot add has succeeded, it is ok to
		 * proceed even if the pages in the hot added region
		 * have not been "onlined" within the allowed time.
		 */
		wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
		post_status(&dm_device);
	}
}

static void hv_online_page(struct page *pg)
{
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;

	if (dm_device.ha_waiting) {
		dm_device.ha_waiting = false;
		complete(&dm_device.ol_waitevent);
	}

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		cur_start_pgp = (unsigned long)
				pfn_to_page(has->covered_start_pfn);
		cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

		if (((unsigned long)pg >= cur_start_pgp) &&
			((unsigned long)pg < cur_end_pgp)) {
			/*
			 * This frame is currently backed; online the
			 * page.
			 */
			__online_page_set_limits(pg);
			__online_page_increment_counters(pg);
			__online_page_free(pg);
			has->covered_start_pfn++;
		}
	}
}

static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long residual, new_inc;

	if (list_empty(&dm_device.ha_region_list))
		return false;

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);

		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if ((start_pfn >= has->end_pfn))
			continue;
		/*
		 * If the current hot-add request extends beyond
		 * our current limit, extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			/*
			 * Extend the region by multiples of HA_CHUNK.
			 */
			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
			if (residual % HA_CHUNK)
				new_inc += HA_CHUNK;

			has->end_pfn += new_inc;
		}

		/*
		 * If the current start pfn is not where the covered_end
		 * is, update it.
		 */
		if (has->covered_end_pfn != start_pfn) {
			has->covered_end_pfn = start_pfn;
			has->covered_start_pfn = start_pfn;
		}
		return true;
	}

	return false;
}

static unsigned long handle_pg_range(unsigned long pg_start,
					unsigned long pg_count)
{
	unsigned long start_pfn = pg_start;
	unsigned long pfn_cnt = pg_count;
	unsigned long size;
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long pgs_ol = 0;
	unsigned long old_covered_state;

	if (list_empty(&dm_device.ha_region_list))
		return 0;

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);

		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if ((start_pfn >= has->end_pfn))
			continue;

		old_covered_state = has->covered_end_pfn;

		if (start_pfn < has->ha_end_pfn) {
			/*
			 * This is the case where we are backing pages
			 * in an already hot added region. Bring
			 * these pages online first.
			 */
			pgs_ol = has->ha_end_pfn - start_pfn;
			if (pgs_ol > pfn_cnt)
				pgs_ol = pfn_cnt;
			hv_bring_pgs_online(start_pfn, pgs_ol);
			has->covered_end_pfn += pgs_ol;
			has->covered_start_pfn += pgs_ol;
			pfn_cnt -= pgs_ol;
		}

		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
			/*
			 * We have some residual hot add range
			 * that needs to be hot added; hot add
			 * it now. Hot add a multiple of
			 * HA_CHUNK that fully covers the pages
			 * we have.
			 */
			size = (has->end_pfn - has->ha_end_pfn);
			if (pfn_cnt <= size) {
				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
				if (pfn_cnt % HA_CHUNK)
					size += HA_CHUNK;
			} else {
				pfn_cnt = size;
			}
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
		}
		/*
		 * If we managed to online any pages that were given to us,
		 * we declare success.
		 */
		return has->covered_end_pfn - old_covered_state;
	}

	return 0;
}

static unsigned long process_hot_add(unsigned long pg_start,
					unsigned long pfn_cnt,
					unsigned long rg_start,
					unsigned long rg_size)
{
	struct hv_hotadd_state *ha_region = NULL;

	if (pfn_cnt == 0)
		return 0;

	if (!dm_device.host_specified_ha_region)
		if (pfn_covered(pg_start, pfn_cnt))
			goto do_pg_range;

	/*
	 * If the host has specified a hot-add range, deal with it first.
	 */
	if (rg_size != 0) {
		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
		if (!ha_region)
			return 0;

		INIT_LIST_HEAD(&ha_region->list);

		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
		ha_region->start_pfn = rg_start;
		ha_region->ha_end_pfn = rg_start;
		ha_region->covered_start_pfn = pg_start;
		ha_region->covered_end_pfn = pg_start;
		ha_region->end_pfn = rg_start + rg_size;
	}

do_pg_range:
	/*
	 * Process the page range specified, bringing the
	 * pages online where possible.
	 */
	return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
	struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long pg_start, pfn_cnt;
	unsigned long rg_start, rg_sz;
#endif
	struct hv_dynmem_device *dm = &dm_device;

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
		unsigned long region_size;
		unsigned long region_start;

		/*
		 * The host has not specified the hot-add region.
		 * Based on the hot-add page range being specified,
		 * compute a hot-add region that can cover the pages
		 * that need to be hot-added while ensuring the alignment
		 * and size requirements of Linux as it relates to hot-add.
		 */
		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
		if (pfn_cnt % HA_CHUNK)
			region_size += HA_CHUNK;

		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

		rg_start = region_start;
		rg_sz = region_size;
	}

	if (do_hot_add)
		resp.page_count = process_hot_add(pg_start, pfn_cnt,
						rg_start, rg_sz);
#endif
	/*
	 * The result field of the response structure has the
	 * following semantics:
	 *
	 * 1. If all or some pages hot-added: Guest should return success.
	 *
	 * 2. If no pages could be hot-added:
	 *
	 * If the guest returns success, then the host
	 * will not attempt any further hot-add operations. This
	 * signifies a permanent failure.
	 *
	 * If the guest returns failure, then this failure will be
	 * treated as a transient failure and the host may retry the
	 * hot-add operation after some delay.
	 */
	if (resp.page_count > 0)
		resp.result = 1;
	else if (!do_hot_add)
		resp.result = 1;
	else
		resp.result = 0;

	if (!do_hot_add || (resp.page_count == 0))
		pr_info("Memory hot add failed\n");

	dm->state = DM_INITIALIZED;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	vmbus_sendpacket(dm->dev->channel, &resp,
			sizeof(struct dm_hot_add_response),
			(unsigned long)NULL,
			VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
		pr_info("Data Size is %d\n", info_hdr->data_size);
		break;
	default:
		pr_info("Received Unknown type: %d\n", info_hdr->type);
	}
}

static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/* Simple continuous piecewise linear function:
	 *  max MiB -> min MiB  gradient
	 *       0         0
	 *      16        16
	 *      32        24
	 *     128        72    (1/2)
	 *     512       168    (1/4)
	 *    2048       360    (1/8)
	 *    8192       552    (1/32)
	 *   32768      1320
	 *  131072      4392
	 */
	if (totalram_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
	else if (totalram_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
	else if (totalram_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
	else
		min_pages = MB2PAGES(296) + (totalram_pages >> 5);
#undef MB2PAGES
	return min_pages;
}
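
/*
 * Worked example (illustrative): for a guest with 1024 MiB of RAM,
 * totalram_pages falls in the [512 MiB, 2048 MiB) bucket, so the
 * floor is MB2PAGES(104) + (totalram_pages >> 3), i.e.
 * 104 MiB + 128 MiB = 232 MiB that ballooning will not reclaim.
 */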

/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects the guest to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
	struct dm_status status;
	struct sysinfo val;
	unsigned long now = jiffies;
	unsigned long last_post = last_post_time;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;
		return;
	}

	if (!time_after(now, (last_post_time + HZ)))
		return;

	si_meminfo(&val);
	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free memory.
	 * Further, the host expects the pressure information to
	 * include the ballooned out pages.
	 * For a given amount of memory that we are managing, we
	 * need to compute a floor below which we should not balloon.
	 * Compute this and add it to the pressure report.
	 */
	status.num_avail = val.freeram;
	status.num_committed = vm_memory_committed() +
				dm->num_pages_ballooned +
				compute_balloon_floor();

	/*
	 * If our transaction ID is no longer current, just don't
	 * send the status. This can happen if we were interrupted
	 * after we picked our transaction ID.
	 */
	if (status.hdr.trans_id != atomic_read(&trans_id))
		return;

	/*
	 * If the last post time that we sampled has changed,
	 * we have raced; don't post the status.
	 */
	if (last_post != last_post_time)
		return;

	last_post_time = jiffies;
	vmbus_sendpacket(dm->dev->channel, &status,
				sizeof(struct dm_status),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);
}

static void free_balloon_pages(struct hv_dynmem_device *dm,
			 union dm_mem_page_range *range_array)
{
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;
	struct page *pg;
	int i;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		__free_page(pg);
		dm->num_pages_ballooned--;
	}
}

static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
			 struct dm_balloon_response *bl_resp, int alloc_unit,
			 bool *alloc_error)
{
	int i = 0;
	struct page *pg;

	if (num_pages < alloc_unit)
		return 0;

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
			PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				__GFP_NOMEMALLOC | __GFP_NOWARN,
				get_order(alloc_unit << PAGE_SHIFT));

		if (!pg) {
			*alloc_error = true;
			return i * alloc_unit;
		}

		dm->num_pages_ballooned += alloc_unit;

		/*
		 * If we allocated 2M pages, split them so we
		 * can free them in any order we get.
		 */
		if (alloc_unit != 1)
			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page =
			page_to_pfn(pg);
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
	}

	return num_pages;
}

static void balloon_up(struct work_struct *dummy)
{
	int num_pages = dm_device.balloon_wrk.num_pages;
	int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool alloc_error = false;
	bool done = false;
	int i;

	/*
	 * We will attempt 2M allocations. However, if we fail to
	 * allocate 2M chunks, we will go back to 4k allocations.
	 */
	alloc_unit = 512;

	while (!done) {
		bl_resp = (struct dm_balloon_response *)send_buffer;
		memset(send_buffer, 0, PAGE_SIZE);
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;

		num_pages -= num_ballooned;
		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
						bl_resp, alloc_unit,
						&alloc_error);

		if ((alloc_error) && (alloc_unit != 1)) {
			alloc_unit = 1;
			continue;
		}

		if ((alloc_error) || (num_ballooned == num_pages)) {
			bl_resp->more_pages = 0;
			done = true;
			dm_device.state = DM_INITIALIZED;
		}

		/*
		 * We are pushing a lot of data through the channel;
		 * deal with transient failures caused by the
		 * lack of space in the ring buffer.
		 */
		do {
			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
			ret = vmbus_sendpacket(dm_device.dev->channel,
						bl_resp,
						bl_resp->hdr.size,
						(unsigned long)NULL,
						VM_PKT_DATA_INBAND, 0);

			if (ret == -EAGAIN)
				msleep(20);
			post_status(&dm_device);
		} while (ret == -EAGAIN);

		if (ret) {
			/*
			 * Free up the memory we allocated.
			 */
			pr_info("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(&dm_device,
						&bl_resp->range_array[i]);

			done = true;
		}
	}
}

static void balloon_down(struct hv_dynmem_device *dm,
			struct dm_unballoon_request *req)
{
	union dm_mem_page_range *range_array = req->range_array;
	int range_count = req->range_count;
	struct dm_unballoon_response resp;
	int i;

	for (i = 0; i < range_count; i++) {
		free_balloon_pages(dm, &range_array[i]);
		post_status(&dm_device);
	}

	if (req->more_pages == 1)
		return;

	memset(&resp, 0, sizeof(struct dm_unballoon_response));
	resp.hdr.type = DM_UNBALLOON_RESPONSE;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	resp.hdr.size = sizeof(struct dm_unballoon_response);

	vmbus_sendpacket(dm_device.dev->channel, &resp,
				sizeof(struct dm_unballoon_response),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
	struct hv_dynmem_device *dm = dm_dev;
	int t;

	while (!kthread_should_stop()) {
		t = wait_for_completion_interruptible_timeout(
						&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on the memory
		 * pressure every second.
		 */
		if (t == 0)
			post_status(dm);
	}

	return 0;
}

static void version_resp(struct hv_dynmem_device *dm,
			struct dm_version_response *vresp)
{
	struct dm_version_request version_req;
	int ret;

	if (vresp->is_accepted) {
		/*
		 * We are done; wake up the
		 * context waiting for version
		 * negotiation.
		 */
		complete(&dm->host_event);
		return;
	}
	/*
	 * If there are more versions to try, continue
	 * with negotiations; if not,
	 * shut down the service since we are not able
	 * to negotiate a suitable version number
	 * with the host.
	 */
	if (dm->next_version == 0)
		goto version_error;

	dm->next_version = 0;
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
	version_req.is_last_attempt = 1;

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
				sizeof(struct dm_version_request),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);

	if (ret)
		goto version_error;

	return;

version_error:
	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
			struct dm_capabilities_resp_msg *cap_resp)
{
	if (!cap_resp->is_accepted) {
		pr_info("Capabilities not accepted by host\n");
		dm->state = DM_INIT_ERROR;
	}
	complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
	struct hv_device *dev = context;
	u32 recvlen;
	u64 requestid;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct dm_balloon *bal_msg;
	struct dm_hot_add *ha_msg;
	union dm_mem_page_range *ha_pg_range;
	union dm_mem_page_range *ha_region;

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			 PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
			version_resp(dm,
				 (struct dm_version_response *)dm_msg);
			break;

		case DM_CAPABILITIES_RESPONSE:
			cap_resp(dm,
				 (struct dm_capabilities_resp_msg *)dm_msg);
			break;

		case DM_BALLOON_REQUEST:
			if (dm->state == DM_BALLOON_UP)
				pr_warn("Currently ballooning\n");
			bal_msg = (struct dm_balloon *)recv_buffer;
			dm->state = DM_BALLOON_UP;
			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
			schedule_work(&dm_device.balloon_wrk.wrk);
			break;

		case DM_UNBALLOON_REQUEST:
			dm->state = DM_BALLOON_DOWN;
			balloon_down(dm,
				 (struct dm_unballoon_request *)recv_buffer);
			break;

		case DM_MEM_HOT_ADD_REQUEST:
			if (dm->state == DM_HOT_ADD)
				pr_warn("Currently hot-adding\n");
			dm->state = DM_HOT_ADD;
			ha_msg = (struct dm_hot_add *)recv_buffer;
			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
				/*
				 * This is a normal hot-add request specifying
				 * hot-add memory.
				 */
				ha_pg_range = &ha_msg->range;
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range.page_range = 0;
			} else {
				/*
				 * Host is specifying that we first hot-add
				 * a region and then partially populate this
				 * region.
				 */
				dm->host_specified_ha_region = true;
				ha_pg_range = &ha_msg->range;
				ha_region = &ha_pg_range[1];
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range = *ha_region;
			}
			schedule_work(&dm_device.ha_wrk.wrk);
			break;

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);
			break;

		default:
			pr_err("Unhandled message: type: %d\n", dm_hdr->type);
		}
	}
}

static int balloon_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	int ret, t;
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;

	do_hot_add = hot_add;

	/*
	 * First allocate a send buffer.
	 */
	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!send_buffer)
		return -ENOMEM;

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			balloon_onchannelcallback, dev);

	if (ret)
		goto probe_error0;

	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;

	dm_device.thread =
		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		goto probe_error1;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	set_online_page_callback(&hv_online_page);
#endif

	hv_set_drvdata(dev, &dm_device);
	/*
	 * Initiate the handshake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
	version_req.is_last_attempt = 0;

	ret = vmbus_sendpacket(dev->channel, &version_req,
				sizeof(struct dm_version_request),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If we could not negotiate a compatible version with the host,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}
	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	cap_msg.caps.cap_bits.balloon = 1;
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Specify our alignment requirements as it relates to
	 * memory hot-add. Specify 128MB alignment.
	 */
	cap_msg.caps.cap_bits.hot_add_alignment = 7;

	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
				sizeof(struct dm_capabilities),
				(unsigned long)NULL,
				VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If the host does not like our capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	dm_device.state = DM_INITIALIZED;

	return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
#endif
	kthread_stop(dm_device.thread);

probe_error1:
	vmbus_close(dev->channel);
probe_error0:
	kfree(send_buffer);
	return ret;
}

static int balloon_remove(struct hv_device *dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct list_head *cur, *tmp;
	struct hv_hotadd_state *has;

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	cancel_work_sync(&dm->balloon_wrk.wrk);
	cancel_work_sync(&dm->ha_wrk.wrk);

	vmbus_close(dev->channel);
	kthread_stop(dm->thread);
	kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
#endif
	list_for_each_safe(cur, tmp, &dm->ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		list_del(&has->list);
		kfree(has);
	}

	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Dynamic Memory Class ID */
	/* 525074DC-8985-46e2-8057-A307DC18A502 */
	{ HV_DM_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
	.name = "hv_balloon",
	.id_table = id_table,
	.probe = balloon_probe,
	.remove = balloon_remove,
};

static int __init init_balloon_drv(void)
{
	return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");