1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2012, Microsoft Corporation. 4 * 5 * Author: 6 * K. Y. Srinivasan <kys@microsoft.com> 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/kernel.h> 12 #include <linux/jiffies.h> 13 #include <linux/mman.h> 14 #include <linux/debugfs.h> 15 #include <linux/delay.h> 16 #include <linux/init.h> 17 #include <linux/module.h> 18 #include <linux/slab.h> 19 #include <linux/kthread.h> 20 #include <linux/completion.h> 21 #include <linux/count_zeros.h> 22 #include <linux/memory_hotplug.h> 23 #include <linux/memory.h> 24 #include <linux/notifier.h> 25 #include <linux/percpu_counter.h> 26 #include <linux/page_reporting.h> 27 28 #include <linux/hyperv.h> 29 #include <asm/hyperv-tlfs.h> 30 31 #include <asm/mshyperv.h> 32 33 #define CREATE_TRACE_POINTS 34 #include "hv_trace_balloon.h" 35 36 /* 37 * We begin with definitions supporting the Dynamic Memory protocol 38 * with the host. 39 * 40 * Begin protocol definitions. 41 */ 42 43 44 45 /* 46 * Protocol versions. The low word is the minor version, the high word the major 47 * version. 48 * 49 * History: 50 * Initial version 1.0 51 * Changed to 0.1 on 2009/03/25 52 * Changes to 0.2 on 2009/05/14 53 * Changes to 0.3 on 2009/12/03 54 * Changed to 1.0 on 2011/04/05 55 */ 56 57 #define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor))) 58 #define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16) 59 #define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff) 60 61 enum { 62 DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3), 63 DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0), 64 DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0), 65 66 DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1, 67 DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2, 68 DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3, 69 70 DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10 71 }; 72 73 74 75 /* 76 * Message Types 77 */ 78 79 enum dm_message_type { 80 /* 81 * Version 0.3 82 */ 83 DM_ERROR = 0, 84 DM_VERSION_REQUEST = 1, 85 DM_VERSION_RESPONSE = 2, 86 DM_CAPABILITIES_REPORT = 3, 87 DM_CAPABILITIES_RESPONSE = 4, 88 DM_STATUS_REPORT = 5, 89 DM_BALLOON_REQUEST = 6, 90 DM_BALLOON_RESPONSE = 7, 91 DM_UNBALLOON_REQUEST = 8, 92 DM_UNBALLOON_RESPONSE = 9, 93 DM_MEM_HOT_ADD_REQUEST = 10, 94 DM_MEM_HOT_ADD_RESPONSE = 11, 95 DM_VERSION_03_MAX = 11, 96 /* 97 * Version 1.0. 98 */ 99 DM_INFO_MESSAGE = 12, 100 DM_VERSION_1_MAX = 12 101 }; 102 103 104 /* 105 * Structures defining the dynamic memory management 106 * protocol. 107 */ 108 109 union dm_version { 110 struct { 111 __u16 minor_version; 112 __u16 major_version; 113 }; 114 __u32 version; 115 } __packed; 116 117 118 union dm_caps { 119 struct { 120 __u64 balloon:1; 121 __u64 hot_add:1; 122 /* 123 * To support guests that may have alignment 124 * limitations on hot-add, the guest can specify 125 * its alignment requirements; a value of n 126 * represents an alignment of 2^n in mega bytes. 127 */ 128 __u64 hot_add_alignment:4; 129 __u64 reservedz:58; 130 } cap_bits; 131 __u64 caps; 132 } __packed; 133 134 union dm_mem_page_range { 135 struct { 136 /* 137 * The PFN number of the first page in the range. 138 * 40 bits is the architectural limit of a PFN 139 * number for AMD64. 140 */ 141 __u64 start_page:40; 142 /* 143 * The number of pages in the range. 
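		 * For scale (illustrative note): with 4 KiB pages, the
		 * 24-bit page_cnt below caps a single range at 2^24 pages
		 * (64 GiB), and the 40-bit start_page above covers the
		 * 52-bit (4 PiB) physical address space of AMD64.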
144 */ 145 __u64 page_cnt:24; 146 } finfo; 147 __u64 page_range; 148 } __packed; 149 150 151 152 /* 153 * The header for all dynamic memory messages: 154 * 155 * type: Type of the message. 156 * size: Size of the message in bytes; including the header. 157 * trans_id: The guest is responsible for manufacturing this ID. 158 */ 159 160 struct dm_header { 161 __u16 type; 162 __u16 size; 163 __u32 trans_id; 164 } __packed; 165 166 /* 167 * A generic message format for dynamic memory. 168 * Specific message formats are defined later in the file. 169 */ 170 171 struct dm_message { 172 struct dm_header hdr; 173 __u8 data[]; /* enclosed message */ 174 } __packed; 175 176 177 /* 178 * Specific message types supporting the dynamic memory protocol. 179 */ 180 181 /* 182 * Version negotiation message. Sent from the guest to the host. 183 * The guest is free to try different versions until the host 184 * accepts the version. 185 * 186 * dm_version: The protocol version requested. 187 * is_last_attempt: If TRUE, this is the last version guest will request. 188 * reservedz: Reserved field, set to zero. 189 */ 190 191 struct dm_version_request { 192 struct dm_header hdr; 193 union dm_version version; 194 __u32 is_last_attempt:1; 195 __u32 reservedz:31; 196 } __packed; 197 198 /* 199 * Version response message; Host to Guest and indicates 200 * if the host has accepted the version sent by the guest. 201 * 202 * is_accepted: If TRUE, host has accepted the version and the guest 203 * should proceed to the next stage of the protocol. FALSE indicates that 204 * guest should re-try with a different version. 205 * 206 * reservedz: Reserved field, set to zero. 207 */ 208 209 struct dm_version_response { 210 struct dm_header hdr; 211 __u64 is_accepted:1; 212 __u64 reservedz:63; 213 } __packed; 214 215 /* 216 * Message reporting capabilities. This is sent from the guest to the 217 * host. 218 */ 219 220 struct dm_capabilities { 221 struct dm_header hdr; 222 union dm_caps caps; 223 __u64 min_page_cnt; 224 __u64 max_page_number; 225 } __packed; 226 227 /* 228 * Response to the capabilities message. This is sent from the host to the 229 * guest. This message notifies if the host has accepted the guest's 230 * capabilities. If the host has not accepted, the guest must shutdown 231 * the service. 232 * 233 * is_accepted: Indicates if the host has accepted guest's capabilities. 234 * reservedz: Must be 0. 235 */ 236 237 struct dm_capabilities_resp_msg { 238 struct dm_header hdr; 239 __u64 is_accepted:1; 240 __u64 reservedz:63; 241 } __packed; 242 243 /* 244 * This message is used to report memory pressure from the guest. 245 * This message is not part of any transaction and there is no 246 * response to this message. 247 * 248 * num_avail: Available memory in pages. 249 * num_committed: Committed memory in pages. 250 * page_file_size: The accumulated size of all page files 251 * in the system in pages. 252 * zero_free: The number of zero and free pages. 253 * page_file_writes: The writes to the page file in pages. 254 * io_diff: An indicator of file cache efficiency or page file activity, 255 * calculated as File Cache Page Fault Count - Page Read Count. 256 * This value is in pages. 257 * 258 * Some of these metrics are Windows specific and fortunately 259 * the algorithm on the host side that computes the guest memory 260 * pressure only uses num_committed value. 
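 * (post_status() below converts these counts to HV_HYP_PAGE, i.e. 4 KiB,
 * units before sending them to the host.)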
 */

struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;


/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;


/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;


/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 */

struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;

/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};


/*
 * Header for the information message.
 */

struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8 info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
	/*
	 * A list of gaps.
	 */
	struct list_head gap_list;
};

struct hv_hotadd_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};

struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};

static bool allow_hibernation;
static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to the host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = VMBUS_RING_SIZE(16 * 1024);

/*
 * Driver specific state.
 */

enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};


static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
#define PAGES_IN_2M	(2 * 1024 * 1024 / PAGE_SIZE)
#define HA_CHUNK	(128 * 1024 * 1024 / PAGE_SIZE)

struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;
	unsigned int num_pages_onlined;
	unsigned int num_pages_added;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the "hot-add" operation.
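	 * (ha_wrk.wrk is bound to hot_add_req() in balloon_probe() and is
	 * scheduled from the VMBus channel callback.)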
526 */ 527 struct hot_add_wrk ha_wrk; 528 529 /* 530 * This state tracks if the host has specified a hot-add 531 * region. 532 */ 533 bool host_specified_ha_region; 534 535 /* 536 * State to synchronize hot-add. 537 */ 538 struct completion ol_waitevent; 539 /* 540 * This thread handles hot-add 541 * requests from the host as well as notifying 542 * the host with regards to memory pressure in 543 * the guest. 544 */ 545 struct task_struct *thread; 546 547 /* 548 * Protects ha_region_list, num_pages_onlined counter and individual 549 * regions from ha_region_list. 550 */ 551 spinlock_t ha_lock; 552 553 /* 554 * A list of hot-add regions. 555 */ 556 struct list_head ha_region_list; 557 558 /* 559 * We start with the highest version we can support 560 * and downgrade based on the host; we save here the 561 * next version to try. 562 */ 563 __u32 next_version; 564 565 /* 566 * The negotiated version agreed by host. 567 */ 568 __u32 version; 569 570 struct page_reporting_dev_info pr_dev_info; 571 572 /* 573 * Maximum number of pages that can be hot_add-ed 574 */ 575 __u64 max_dynamic_page_count; 576 }; 577 578 static struct hv_dynmem_device dm_device; 579 580 static void post_status(struct hv_dynmem_device *dm); 581 582 #ifdef CONFIG_MEMORY_HOTPLUG 583 static inline bool has_pfn_is_backed(struct hv_hotadd_state *has, 584 unsigned long pfn) 585 { 586 struct hv_hotadd_gap *gap; 587 588 /* The page is not backed. */ 589 if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn)) 590 return false; 591 592 /* Check for gaps. */ 593 list_for_each_entry(gap, &has->gap_list, list) { 594 if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn)) 595 return false; 596 } 597 598 return true; 599 } 600 601 static unsigned long hv_page_offline_check(unsigned long start_pfn, 602 unsigned long nr_pages) 603 { 604 unsigned long pfn = start_pfn, count = 0; 605 struct hv_hotadd_state *has; 606 bool found; 607 608 while (pfn < start_pfn + nr_pages) { 609 /* 610 * Search for HAS which covers the pfn and when we find one 611 * count how many consequitive PFNs are covered. 612 */ 613 found = false; 614 list_for_each_entry(has, &dm_device.ha_region_list, list) { 615 while ((pfn >= has->start_pfn) && 616 (pfn < has->end_pfn) && 617 (pfn < start_pfn + nr_pages)) { 618 found = true; 619 if (has_pfn_is_backed(has, pfn)) 620 count++; 621 pfn++; 622 } 623 } 624 625 /* 626 * This PFN is not in any HAS (e.g. we're offlining a region 627 * which was present at boot), no need to account for it. Go 628 * to the next one. 629 */ 630 if (!found) 631 pfn++; 632 } 633 634 return count; 635 } 636 637 static int hv_memory_notifier(struct notifier_block *nb, unsigned long val, 638 void *v) 639 { 640 struct memory_notify *mem = (struct memory_notify *)v; 641 unsigned long flags, pfn_count; 642 643 switch (val) { 644 case MEM_ONLINE: 645 case MEM_CANCEL_ONLINE: 646 complete(&dm_device.ol_waitevent); 647 break; 648 649 case MEM_OFFLINE: 650 spin_lock_irqsave(&dm_device.ha_lock, flags); 651 pfn_count = hv_page_offline_check(mem->start_pfn, 652 mem->nr_pages); 653 if (pfn_count <= dm_device.num_pages_onlined) { 654 dm_device.num_pages_onlined -= pfn_count; 655 } else { 656 /* 657 * We're offlining more pages than we managed to online. 658 * This is unexpected. In any case don't let 659 * num_pages_onlined wrap around zero. 
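				 * (num_pages_onlined is an unsigned int, so
				 * letting it go negative would silently wrap
				 * to a huge value.)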
660 */ 661 WARN_ON_ONCE(1); 662 dm_device.num_pages_onlined = 0; 663 } 664 spin_unlock_irqrestore(&dm_device.ha_lock, flags); 665 break; 666 case MEM_GOING_ONLINE: 667 case MEM_GOING_OFFLINE: 668 case MEM_CANCEL_OFFLINE: 669 break; 670 } 671 return NOTIFY_OK; 672 } 673 674 static struct notifier_block hv_memory_nb = { 675 .notifier_call = hv_memory_notifier, 676 .priority = 0 677 }; 678 679 /* Check if the particular page is backed and can be onlined and online it. */ 680 static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg) 681 { 682 if (!has_pfn_is_backed(has, page_to_pfn(pg))) { 683 if (!PageOffline(pg)) 684 __SetPageOffline(pg); 685 return; 686 } 687 if (PageOffline(pg)) 688 __ClearPageOffline(pg); 689 690 /* This frame is currently backed; online the page. */ 691 generic_online_page(pg, 0); 692 693 lockdep_assert_held(&dm_device.ha_lock); 694 dm_device.num_pages_onlined++; 695 } 696 697 static void hv_bring_pgs_online(struct hv_hotadd_state *has, 698 unsigned long start_pfn, unsigned long size) 699 { 700 int i; 701 702 pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn); 703 for (i = 0; i < size; i++) 704 hv_page_online_one(has, pfn_to_page(start_pfn + i)); 705 } 706 707 static void hv_mem_hot_add(unsigned long start, unsigned long size, 708 unsigned long pfn_count, 709 struct hv_hotadd_state *has) 710 { 711 int ret = 0; 712 int i, nid; 713 unsigned long start_pfn; 714 unsigned long processed_pfn; 715 unsigned long total_pfn = pfn_count; 716 unsigned long flags; 717 718 for (i = 0; i < (size/HA_CHUNK); i++) { 719 start_pfn = start + (i * HA_CHUNK); 720 721 spin_lock_irqsave(&dm_device.ha_lock, flags); 722 has->ha_end_pfn += HA_CHUNK; 723 724 if (total_pfn > HA_CHUNK) { 725 processed_pfn = HA_CHUNK; 726 total_pfn -= HA_CHUNK; 727 } else { 728 processed_pfn = total_pfn; 729 total_pfn = 0; 730 } 731 732 has->covered_end_pfn += processed_pfn; 733 spin_unlock_irqrestore(&dm_device.ha_lock, flags); 734 735 reinit_completion(&dm_device.ol_waitevent); 736 737 nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn)); 738 ret = add_memory(nid, PFN_PHYS((start_pfn)), 739 (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE); 740 741 if (ret) { 742 pr_err("hot_add memory failed error is %d\n", ret); 743 if (ret == -EEXIST) { 744 /* 745 * This error indicates that the error 746 * is not a transient failure. This is the 747 * case where the guest's physical address map 748 * precludes hot adding memory. Stop all further 749 * memory hot-add. 750 */ 751 do_hot_add = false; 752 } 753 spin_lock_irqsave(&dm_device.ha_lock, flags); 754 has->ha_end_pfn -= HA_CHUNK; 755 has->covered_end_pfn -= processed_pfn; 756 spin_unlock_irqrestore(&dm_device.ha_lock, flags); 757 break; 758 } 759 760 /* 761 * Wait for memory to get onlined. If the kernel onlined the 762 * memory when adding it, this will return directly. Otherwise, 763 * it will wait for user space to online the memory. This helps 764 * to avoid adding memory faster than it is getting onlined. As 765 * adding succeeded, it is ok to proceed even if the memory was 766 * not onlined in time. 
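		 * The 5 second timeout below is therefore a best-effort
		 * throttle, not a correctness requirement.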
767 */ 768 wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ); 769 post_status(&dm_device); 770 } 771 } 772 773 static void hv_online_page(struct page *pg, unsigned int order) 774 { 775 struct hv_hotadd_state *has; 776 unsigned long flags; 777 unsigned long pfn = page_to_pfn(pg); 778 779 spin_lock_irqsave(&dm_device.ha_lock, flags); 780 list_for_each_entry(has, &dm_device.ha_region_list, list) { 781 /* The page belongs to a different HAS. */ 782 if ((pfn < has->start_pfn) || 783 (pfn + (1UL << order) > has->end_pfn)) 784 continue; 785 786 hv_bring_pgs_online(has, pfn, 1UL << order); 787 break; 788 } 789 spin_unlock_irqrestore(&dm_device.ha_lock, flags); 790 } 791 792 static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) 793 { 794 struct hv_hotadd_state *has; 795 struct hv_hotadd_gap *gap; 796 unsigned long residual, new_inc; 797 int ret = 0; 798 unsigned long flags; 799 800 spin_lock_irqsave(&dm_device.ha_lock, flags); 801 list_for_each_entry(has, &dm_device.ha_region_list, list) { 802 /* 803 * If the pfn range we are dealing with is not in the current 804 * "hot add block", move on. 805 */ 806 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) 807 continue; 808 809 /* 810 * If the current start pfn is not where the covered_end 811 * is, create a gap and update covered_end_pfn. 812 */ 813 if (has->covered_end_pfn != start_pfn) { 814 gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC); 815 if (!gap) { 816 ret = -ENOMEM; 817 break; 818 } 819 820 INIT_LIST_HEAD(&gap->list); 821 gap->start_pfn = has->covered_end_pfn; 822 gap->end_pfn = start_pfn; 823 list_add_tail(&gap->list, &has->gap_list); 824 825 has->covered_end_pfn = start_pfn; 826 } 827 828 /* 829 * If the current hot add-request extends beyond 830 * our current limit; extend it. 831 */ 832 if ((start_pfn + pfn_cnt) > has->end_pfn) { 833 residual = (start_pfn + pfn_cnt - has->end_pfn); 834 /* 835 * Extend the region by multiples of HA_CHUNK. 836 */ 837 new_inc = (residual / HA_CHUNK) * HA_CHUNK; 838 if (residual % HA_CHUNK) 839 new_inc += HA_CHUNK; 840 841 has->end_pfn += new_inc; 842 } 843 844 ret = 1; 845 break; 846 } 847 spin_unlock_irqrestore(&dm_device.ha_lock, flags); 848 849 return ret; 850 } 851 852 static unsigned long handle_pg_range(unsigned long pg_start, 853 unsigned long pg_count) 854 { 855 unsigned long start_pfn = pg_start; 856 unsigned long pfn_cnt = pg_count; 857 unsigned long size; 858 struct hv_hotadd_state *has; 859 unsigned long pgs_ol = 0; 860 unsigned long old_covered_state; 861 unsigned long res = 0, flags; 862 863 pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count, 864 pg_start); 865 866 spin_lock_irqsave(&dm_device.ha_lock, flags); 867 list_for_each_entry(has, &dm_device.ha_region_list, list) { 868 /* 869 * If the pfn range we are dealing with is not in the current 870 * "hot add block", move on. 871 */ 872 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) 873 continue; 874 875 old_covered_state = has->covered_end_pfn; 876 877 if (start_pfn < has->ha_end_pfn) { 878 /* 879 * This is the case where we are backing pages 880 * in an already hot added region. Bring 881 * these pages online first. 882 */ 883 pgs_ol = has->ha_end_pfn - start_pfn; 884 if (pgs_ol > pfn_cnt) 885 pgs_ol = pfn_cnt; 886 887 has->covered_end_pfn += pgs_ol; 888 pfn_cnt -= pgs_ol; 889 /* 890 * Check if the corresponding memory block is already 891 * online. It is possible to observe struct pages still 892 * being uninitialized here so check section instead. 
893 * In case the section is online we need to bring the 894 * rest of pfns (which were not backed previously) 895 * online too. 896 */ 897 if (start_pfn > has->start_pfn && 898 online_section_nr(pfn_to_section_nr(start_pfn))) 899 hv_bring_pgs_online(has, start_pfn, pgs_ol); 900 901 } 902 903 if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) { 904 /* 905 * We have some residual hot add range 906 * that needs to be hot added; hot add 907 * it now. Hot add a multiple of 908 * of HA_CHUNK that fully covers the pages 909 * we have. 910 */ 911 size = (has->end_pfn - has->ha_end_pfn); 912 if (pfn_cnt <= size) { 913 size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK); 914 if (pfn_cnt % HA_CHUNK) 915 size += HA_CHUNK; 916 } else { 917 pfn_cnt = size; 918 } 919 spin_unlock_irqrestore(&dm_device.ha_lock, flags); 920 hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has); 921 spin_lock_irqsave(&dm_device.ha_lock, flags); 922 } 923 /* 924 * If we managed to online any pages that were given to us, 925 * we declare success. 926 */ 927 res = has->covered_end_pfn - old_covered_state; 928 break; 929 } 930 spin_unlock_irqrestore(&dm_device.ha_lock, flags); 931 932 return res; 933 } 934 935 static unsigned long process_hot_add(unsigned long pg_start, 936 unsigned long pfn_cnt, 937 unsigned long rg_start, 938 unsigned long rg_size) 939 { 940 struct hv_hotadd_state *ha_region = NULL; 941 int covered; 942 unsigned long flags; 943 944 if (pfn_cnt == 0) 945 return 0; 946 947 if (!dm_device.host_specified_ha_region) { 948 covered = pfn_covered(pg_start, pfn_cnt); 949 if (covered < 0) 950 return 0; 951 952 if (covered) 953 goto do_pg_range; 954 } 955 956 /* 957 * If the host has specified a hot-add range; deal with it first. 958 */ 959 960 if (rg_size != 0) { 961 ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL); 962 if (!ha_region) 963 return 0; 964 965 INIT_LIST_HEAD(&ha_region->list); 966 INIT_LIST_HEAD(&ha_region->gap_list); 967 968 ha_region->start_pfn = rg_start; 969 ha_region->ha_end_pfn = rg_start; 970 ha_region->covered_start_pfn = pg_start; 971 ha_region->covered_end_pfn = pg_start; 972 ha_region->end_pfn = rg_start + rg_size; 973 974 spin_lock_irqsave(&dm_device.ha_lock, flags); 975 list_add_tail(&ha_region->list, &dm_device.ha_region_list); 976 spin_unlock_irqrestore(&dm_device.ha_lock, flags); 977 } 978 979 do_pg_range: 980 /* 981 * Process the page range specified; bringing them 982 * online if possible. 983 */ 984 return handle_pg_range(pg_start, pfn_cnt); 985 } 986 987 #endif 988 989 static void hot_add_req(struct work_struct *dummy) 990 { 991 struct dm_hot_add_response resp; 992 #ifdef CONFIG_MEMORY_HOTPLUG 993 unsigned long pg_start, pfn_cnt; 994 unsigned long rg_start, rg_sz; 995 #endif 996 struct hv_dynmem_device *dm = &dm_device; 997 998 memset(&resp, 0, sizeof(struct dm_hot_add_response)); 999 resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE; 1000 resp.hdr.size = sizeof(struct dm_hot_add_response); 1001 1002 #ifdef CONFIG_MEMORY_HOTPLUG 1003 pg_start = dm->ha_wrk.ha_page_range.finfo.start_page; 1004 pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt; 1005 1006 rg_start = dm->ha_wrk.ha_region_range.finfo.start_page; 1007 rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt; 1008 1009 if ((rg_start == 0) && (!dm->host_specified_ha_region)) { 1010 unsigned long region_size; 1011 unsigned long region_start; 1012 1013 /* 1014 * The host has not specified the hot-add region. 
1015 * Based on the hot-add page range being specified, 1016 * compute a hot-add region that can cover the pages 1017 * that need to be hot-added while ensuring the alignment 1018 * and size requirements of Linux as it relates to hot-add. 1019 */ 1020 region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK; 1021 if (pfn_cnt % HA_CHUNK) 1022 region_size += HA_CHUNK; 1023 1024 region_start = (pg_start / HA_CHUNK) * HA_CHUNK; 1025 1026 rg_start = region_start; 1027 rg_sz = region_size; 1028 } 1029 1030 if (do_hot_add) 1031 resp.page_count = process_hot_add(pg_start, pfn_cnt, 1032 rg_start, rg_sz); 1033 1034 dm->num_pages_added += resp.page_count; 1035 #endif 1036 /* 1037 * The result field of the response structure has the 1038 * following semantics: 1039 * 1040 * 1. If all or some pages hot-added: Guest should return success. 1041 * 1042 * 2. If no pages could be hot-added: 1043 * 1044 * If the guest returns success, then the host 1045 * will not attempt any further hot-add operations. This 1046 * signifies a permanent failure. 1047 * 1048 * If the guest returns failure, then this failure will be 1049 * treated as a transient failure and the host may retry the 1050 * hot-add operation after some delay. 1051 */ 1052 if (resp.page_count > 0) 1053 resp.result = 1; 1054 else if (!do_hot_add) 1055 resp.result = 1; 1056 else 1057 resp.result = 0; 1058 1059 if (!do_hot_add || resp.page_count == 0) { 1060 if (!allow_hibernation) 1061 pr_err("Memory hot add failed\n"); 1062 else 1063 pr_info("Ignore hot-add request!\n"); 1064 } 1065 1066 dm->state = DM_INITIALIZED; 1067 resp.hdr.trans_id = atomic_inc_return(&trans_id); 1068 vmbus_sendpacket(dm->dev->channel, &resp, 1069 sizeof(struct dm_hot_add_response), 1070 (unsigned long)NULL, 1071 VM_PKT_DATA_INBAND, 0); 1072 } 1073 1074 static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg) 1075 { 1076 struct dm_info_header *info_hdr; 1077 1078 info_hdr = (struct dm_info_header *)msg->info; 1079 1080 switch (info_hdr->type) { 1081 case INFO_TYPE_MAX_PAGE_CNT: 1082 if (info_hdr->data_size == sizeof(__u64)) { 1083 __u64 *max_page_count = (__u64 *)&info_hdr[1]; 1084 1085 pr_info("Max. 
dynamic memory size: %llu MB\n",
				(*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
			dm->max_dynamic_page_count = *max_page_count;
		}

		break;
	default:
		pr_warn("Received Unknown type: %d\n", info_hdr->type);
	}
}

static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
	unsigned long nr_pages = totalram_pages();
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/* Simple continuous piecewise linear function:
	 *  max MiB -> min MiB  gradient
	 *       0         0
	 *      16        16
	 *      32        24
	 *     128        72    (1/2)
	 *     512       168    (1/4)
	 *    2048       360    (1/8)
	 *    8192       744    (1/16)
	 *   32768      1512    (1/32)
	 */
	if (nr_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (nr_pages >> 1);
	else if (nr_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (nr_pages >> 2);
	else if (nr_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (nr_pages >> 3);
	else if (nr_pages < MB2PAGES(8192))
		min_pages = MB2PAGES(232) + (nr_pages >> 4);
	else
		min_pages = MB2PAGES(488) + (nr_pages >> 5);
#undef MB2PAGES
	return min_pages;
}

/*
 * Compute total committed memory pages
 */

static unsigned long get_pages_committed(struct hv_dynmem_device *dm)
{
	return vm_memory_committed() +
		dm->num_pages_ballooned +
		(dm->num_pages_added > dm->num_pages_onlined ?
		 dm->num_pages_added - dm->num_pages_onlined : 0) +
		compute_balloon_floor();
}

/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
	struct dm_status status;
	unsigned long now = jiffies;
	unsigned long last_post = last_post_time;
	unsigned long num_pages_avail, num_pages_committed;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;
		return;
	}

	if (!time_after(now, (last_post_time + HZ)))
		return;

	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free and committed memory.
	 * Furthermore, the host expects the pressure information to include
	 * the ballooned out pages. For a given amount of memory that we are
	 * managing we need to compute a floor below which we should not
	 * balloon. Compute this and add it to the pressure report.
	 * We also need to report all offline pages (num_pages_added -
	 * num_pages_onlined) as committed to the host, otherwise it can try
	 * asking us to balloon them out.
	 */
	num_pages_avail = si_mem_available();
	num_pages_committed = get_pages_committed(dm);

	trace_balloon_status(num_pages_avail, num_pages_committed,
			     vm_memory_committed(), dm->num_pages_ballooned,
			     dm->num_pages_added, dm->num_pages_onlined);

	/* Convert numbers of pages into numbers of HV_HYP_PAGEs.
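	 * (NR_HV_HYP_PAGES_IN_PAGE is PAGE_SIZE / HV_HYP_PAGE_SIZE, so this
	 * is a multiply by 1 on 4 KiB-page kernels and e.g. by 16 on a
	 * 64 KiB PAGE_SIZE build.)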
*/ 1187 status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE; 1188 status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE; 1189 1190 /* 1191 * If our transaction ID is no longer current, just don't 1192 * send the status. This can happen if we were interrupted 1193 * after we picked our transaction ID. 1194 */ 1195 if (status.hdr.trans_id != atomic_read(&trans_id)) 1196 return; 1197 1198 /* 1199 * If the last post time that we sampled has changed, 1200 * we have raced, don't post the status. 1201 */ 1202 if (last_post != last_post_time) 1203 return; 1204 1205 last_post_time = jiffies; 1206 vmbus_sendpacket(dm->dev->channel, &status, 1207 sizeof(struct dm_status), 1208 (unsigned long)NULL, 1209 VM_PKT_DATA_INBAND, 0); 1210 1211 } 1212 1213 static void free_balloon_pages(struct hv_dynmem_device *dm, 1214 union dm_mem_page_range *range_array) 1215 { 1216 int num_pages = range_array->finfo.page_cnt; 1217 __u64 start_frame = range_array->finfo.start_page; 1218 struct page *pg; 1219 int i; 1220 1221 for (i = 0; i < num_pages; i++) { 1222 pg = pfn_to_page(i + start_frame); 1223 __ClearPageOffline(pg); 1224 __free_page(pg); 1225 dm->num_pages_ballooned--; 1226 adjust_managed_page_count(pg, 1); 1227 } 1228 } 1229 1230 1231 1232 static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, 1233 unsigned int num_pages, 1234 struct dm_balloon_response *bl_resp, 1235 int alloc_unit) 1236 { 1237 unsigned int i, j; 1238 struct page *pg; 1239 1240 for (i = 0; i < num_pages / alloc_unit; i++) { 1241 if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) > 1242 HV_HYP_PAGE_SIZE) 1243 return i * alloc_unit; 1244 1245 /* 1246 * We execute this code in a thread context. Furthermore, 1247 * we don't want the kernel to try too hard. 1248 */ 1249 pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY | 1250 __GFP_NOMEMALLOC | __GFP_NOWARN, 1251 get_order(alloc_unit << PAGE_SHIFT)); 1252 1253 if (!pg) 1254 return i * alloc_unit; 1255 1256 dm->num_pages_ballooned += alloc_unit; 1257 1258 /* 1259 * If we allocatted 2M pages; split them so we 1260 * can free them in any order we get. 1261 */ 1262 1263 if (alloc_unit != 1) 1264 split_page(pg, get_order(alloc_unit << PAGE_SHIFT)); 1265 1266 /* mark all pages offline */ 1267 for (j = 0; j < alloc_unit; j++) { 1268 __SetPageOffline(pg + j); 1269 adjust_managed_page_count(pg + j, -1); 1270 } 1271 1272 bl_resp->range_count++; 1273 bl_resp->range_array[i].finfo.start_page = 1274 page_to_pfn(pg); 1275 bl_resp->range_array[i].finfo.page_cnt = alloc_unit; 1276 bl_resp->hdr.size += sizeof(union dm_mem_page_range); 1277 1278 } 1279 1280 return i * alloc_unit; 1281 } 1282 1283 static void balloon_up(struct work_struct *dummy) 1284 { 1285 unsigned int num_pages = dm_device.balloon_wrk.num_pages; 1286 unsigned int num_ballooned = 0; 1287 struct dm_balloon_response *bl_resp; 1288 int alloc_unit; 1289 int ret; 1290 bool done = false; 1291 int i; 1292 long avail_pages; 1293 unsigned long floor; 1294 1295 /* 1296 * We will attempt 2M allocations. However, if we fail to 1297 * allocate 2M chunks, we will go back to PAGE_SIZE allocations. 1298 */ 1299 alloc_unit = PAGES_IN_2M; 1300 1301 avail_pages = si_mem_available(); 1302 floor = compute_balloon_floor(); 1303 1304 /* Refuse to balloon below the floor. */ 1305 if (avail_pages < num_pages || avail_pages - num_pages < floor) { 1306 pr_info("Balloon request will be partially fulfilled. %s\n", 1307 avail_pages < num_pages ? "Not enough memory." 
: 1308 "Balloon floor reached."); 1309 1310 num_pages = avail_pages > floor ? (avail_pages - floor) : 0; 1311 } 1312 1313 while (!done) { 1314 memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE); 1315 bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer; 1316 bl_resp->hdr.type = DM_BALLOON_RESPONSE; 1317 bl_resp->hdr.size = sizeof(struct dm_balloon_response); 1318 bl_resp->more_pages = 1; 1319 1320 num_pages -= num_ballooned; 1321 num_ballooned = alloc_balloon_pages(&dm_device, num_pages, 1322 bl_resp, alloc_unit); 1323 1324 if (alloc_unit != 1 && num_ballooned == 0) { 1325 alloc_unit = 1; 1326 continue; 1327 } 1328 1329 if (num_ballooned == 0 || num_ballooned == num_pages) { 1330 pr_debug("Ballooned %u out of %u requested pages.\n", 1331 num_pages, dm_device.balloon_wrk.num_pages); 1332 1333 bl_resp->more_pages = 0; 1334 done = true; 1335 dm_device.state = DM_INITIALIZED; 1336 } 1337 1338 /* 1339 * We are pushing a lot of data through the channel; 1340 * deal with transient failures caused because of the 1341 * lack of space in the ring buffer. 1342 */ 1343 1344 do { 1345 bl_resp->hdr.trans_id = atomic_inc_return(&trans_id); 1346 ret = vmbus_sendpacket(dm_device.dev->channel, 1347 bl_resp, 1348 bl_resp->hdr.size, 1349 (unsigned long)NULL, 1350 VM_PKT_DATA_INBAND, 0); 1351 1352 if (ret == -EAGAIN) 1353 msleep(20); 1354 post_status(&dm_device); 1355 } while (ret == -EAGAIN); 1356 1357 if (ret) { 1358 /* 1359 * Free up the memory we allocatted. 1360 */ 1361 pr_err("Balloon response failed\n"); 1362 1363 for (i = 0; i < bl_resp->range_count; i++) 1364 free_balloon_pages(&dm_device, 1365 &bl_resp->range_array[i]); 1366 1367 done = true; 1368 } 1369 } 1370 1371 } 1372 1373 static void balloon_down(struct hv_dynmem_device *dm, 1374 struct dm_unballoon_request *req) 1375 { 1376 union dm_mem_page_range *range_array = req->range_array; 1377 int range_count = req->range_count; 1378 struct dm_unballoon_response resp; 1379 int i; 1380 unsigned int prev_pages_ballooned = dm->num_pages_ballooned; 1381 1382 for (i = 0; i < range_count; i++) { 1383 free_balloon_pages(dm, &range_array[i]); 1384 complete(&dm_device.config_event); 1385 } 1386 1387 pr_debug("Freed %u ballooned pages.\n", 1388 prev_pages_ballooned - dm->num_pages_ballooned); 1389 1390 if (req->more_pages == 1) 1391 return; 1392 1393 memset(&resp, 0, sizeof(struct dm_unballoon_response)); 1394 resp.hdr.type = DM_UNBALLOON_RESPONSE; 1395 resp.hdr.trans_id = atomic_inc_return(&trans_id); 1396 resp.hdr.size = sizeof(struct dm_unballoon_response); 1397 1398 vmbus_sendpacket(dm_device.dev->channel, &resp, 1399 sizeof(struct dm_unballoon_response), 1400 (unsigned long)NULL, 1401 VM_PKT_DATA_INBAND, 0); 1402 1403 dm->state = DM_INITIALIZED; 1404 } 1405 1406 static void balloon_onchannelcallback(void *context); 1407 1408 static int dm_thread_func(void *dm_dev) 1409 { 1410 struct hv_dynmem_device *dm = dm_dev; 1411 1412 while (!kthread_should_stop()) { 1413 wait_for_completion_interruptible_timeout( 1414 &dm_device.config_event, 1*HZ); 1415 /* 1416 * The host expects us to post information on the memory 1417 * pressure every second. 1418 */ 1419 reinit_completion(&dm_device.config_event); 1420 post_status(dm); 1421 } 1422 1423 return 0; 1424 } 1425 1426 1427 static void version_resp(struct hv_dynmem_device *dm, 1428 struct dm_version_response *vresp) 1429 { 1430 struct dm_version_request version_req; 1431 int ret; 1432 1433 if (vresp->is_accepted) { 1434 /* 1435 * We are done; wakeup the 1436 * context waiting for version 1437 * negotiation. 
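		 * (balloon_connect_vsp() is blocked on dm->host_event.)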
1438 */ 1439 complete(&dm->host_event); 1440 return; 1441 } 1442 /* 1443 * If there are more versions to try, continue 1444 * with negotiations; if not 1445 * shutdown the service since we are not able 1446 * to negotiate a suitable version number 1447 * with the host. 1448 */ 1449 if (dm->next_version == 0) 1450 goto version_error; 1451 1452 memset(&version_req, 0, sizeof(struct dm_version_request)); 1453 version_req.hdr.type = DM_VERSION_REQUEST; 1454 version_req.hdr.size = sizeof(struct dm_version_request); 1455 version_req.hdr.trans_id = atomic_inc_return(&trans_id); 1456 version_req.version.version = dm->next_version; 1457 dm->version = version_req.version.version; 1458 1459 /* 1460 * Set the next version to try in case current version fails. 1461 * Win7 protocol ought to be the last one to try. 1462 */ 1463 switch (version_req.version.version) { 1464 case DYNMEM_PROTOCOL_VERSION_WIN8: 1465 dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7; 1466 version_req.is_last_attempt = 0; 1467 break; 1468 default: 1469 dm->next_version = 0; 1470 version_req.is_last_attempt = 1; 1471 } 1472 1473 ret = vmbus_sendpacket(dm->dev->channel, &version_req, 1474 sizeof(struct dm_version_request), 1475 (unsigned long)NULL, 1476 VM_PKT_DATA_INBAND, 0); 1477 1478 if (ret) 1479 goto version_error; 1480 1481 return; 1482 1483 version_error: 1484 dm->state = DM_INIT_ERROR; 1485 complete(&dm->host_event); 1486 } 1487 1488 static void cap_resp(struct hv_dynmem_device *dm, 1489 struct dm_capabilities_resp_msg *cap_resp) 1490 { 1491 if (!cap_resp->is_accepted) { 1492 pr_err("Capabilities not accepted by host\n"); 1493 dm->state = DM_INIT_ERROR; 1494 } 1495 complete(&dm->host_event); 1496 } 1497 1498 static void balloon_onchannelcallback(void *context) 1499 { 1500 struct hv_device *dev = context; 1501 u32 recvlen; 1502 u64 requestid; 1503 struct dm_message *dm_msg; 1504 struct dm_header *dm_hdr; 1505 struct hv_dynmem_device *dm = hv_get_drvdata(dev); 1506 struct dm_balloon *bal_msg; 1507 struct dm_hot_add *ha_msg; 1508 union dm_mem_page_range *ha_pg_range; 1509 union dm_mem_page_range *ha_region; 1510 1511 memset(recv_buffer, 0, sizeof(recv_buffer)); 1512 vmbus_recvpacket(dev->channel, recv_buffer, 1513 HV_HYP_PAGE_SIZE, &recvlen, &requestid); 1514 1515 if (recvlen > 0) { 1516 dm_msg = (struct dm_message *)recv_buffer; 1517 dm_hdr = &dm_msg->hdr; 1518 1519 switch (dm_hdr->type) { 1520 case DM_VERSION_RESPONSE: 1521 version_resp(dm, 1522 (struct dm_version_response *)dm_msg); 1523 break; 1524 1525 case DM_CAPABILITIES_RESPONSE: 1526 cap_resp(dm, 1527 (struct dm_capabilities_resp_msg *)dm_msg); 1528 break; 1529 1530 case DM_BALLOON_REQUEST: 1531 if (allow_hibernation) { 1532 pr_info("Ignore balloon-up request!\n"); 1533 break; 1534 } 1535 1536 if (dm->state == DM_BALLOON_UP) 1537 pr_warn("Currently ballooning\n"); 1538 bal_msg = (struct dm_balloon *)recv_buffer; 1539 dm->state = DM_BALLOON_UP; 1540 dm_device.balloon_wrk.num_pages = bal_msg->num_pages; 1541 schedule_work(&dm_device.balloon_wrk.wrk); 1542 break; 1543 1544 case DM_UNBALLOON_REQUEST: 1545 if (allow_hibernation) { 1546 pr_info("Ignore balloon-down request!\n"); 1547 break; 1548 } 1549 1550 dm->state = DM_BALLOON_DOWN; 1551 balloon_down(dm, 1552 (struct dm_unballoon_request *)recv_buffer); 1553 break; 1554 1555 case DM_MEM_HOT_ADD_REQUEST: 1556 if (dm->state == DM_HOT_ADD) 1557 pr_warn("Currently hot-adding\n"); 1558 dm->state = DM_HOT_ADD; 1559 ha_msg = (struct dm_hot_add *)recv_buffer; 1560 if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) { 1561 /* 1562 
* This is a normal hot-add request specifying 1563 * hot-add memory. 1564 */ 1565 dm->host_specified_ha_region = false; 1566 ha_pg_range = &ha_msg->range; 1567 dm->ha_wrk.ha_page_range = *ha_pg_range; 1568 dm->ha_wrk.ha_region_range.page_range = 0; 1569 } else { 1570 /* 1571 * Host is specifying that we first hot-add 1572 * a region and then partially populate this 1573 * region. 1574 */ 1575 dm->host_specified_ha_region = true; 1576 ha_pg_range = &ha_msg->range; 1577 ha_region = &ha_pg_range[1]; 1578 dm->ha_wrk.ha_page_range = *ha_pg_range; 1579 dm->ha_wrk.ha_region_range = *ha_region; 1580 } 1581 schedule_work(&dm_device.ha_wrk.wrk); 1582 break; 1583 1584 case DM_INFO_MESSAGE: 1585 process_info(dm, (struct dm_info_msg *)dm_msg); 1586 break; 1587 1588 default: 1589 pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type); 1590 1591 } 1592 } 1593 1594 } 1595 1596 /* Hyper-V only supports reporting 2MB pages or higher */ 1597 #define HV_MIN_PAGE_REPORTING_ORDER 9 1598 #define HV_MIN_PAGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << HV_MIN_PAGE_REPORTING_ORDER) 1599 static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info, 1600 struct scatterlist *sgl, unsigned int nents) 1601 { 1602 unsigned long flags; 1603 struct hv_memory_hint *hint; 1604 int i; 1605 u64 status; 1606 struct scatterlist *sg; 1607 1608 WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES); 1609 WARN_ON_ONCE(sgl->length < HV_MIN_PAGE_REPORTING_LEN); 1610 local_irq_save(flags); 1611 hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg); 1612 if (!hint) { 1613 local_irq_restore(flags); 1614 return -ENOSPC; 1615 } 1616 1617 hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD; 1618 hint->reserved = 0; 1619 for_each_sg(sgl, sg, nents, i) { 1620 union hv_gpa_page_range *range; 1621 1622 range = &hint->ranges[i]; 1623 range->address_space = 0; 1624 /* page reporting only reports 2MB pages or higher */ 1625 range->page.largepage = 1; 1626 range->page.additional_pages = 1627 (sg->length / HV_MIN_PAGE_REPORTING_LEN) - 1; 1628 range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB; 1629 range->base_large_pfn = 1630 page_to_hvpfn(sg_page(sg)) >> HV_MIN_PAGE_REPORTING_ORDER; 1631 } 1632 1633 status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0, 1634 hint, NULL); 1635 local_irq_restore(flags); 1636 if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) { 1637 pr_err("Cold memory discard hypercall failed with status %llx\n", 1638 status); 1639 return -EINVAL; 1640 } 1641 1642 return 0; 1643 } 1644 1645 static void enable_page_reporting(void) 1646 { 1647 int ret; 1648 1649 /* Essentially, validating 'PAGE_REPORTING_MIN_ORDER' is big enough. 
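	 * HV_MIN_PAGE_REPORTING_ORDER is 9: 2^9 contiguous 4 KiB pages, i.e.
	 * 2 MiB, the smallest unit Hyper-V accepts for cold discard hints.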
*/ 1650 if (pageblock_order < HV_MIN_PAGE_REPORTING_ORDER) { 1651 pr_debug("Cold memory discard is only supported on 2MB pages and above\n"); 1652 return; 1653 } 1654 1655 if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) { 1656 pr_debug("Cold memory discard hint not supported by Hyper-V\n"); 1657 return; 1658 } 1659 1660 BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES); 1661 dm_device.pr_dev_info.report = hv_free_page_report; 1662 ret = page_reporting_register(&dm_device.pr_dev_info); 1663 if (ret < 0) { 1664 dm_device.pr_dev_info.report = NULL; 1665 pr_err("Failed to enable cold memory discard: %d\n", ret); 1666 } else { 1667 pr_info("Cold memory discard hint enabled\n"); 1668 } 1669 } 1670 1671 static void disable_page_reporting(void) 1672 { 1673 if (dm_device.pr_dev_info.report) { 1674 page_reporting_unregister(&dm_device.pr_dev_info); 1675 dm_device.pr_dev_info.report = NULL; 1676 } 1677 } 1678 1679 static int ballooning_enabled(void) 1680 { 1681 /* 1682 * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE), 1683 * since currently it's unclear to us whether an unballoon request can 1684 * make sure all page ranges are guest page size aligned. 1685 */ 1686 if (PAGE_SIZE != HV_HYP_PAGE_SIZE) { 1687 pr_info("Ballooning disabled because page size is not 4096 bytes\n"); 1688 return 0; 1689 } 1690 1691 return 1; 1692 } 1693 1694 static int hot_add_enabled(void) 1695 { 1696 /* 1697 * Disable hot add on ARM64, because we currently rely on 1698 * memory_add_physaddr_to_nid() to get a node id of a hot add range, 1699 * however ARM64's memory_add_physaddr_to_nid() always return 0 and 1700 * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for 1701 * add_memory(). 1702 */ 1703 if (IS_ENABLED(CONFIG_ARM64)) { 1704 pr_info("Memory hot add disabled on ARM64\n"); 1705 return 0; 1706 } 1707 1708 return 1; 1709 } 1710 1711 static int balloon_connect_vsp(struct hv_device *dev) 1712 { 1713 struct dm_version_request version_req; 1714 struct dm_capabilities cap_msg; 1715 unsigned long t; 1716 int ret; 1717 1718 /* 1719 * max_pkt_size should be large enough for one vmbus packet header plus 1720 * our receive buffer size. Hyper-V sends messages up to 1721 * HV_HYP_PAGE_SIZE bytes long on balloon channel. 1722 */ 1723 dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2; 1724 1725 ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0, 1726 balloon_onchannelcallback, dev); 1727 if (ret) 1728 return ret; 1729 1730 /* 1731 * Initiate the hand shake with the host and negotiate 1732 * a version that the host can support. We start with the 1733 * highest version number and go down if the host cannot 1734 * support it. 1735 */ 1736 memset(&version_req, 0, sizeof(struct dm_version_request)); 1737 version_req.hdr.type = DM_VERSION_REQUEST; 1738 version_req.hdr.size = sizeof(struct dm_version_request); 1739 version_req.hdr.trans_id = atomic_inc_return(&trans_id); 1740 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10; 1741 version_req.is_last_attempt = 0; 1742 dm_device.version = version_req.version.version; 1743 1744 ret = vmbus_sendpacket(dev->channel, &version_req, 1745 sizeof(struct dm_version_request), 1746 (unsigned long)NULL, VM_PKT_DATA_INBAND, 0); 1747 if (ret) 1748 goto out; 1749 1750 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); 1751 if (t == 0) { 1752 ret = -ETIMEDOUT; 1753 goto out; 1754 } 1755 1756 /* 1757 * If we could not negotiate a compatible version with the host 1758 * fail the probe function. 
 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -EPROTO;
		goto out;
	}

	pr_info("Using Dynamic Memory protocol version %u.%u\n",
		DYNMEM_MAJOR_VERSION(dm_device.version),
		DYNMEM_MINOR_VERSION(dm_device.version));

	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
	 * currently still requires the bits to be set, so we have to add code
	 * to fail the host's hot-add and balloon up/down requests, if any.
	 */
	cap_msg.caps.cap_bits.balloon = ballooning_enabled();
	cap_msg.caps.cap_bits.hot_add = hot_add_enabled();

	/*
	 * Specify our alignment requirements as they relate to
	 * memory hot-add. Specify 128MB alignment.
	 */
	cap_msg.caps.cap_bits.hot_add_alignment = 7;

	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
			       sizeof(struct dm_capabilities),
			       (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto out;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * If the host does not like our capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -EPROTO;
		goto out;
	}

	return 0;
out:
	vmbus_close(dev->channel);
	return ret;
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

/**
 * hv_balloon_debug_show - shows statistics of balloon operations.
 * @f: pointer to the &struct seq_file.
 * @offset: ignored.
 *
 * Provides the statistics that can be accessed in hv-balloon in the debugfs.
 *
 * Return: zero on success or an error code.
1839 */ 1840 static int hv_balloon_debug_show(struct seq_file *f, void *offset) 1841 { 1842 struct hv_dynmem_device *dm = f->private; 1843 char *sname; 1844 1845 seq_printf(f, "%-22s: %u.%u\n", "host_version", 1846 DYNMEM_MAJOR_VERSION(dm->version), 1847 DYNMEM_MINOR_VERSION(dm->version)); 1848 1849 seq_printf(f, "%-22s:", "capabilities"); 1850 if (ballooning_enabled()) 1851 seq_puts(f, " enabled"); 1852 1853 if (hot_add_enabled()) 1854 seq_puts(f, " hot_add"); 1855 1856 seq_puts(f, "\n"); 1857 1858 seq_printf(f, "%-22s: %u", "state", dm->state); 1859 switch (dm->state) { 1860 case DM_INITIALIZING: 1861 sname = "Initializing"; 1862 break; 1863 case DM_INITIALIZED: 1864 sname = "Initialized"; 1865 break; 1866 case DM_BALLOON_UP: 1867 sname = "Balloon Up"; 1868 break; 1869 case DM_BALLOON_DOWN: 1870 sname = "Balloon Down"; 1871 break; 1872 case DM_HOT_ADD: 1873 sname = "Hot Add"; 1874 break; 1875 case DM_INIT_ERROR: 1876 sname = "Error"; 1877 break; 1878 default: 1879 sname = "Unknown"; 1880 } 1881 seq_printf(f, " (%s)\n", sname); 1882 1883 /* HV Page Size */ 1884 seq_printf(f, "%-22s: %ld\n", "page_size", HV_HYP_PAGE_SIZE); 1885 1886 /* Pages added with hot_add */ 1887 seq_printf(f, "%-22s: %u\n", "pages_added", dm->num_pages_added); 1888 1889 /* pages that are "onlined"/used from pages_added */ 1890 seq_printf(f, "%-22s: %u\n", "pages_onlined", dm->num_pages_onlined); 1891 1892 /* pages we have given back to host */ 1893 seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned); 1894 1895 seq_printf(f, "%-22s: %lu\n", "total_pages_committed", 1896 get_pages_committed(dm)); 1897 1898 seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count", 1899 dm->max_dynamic_page_count); 1900 1901 return 0; 1902 } 1903 1904 DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug); 1905 1906 static void hv_balloon_debugfs_init(struct hv_dynmem_device *b) 1907 { 1908 debugfs_create_file("hv-balloon", 0444, NULL, b, 1909 &hv_balloon_debug_fops); 1910 } 1911 1912 static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b) 1913 { 1914 debugfs_remove(debugfs_lookup("hv-balloon", NULL)); 1915 } 1916 1917 #else 1918 1919 static inline void hv_balloon_debugfs_init(struct hv_dynmem_device *b) 1920 { 1921 } 1922 1923 static inline void hv_balloon_debugfs_exit(struct hv_dynmem_device *b) 1924 { 1925 } 1926 1927 #endif /* CONFIG_DEBUG_FS */ 1928 1929 static int balloon_probe(struct hv_device *dev, 1930 const struct hv_vmbus_device_id *dev_id) 1931 { 1932 int ret; 1933 1934 allow_hibernation = hv_is_hibernation_supported(); 1935 if (allow_hibernation) 1936 hot_add = false; 1937 1938 #ifdef CONFIG_MEMORY_HOTPLUG 1939 do_hot_add = hot_add; 1940 #else 1941 do_hot_add = false; 1942 #endif 1943 dm_device.dev = dev; 1944 dm_device.state = DM_INITIALIZING; 1945 dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8; 1946 init_completion(&dm_device.host_event); 1947 init_completion(&dm_device.config_event); 1948 INIT_LIST_HEAD(&dm_device.ha_region_list); 1949 spin_lock_init(&dm_device.ha_lock); 1950 INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up); 1951 INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req); 1952 dm_device.host_specified_ha_region = false; 1953 1954 #ifdef CONFIG_MEMORY_HOTPLUG 1955 set_online_page_callback(&hv_online_page); 1956 init_completion(&dm_device.ol_waitevent); 1957 register_memory_notifier(&hv_memory_nb); 1958 #endif 1959 1960 hv_set_drvdata(dev, &dm_device); 1961 1962 ret = balloon_connect_vsp(dev); 1963 if (ret != 0) 1964 goto connect_error; 1965 1966 enable_page_reporting(); 1967 dm_device.state = 
DM_INITIALIZED; 1968 1969 dm_device.thread = 1970 kthread_run(dm_thread_func, &dm_device, "hv_balloon"); 1971 if (IS_ERR(dm_device.thread)) { 1972 ret = PTR_ERR(dm_device.thread); 1973 goto probe_error; 1974 } 1975 1976 hv_balloon_debugfs_init(&dm_device); 1977 1978 return 0; 1979 1980 probe_error: 1981 dm_device.state = DM_INIT_ERROR; 1982 dm_device.thread = NULL; 1983 disable_page_reporting(); 1984 vmbus_close(dev->channel); 1985 connect_error: 1986 #ifdef CONFIG_MEMORY_HOTPLUG 1987 unregister_memory_notifier(&hv_memory_nb); 1988 restore_online_page_callback(&hv_online_page); 1989 #endif 1990 return ret; 1991 } 1992 1993 static int balloon_remove(struct hv_device *dev) 1994 { 1995 struct hv_dynmem_device *dm = hv_get_drvdata(dev); 1996 struct hv_hotadd_state *has, *tmp; 1997 struct hv_hotadd_gap *gap, *tmp_gap; 1998 unsigned long flags; 1999 2000 if (dm->num_pages_ballooned != 0) 2001 pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned); 2002 2003 hv_balloon_debugfs_exit(dm); 2004 2005 cancel_work_sync(&dm->balloon_wrk.wrk); 2006 cancel_work_sync(&dm->ha_wrk.wrk); 2007 2008 kthread_stop(dm->thread); 2009 2010 /* 2011 * This is to handle the case when balloon_resume() 2012 * call has failed and some cleanup has been done as 2013 * a part of the error handling. 2014 */ 2015 if (dm_device.state != DM_INIT_ERROR) { 2016 disable_page_reporting(); 2017 vmbus_close(dev->channel); 2018 #ifdef CONFIG_MEMORY_HOTPLUG 2019 unregister_memory_notifier(&hv_memory_nb); 2020 restore_online_page_callback(&hv_online_page); 2021 #endif 2022 } 2023 2024 spin_lock_irqsave(&dm_device.ha_lock, flags); 2025 list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) { 2026 list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) { 2027 list_del(&gap->list); 2028 kfree(gap); 2029 } 2030 list_del(&has->list); 2031 kfree(has); 2032 } 2033 spin_unlock_irqrestore(&dm_device.ha_lock, flags); 2034 2035 return 0; 2036 } 2037 2038 static int balloon_suspend(struct hv_device *hv_dev) 2039 { 2040 struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev); 2041 2042 tasklet_disable(&hv_dev->channel->callback_event); 2043 2044 cancel_work_sync(&dm->balloon_wrk.wrk); 2045 cancel_work_sync(&dm->ha_wrk.wrk); 2046 2047 if (dm->thread) { 2048 kthread_stop(dm->thread); 2049 dm->thread = NULL; 2050 vmbus_close(hv_dev->channel); 2051 } 2052 2053 tasklet_enable(&hv_dev->channel->callback_event); 2054 2055 return 0; 2056 2057 } 2058 2059 static int balloon_resume(struct hv_device *dev) 2060 { 2061 int ret; 2062 2063 dm_device.state = DM_INITIALIZING; 2064 2065 ret = balloon_connect_vsp(dev); 2066 2067 if (ret != 0) 2068 goto out; 2069 2070 dm_device.thread = 2071 kthread_run(dm_thread_func, &dm_device, "hv_balloon"); 2072 if (IS_ERR(dm_device.thread)) { 2073 ret = PTR_ERR(dm_device.thread); 2074 dm_device.thread = NULL; 2075 goto close_channel; 2076 } 2077 2078 dm_device.state = DM_INITIALIZED; 2079 return 0; 2080 close_channel: 2081 vmbus_close(dev->channel); 2082 out: 2083 dm_device.state = DM_INIT_ERROR; 2084 disable_page_reporting(); 2085 #ifdef CONFIG_MEMORY_HOTPLUG 2086 unregister_memory_notifier(&hv_memory_nb); 2087 restore_online_page_callback(&hv_online_page); 2088 #endif 2089 return ret; 2090 } 2091 2092 static const struct hv_vmbus_device_id id_table[] = { 2093 /* Dynamic Memory Class ID */ 2094 /* 525074DC-8985-46e2-8057-A307DC18A502 */ 2095 { HV_DM_GUID, }, 2096 { }, 2097 }; 2098 2099 MODULE_DEVICE_TABLE(vmbus, id_table); 2100 2101 static struct hv_driver balloon_drv = { 2102 .name = "hv_balloon", 2103 .id_table = 
id_table, 2104 .probe = balloon_probe, 2105 .remove = balloon_remove, 2106 .suspend = balloon_suspend, 2107 .resume = balloon_resume, 2108 .driver = { 2109 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 2110 }, 2111 }; 2112 2113 static int __init init_balloon_drv(void) 2114 { 2115 2116 return vmbus_driver_register(&balloon_drv); 2117 } 2118 2119 module_init(init_balloon_drv); 2120 2121 MODULE_DESCRIPTION("Hyper-V Balloon"); 2122 MODULE_LICENSE("GPL"); 2123