1 /****************************************************************************** 2 * Xen balloon driver - enables returning/claiming memory to/from Xen. 3 * 4 * Copyright (c) 2003, B Dragovic 5 * Copyright (c) 2003-2004, M Williamson, K Fraser 6 * Copyright (c) 2005 Dan M. Smith, IBM Corporation 7 * Copyright (c) 2010 Daniel Kiper 8 * 9 * Memory hotplug support was written by Daniel Kiper. Work on 10 * it was sponsored by Google under Google Summer of Code 2010 11 * program. Jeremy Fitzhardinge from Citrix was the mentor for 12 * this project. 13 * 14 * This program is free software; you can redistribute it and/or 15 * modify it under the terms of the GNU General Public License version 2 16 * as published by the Free Software Foundation; or, when distributed 17 * separately from the Linux kernel or incorporated into other 18 * software packages, subject to the following license: 19 * 20 * Permission is hereby granted, free of charge, to any person obtaining a copy 21 * of this source file (the "Software"), to deal in the Software without 22 * restriction, including without limitation the rights to use, copy, modify, 23 * merge, publish, distribute, sublicense, and/or sell copies of the Software, 24 * and to permit persons to whom the Software is furnished to do so, subject to 25 * the following conditions: 26 * 27 * The above copyright notice and this permission notice shall be included in 28 * all copies or substantial portions of the Software. 29 * 30 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 31 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 32 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 33 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 34 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 35 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 36 * IN THE SOFTWARE. 37 */ 38 39 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt 40 41 #include <linux/cpu.h> 42 #include <linux/kernel.h> 43 #include <linux/sched.h> 44 #include <linux/cred.h> 45 #include <linux/errno.h> 46 #include <linux/mm.h> 47 #include <linux/memblock.h> 48 #include <linux/pagemap.h> 49 #include <linux/highmem.h> 50 #include <linux/mutex.h> 51 #include <linux/list.h> 52 #include <linux/gfp.h> 53 #include <linux/notifier.h> 54 #include <linux/memory.h> 55 #include <linux/memory_hotplug.h> 56 #include <linux/percpu-defs.h> 57 #include <linux/slab.h> 58 #include <linux/sysctl.h> 59 60 #include <asm/page.h> 61 #include <asm/pgalloc.h> 62 #include <asm/pgtable.h> 63 #include <asm/tlb.h> 64 65 #include <asm/xen/hypervisor.h> 66 #include <asm/xen/hypercall.h> 67 68 #include <xen/xen.h> 69 #include <xen/interface/xen.h> 70 #include <xen/interface/memory.h> 71 #include <xen/balloon.h> 72 #include <xen/features.h> 73 #include <xen/page.h> 74 #include <xen/mem-reservation.h> 75 76 static int xen_hotplug_unpopulated; 77 78 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG 79 80 static int zero; 81 static int one = 1; 82 83 static struct ctl_table balloon_table[] = { 84 { 85 .procname = "hotplug_unpopulated", 86 .data = &xen_hotplug_unpopulated, 87 .maxlen = sizeof(int), 88 .mode = 0644, 89 .proc_handler = proc_dointvec_minmax, 90 .extra1 = &zero, 91 .extra2 = &one, 92 }, 93 { } 94 }; 95 96 static struct ctl_table balloon_root[] = { 97 { 98 .procname = "balloon", 99 .mode = 0555, 100 .child = balloon_table, 101 }, 102 { } 103 }; 104 105 static struct ctl_table xen_root[] = { 106 { 107 .procname = "xen", 108 .mode = 0555, 109 .child = balloon_root, 110 }, 111 { } 112 }; 113 114 #endif 115 116 /* 117 * Use one extent per PAGE_SIZE to avoid to break down the page into 118 * multiple frame. 119 */ 120 #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1) 121 122 /* 123 * balloon_process() state: 124 * 125 * BP_DONE: done or nothing to do, 126 * BP_WAIT: wait to be rescheduled, 127 * BP_EAGAIN: error, go to sleep, 128 * BP_ECANCELED: error, balloon operation canceled. 129 */ 130 131 enum bp_state { 132 BP_DONE, 133 BP_WAIT, 134 BP_EAGAIN, 135 BP_ECANCELED 136 }; 137 138 139 static DEFINE_MUTEX(balloon_mutex); 140 141 struct balloon_stats balloon_stats; 142 EXPORT_SYMBOL_GPL(balloon_stats); 143 144 /* We increase/decrease in batches which fit in a page */ 145 static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)]; 146 147 148 /* List of ballooned pages, threaded through the mem_map array. */ 149 static LIST_HEAD(ballooned_pages); 150 static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); 151 152 /* Main work function, always executed in process context. */ 153 static void balloon_process(struct work_struct *work); 154 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); 155 156 /* When ballooning out (allocating memory to return to Xen) we don't really 157 want the kernel to try too hard since that can trigger the oom killer. */ 158 #define GFP_BALLOON \ 159 (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC) 160 161 /* balloon_append: add the given page to the balloon. */ 162 static void __balloon_append(struct page *page) 163 { 164 /* Lowmem is re-populated first, so highmem pages go at list tail. */ 165 if (PageHighMem(page)) { 166 list_add_tail(&page->lru, &ballooned_pages); 167 balloon_stats.balloon_high++; 168 } else { 169 list_add(&page->lru, &ballooned_pages); 170 balloon_stats.balloon_low++; 171 } 172 wake_up(&balloon_wq); 173 } 174 175 static void balloon_append(struct page *page) 176 { 177 __balloon_append(page); 178 } 179 180 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ 181 static struct page *balloon_retrieve(bool require_lowmem) 182 { 183 struct page *page; 184 185 if (list_empty(&ballooned_pages)) 186 return NULL; 187 188 page = list_entry(ballooned_pages.next, struct page, lru); 189 if (require_lowmem && PageHighMem(page)) 190 return NULL; 191 list_del(&page->lru); 192 193 if (PageHighMem(page)) 194 balloon_stats.balloon_high--; 195 else 196 balloon_stats.balloon_low--; 197 198 return page; 199 } 200 201 static struct page *balloon_next_page(struct page *page) 202 { 203 struct list_head *next = page->lru.next; 204 if (next == &ballooned_pages) 205 return NULL; 206 return list_entry(next, struct page, lru); 207 } 208 209 static enum bp_state update_schedule(enum bp_state state) 210 { 211 if (state == BP_WAIT) 212 return BP_WAIT; 213 214 if (state == BP_ECANCELED) 215 return BP_ECANCELED; 216 217 if (state == BP_DONE) { 218 balloon_stats.schedule_delay = 1; 219 balloon_stats.retry_count = 1; 220 return BP_DONE; 221 } 222 223 ++balloon_stats.retry_count; 224 225 if (balloon_stats.max_retry_count != RETRY_UNLIMITED && 226 balloon_stats.retry_count > balloon_stats.max_retry_count) { 227 balloon_stats.schedule_delay = 1; 228 balloon_stats.retry_count = 1; 229 return BP_ECANCELED; 230 } 231 232 balloon_stats.schedule_delay <<= 1; 233 234 if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay) 235 balloon_stats.schedule_delay = balloon_stats.max_schedule_delay; 236 237 return BP_EAGAIN; 238 } 239 240 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG 241 static void release_memory_resource(struct resource *resource) 242 { 243 if (!resource) 244 return; 245 246 /* 247 * No need to reset region to identity mapped since we now 248 * know that no I/O can be in this region 249 */ 250 release_resource(resource); 251 kfree(resource); 252 } 253 254 static struct resource *additional_memory_resource(phys_addr_t size) 255 { 256 struct resource *res; 257 int ret; 258 259 res = kzalloc(sizeof(*res), GFP_KERNEL); 260 if (!res) 261 return NULL; 262 263 res->name = "System RAM"; 264 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 265 266 ret = allocate_resource(&iomem_resource, res, 267 size, 0, -1, 268 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); 269 if (ret < 0) { 270 pr_err("Cannot allocate new System RAM resource\n"); 271 kfree(res); 272 return NULL; 273 } 274 275 #ifdef CONFIG_SPARSEMEM 276 { 277 unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT); 278 unsigned long pfn = res->start >> PAGE_SHIFT; 279 280 if (pfn > limit) { 281 pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", 282 pfn, limit); 283 release_memory_resource(res); 284 return NULL; 285 } 286 } 287 #endif 288 289 return res; 290 } 291 292 static enum bp_state reserve_additional_memory(void) 293 { 294 long credit; 295 struct resource *resource; 296 int nid, rc; 297 unsigned long balloon_hotplug; 298 299 credit = balloon_stats.target_pages + balloon_stats.target_unpopulated 300 - balloon_stats.total_pages; 301 302 /* 303 * Already hotplugged enough pages? Wait for them to be 304 * onlined. 305 */ 306 if (credit <= 0) 307 return BP_WAIT; 308 309 balloon_hotplug = round_up(credit, PAGES_PER_SECTION); 310 311 resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE); 312 if (!resource) 313 goto err; 314 315 nid = memory_add_physaddr_to_nid(resource->start); 316 317 #ifdef CONFIG_XEN_HAVE_PVMMU 318 /* 319 * We don't support PV MMU when Linux and Xen is using 320 * different page granularity. 321 */ 322 BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); 323 324 /* 325 * add_memory() will build page tables for the new memory so 326 * the p2m must contain invalid entries so the correct 327 * non-present PTEs will be written. 328 * 329 * If a failure occurs, the original (identity) p2m entries 330 * are not restored since this region is now known not to 331 * conflict with any devices. 332 */ 333 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 334 unsigned long pfn, i; 335 336 pfn = PFN_DOWN(resource->start); 337 for (i = 0; i < balloon_hotplug; i++) { 338 if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { 339 pr_warn("set_phys_to_machine() failed, no memory added\n"); 340 goto err; 341 } 342 } 343 } 344 #endif 345 346 /* 347 * add_memory_resource() will call online_pages() which in its turn 348 * will call xen_online_page() callback causing deadlock if we don't 349 * release balloon_mutex here. Unlocking here is safe because the 350 * callers drop the mutex before trying again. 351 */ 352 mutex_unlock(&balloon_mutex); 353 /* add_memory_resource() requires the device_hotplug lock */ 354 lock_device_hotplug(); 355 rc = add_memory_resource(nid, resource); 356 unlock_device_hotplug(); 357 mutex_lock(&balloon_mutex); 358 359 if (rc) { 360 pr_warn("Cannot add additional memory (%i)\n", rc); 361 goto err; 362 } 363 364 balloon_stats.total_pages += balloon_hotplug; 365 366 return BP_WAIT; 367 err: 368 release_memory_resource(resource); 369 return BP_ECANCELED; 370 } 371 372 static void xen_online_page(struct page *page, unsigned int order) 373 { 374 unsigned long i, size = (1 << order); 375 unsigned long start_pfn = page_to_pfn(page); 376 struct page *p; 377 378 pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn); 379 mutex_lock(&balloon_mutex); 380 for (i = 0; i < size; i++) { 381 p = pfn_to_page(start_pfn + i); 382 __online_page_set_limits(p); 383 __SetPageOffline(p); 384 __balloon_append(p); 385 } 386 mutex_unlock(&balloon_mutex); 387 } 388 389 static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v) 390 { 391 if (val == MEM_ONLINE) 392 schedule_delayed_work(&balloon_worker, 0); 393 394 return NOTIFY_OK; 395 } 396 397 static struct notifier_block xen_memory_nb = { 398 .notifier_call = xen_memory_notifier, 399 .priority = 0 400 }; 401 #else 402 static enum bp_state reserve_additional_memory(void) 403 { 404 balloon_stats.target_pages = balloon_stats.current_pages; 405 return BP_ECANCELED; 406 } 407 #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ 408 409 static long current_credit(void) 410 { 411 return balloon_stats.target_pages - balloon_stats.current_pages; 412 } 413 414 static bool balloon_is_inflated(void) 415 { 416 return balloon_stats.balloon_low || balloon_stats.balloon_high; 417 } 418 419 static enum bp_state increase_reservation(unsigned long nr_pages) 420 { 421 int rc; 422 unsigned long i; 423 struct page *page; 424 425 if (nr_pages > ARRAY_SIZE(frame_list)) 426 nr_pages = ARRAY_SIZE(frame_list); 427 428 page = list_first_entry_or_null(&ballooned_pages, struct page, lru); 429 for (i = 0; i < nr_pages; i++) { 430 if (!page) { 431 nr_pages = i; 432 break; 433 } 434 435 frame_list[i] = page_to_xen_pfn(page); 436 page = balloon_next_page(page); 437 } 438 439 rc = xenmem_reservation_increase(nr_pages, frame_list); 440 if (rc <= 0) 441 return BP_EAGAIN; 442 443 for (i = 0; i < rc; i++) { 444 page = balloon_retrieve(false); 445 BUG_ON(page == NULL); 446 447 xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]); 448 449 /* Relinquish the page back to the allocator. */ 450 __ClearPageOffline(page); 451 free_reserved_page(page); 452 } 453 454 balloon_stats.current_pages += rc; 455 456 return BP_DONE; 457 } 458 459 static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) 460 { 461 enum bp_state state = BP_DONE; 462 unsigned long i; 463 struct page *page, *tmp; 464 int ret; 465 LIST_HEAD(pages); 466 467 if (nr_pages > ARRAY_SIZE(frame_list)) 468 nr_pages = ARRAY_SIZE(frame_list); 469 470 for (i = 0; i < nr_pages; i++) { 471 page = alloc_page(gfp); 472 if (page == NULL) { 473 nr_pages = i; 474 state = BP_EAGAIN; 475 break; 476 } 477 __SetPageOffline(page); 478 adjust_managed_page_count(page, -1); 479 xenmem_reservation_scrub_page(page); 480 list_add(&page->lru, &pages); 481 } 482 483 /* 484 * Ensure that ballooned highmem pages don't have kmaps. 485 * 486 * Do this before changing the p2m as kmap_flush_unused() 487 * reads PTEs to obtain pages (and hence needs the original 488 * p2m entry). 489 */ 490 kmap_flush_unused(); 491 492 /* 493 * Setup the frame, update direct mapping, invalidate P2M, 494 * and add to balloon. 495 */ 496 i = 0; 497 list_for_each_entry_safe(page, tmp, &pages, lru) { 498 frame_list[i++] = xen_page_to_gfn(page); 499 500 xenmem_reservation_va_mapping_reset(1, &page); 501 502 list_del(&page->lru); 503 504 balloon_append(page); 505 } 506 507 flush_tlb_all(); 508 509 ret = xenmem_reservation_decrease(nr_pages, frame_list); 510 BUG_ON(ret != nr_pages); 511 512 balloon_stats.current_pages -= nr_pages; 513 514 return state; 515 } 516 517 /* 518 * As this is a work item it is guaranteed to run as a single instance only. 519 * We may of course race updates of the target counts (which are protected 520 * by the balloon lock), or with changes to the Xen hard limit, but we will 521 * recover from these in time. 522 */ 523 static void balloon_process(struct work_struct *work) 524 { 525 enum bp_state state = BP_DONE; 526 long credit; 527 528 529 do { 530 mutex_lock(&balloon_mutex); 531 532 credit = current_credit(); 533 534 if (credit > 0) { 535 if (balloon_is_inflated()) 536 state = increase_reservation(credit); 537 else 538 state = reserve_additional_memory(); 539 } 540 541 if (credit < 0) 542 state = decrease_reservation(-credit, GFP_BALLOON); 543 544 state = update_schedule(state); 545 546 mutex_unlock(&balloon_mutex); 547 548 cond_resched(); 549 550 } while (credit && state == BP_DONE); 551 552 /* Schedule more work if there is some still to be done. */ 553 if (state == BP_EAGAIN) 554 schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); 555 } 556 557 /* Resets the Xen limit, sets new target, and kicks off processing. */ 558 void balloon_set_new_target(unsigned long target) 559 { 560 /* No need for lock. Not read-modify-write updates. */ 561 balloon_stats.target_pages = target; 562 schedule_delayed_work(&balloon_worker, 0); 563 } 564 EXPORT_SYMBOL_GPL(balloon_set_new_target); 565 566 static int add_ballooned_pages(int nr_pages) 567 { 568 enum bp_state st; 569 570 if (xen_hotplug_unpopulated) { 571 st = reserve_additional_memory(); 572 if (st != BP_ECANCELED) { 573 mutex_unlock(&balloon_mutex); 574 wait_event(balloon_wq, 575 !list_empty(&ballooned_pages)); 576 mutex_lock(&balloon_mutex); 577 return 0; 578 } 579 } 580 581 st = decrease_reservation(nr_pages, GFP_USER); 582 if (st != BP_DONE) 583 return -ENOMEM; 584 585 return 0; 586 } 587 588 /** 589 * alloc_xenballooned_pages - get pages that have been ballooned out 590 * @nr_pages: Number of pages to get 591 * @pages: pages returned 592 * @return 0 on success, error otherwise 593 */ 594 int alloc_xenballooned_pages(int nr_pages, struct page **pages) 595 { 596 int pgno = 0; 597 struct page *page; 598 int ret; 599 600 mutex_lock(&balloon_mutex); 601 602 balloon_stats.target_unpopulated += nr_pages; 603 604 while (pgno < nr_pages) { 605 page = balloon_retrieve(true); 606 if (page) { 607 __ClearPageOffline(page); 608 pages[pgno++] = page; 609 #ifdef CONFIG_XEN_HAVE_PVMMU 610 /* 611 * We don't support PV MMU when Linux and Xen is using 612 * different page granularity. 613 */ 614 BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); 615 616 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 617 ret = xen_alloc_p2m_entry(page_to_pfn(page)); 618 if (ret < 0) 619 goto out_undo; 620 } 621 #endif 622 } else { 623 ret = add_ballooned_pages(nr_pages - pgno); 624 if (ret < 0) 625 goto out_undo; 626 } 627 } 628 mutex_unlock(&balloon_mutex); 629 return 0; 630 out_undo: 631 mutex_unlock(&balloon_mutex); 632 free_xenballooned_pages(pgno, pages); 633 return ret; 634 } 635 EXPORT_SYMBOL(alloc_xenballooned_pages); 636 637 /** 638 * free_xenballooned_pages - return pages retrieved with get_ballooned_pages 639 * @nr_pages: Number of pages 640 * @pages: pages to return 641 */ 642 void free_xenballooned_pages(int nr_pages, struct page **pages) 643 { 644 int i; 645 646 mutex_lock(&balloon_mutex); 647 648 for (i = 0; i < nr_pages; i++) { 649 if (pages[i]) { 650 __SetPageOffline(pages[i]); 651 balloon_append(pages[i]); 652 } 653 } 654 655 balloon_stats.target_unpopulated -= nr_pages; 656 657 /* The balloon may be too large now. Shrink it if needed. */ 658 if (current_credit()) 659 schedule_delayed_work(&balloon_worker, 0); 660 661 mutex_unlock(&balloon_mutex); 662 } 663 EXPORT_SYMBOL(free_xenballooned_pages); 664 665 #ifdef CONFIG_XEN_PV 666 static void __init balloon_add_region(unsigned long start_pfn, 667 unsigned long pages) 668 { 669 unsigned long pfn, extra_pfn_end; 670 struct page *page; 671 672 /* 673 * If the amount of usable memory has been limited (e.g., with 674 * the 'mem' command line parameter), don't add pages beyond 675 * this limit. 676 */ 677 extra_pfn_end = min(max_pfn, start_pfn + pages); 678 679 for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) { 680 page = pfn_to_page(pfn); 681 /* totalram_pages and totalhigh_pages do not 682 include the boot-time balloon extension, so 683 don't subtract from it. */ 684 __balloon_append(page); 685 } 686 687 balloon_stats.total_pages += extra_pfn_end - start_pfn; 688 } 689 #endif 690 691 static int __init balloon_init(void) 692 { 693 if (!xen_domain()) 694 return -ENODEV; 695 696 pr_info("Initialising balloon driver\n"); 697 698 #ifdef CONFIG_XEN_PV 699 balloon_stats.current_pages = xen_pv_domain() 700 ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn) 701 : get_num_physpages(); 702 #else 703 balloon_stats.current_pages = get_num_physpages(); 704 #endif 705 balloon_stats.target_pages = balloon_stats.current_pages; 706 balloon_stats.balloon_low = 0; 707 balloon_stats.balloon_high = 0; 708 balloon_stats.total_pages = balloon_stats.current_pages; 709 710 balloon_stats.schedule_delay = 1; 711 balloon_stats.max_schedule_delay = 32; 712 balloon_stats.retry_count = 1; 713 balloon_stats.max_retry_count = RETRY_UNLIMITED; 714 715 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG 716 set_online_page_callback(&xen_online_page); 717 register_memory_notifier(&xen_memory_nb); 718 register_sysctl_table(xen_root); 719 #endif 720 721 #ifdef CONFIG_XEN_PV 722 { 723 int i; 724 725 /* 726 * Initialize the balloon with pages from the extra memory 727 * regions (see arch/x86/xen/setup.c). 728 */ 729 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) 730 if (xen_extra_mem[i].n_pfns) 731 balloon_add_region(xen_extra_mem[i].start_pfn, 732 xen_extra_mem[i].n_pfns); 733 } 734 #endif 735 736 /* Init the xen-balloon driver. */ 737 xen_balloon_init(); 738 739 return 0; 740 } 741 subsys_initcall(balloon_init); 742