1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Virtio balloon implementation, inspired by Dor Laor and Marcelo 4 * Tosatti's implementations. 5 * 6 * Copyright 2008 Rusty Russell IBM Corporation 7 */ 8 9 #include <linux/virtio.h> 10 #include <linux/virtio_balloon.h> 11 #include <linux/swap.h> 12 #include <linux/workqueue.h> 13 #include <linux/delay.h> 14 #include <linux/slab.h> 15 #include <linux/module.h> 16 #include <linux/balloon_compaction.h> 17 #include <linux/oom.h> 18 #include <linux/wait.h> 19 #include <linux/mm.h> 20 #include <linux/page_reporting.h> 21 22 /* 23 * Balloon device works in 4K page units. So each page is pointed to by 24 * multiple balloon pages. All memory counters in this driver are in balloon 25 * page units. 26 */ 27 #define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT) 28 #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256 29 /* Maximum number of (4k) pages to deflate on OOM notifications. */ 30 #define VIRTIO_BALLOON_OOM_NR_PAGES 256 31 #define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80 32 33 #define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \ 34 __GFP_NOMEMALLOC) 35 /* The order of free page blocks to report to host */ 36 #define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_PAGE_ORDER 37 /* The size of a free page block in bytes */ 38 #define VIRTIO_BALLOON_HINT_BLOCK_BYTES \ 39 (1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT)) 40 #define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER) 41 42 enum virtio_balloon_vq { 43 VIRTIO_BALLOON_VQ_INFLATE, 44 VIRTIO_BALLOON_VQ_DEFLATE, 45 VIRTIO_BALLOON_VQ_STATS, 46 VIRTIO_BALLOON_VQ_FREE_PAGE, 47 VIRTIO_BALLOON_VQ_REPORTING, 48 VIRTIO_BALLOON_VQ_MAX 49 }; 50 51 enum virtio_balloon_config_read { 52 VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0, 53 }; 54 55 struct virtio_balloon { 56 struct virtio_device *vdev; 57 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; 58 59 /* Balloon's own wq for cpu-intensive work items */ 60 struct workqueue_struct *balloon_wq; 61 /* The free page reporting work item submitted to the balloon wq */ 62 struct work_struct report_free_page_work; 63 64 /* The balloon servicing is delegated to a freezable workqueue. */ 65 struct work_struct update_balloon_stats_work; 66 struct work_struct update_balloon_size_work; 67 68 /* Prevent updating balloon when it is being canceled. */ 69 spinlock_t stop_update_lock; 70 bool stop_update; 71 /* Bitmap to indicate if reading the related config fields are needed */ 72 unsigned long config_read_bitmap; 73 74 /* The list of allocated free pages, waiting to be given back to mm */ 75 struct list_head free_page_list; 76 spinlock_t free_page_list_lock; 77 /* The number of free page blocks on the above list */ 78 unsigned long num_free_page_blocks; 79 /* 80 * The cmd id received from host. 81 * Read it via virtio_balloon_cmd_id_received to get the latest value 82 * sent from host. 83 */ 84 u32 cmd_id_received_cache; 85 /* The cmd id that is actively in use */ 86 __virtio32 cmd_id_active; 87 /* Buffer to store the stop sign */ 88 __virtio32 cmd_id_stop; 89 90 /* Waiting for host to ack the pages we released. */ 91 wait_queue_head_t acked; 92 93 /* Number of balloon pages we've told the Host we're not using. */ 94 unsigned int num_pages; 95 /* 96 * The pages we've told the Host we're not using are enqueued 97 * at vb_dev_info->pages list. 98 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE 99 * to num_pages above. 100 */ 101 struct balloon_dev_info vb_dev_info; 102 103 /* Synchronize access/update to this struct virtio_balloon elements */ 104 struct mutex balloon_lock; 105 106 /* The array of pfns we tell the Host about. */ 107 unsigned int num_pfns; 108 __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX]; 109 110 /* Memory statistics */ 111 struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR]; 112 113 /* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */ 114 struct shrinker *shrinker; 115 116 /* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */ 117 struct notifier_block oom_nb; 118 119 /* Free page reporting device */ 120 struct virtqueue *reporting_vq; 121 struct page_reporting_dev_info pr_dev_info; 122 123 /* State for keeping the wakeup_source active while adjusting the balloon */ 124 spinlock_t wakeup_lock; 125 bool processing_wakeup_event; 126 u32 wakeup_signal_mask; 127 }; 128 129 #define VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST (1 << 0) 130 #define VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS (1 << 1) 131 132 static const struct virtio_device_id id_table[] = { 133 { VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID }, 134 { 0 }, 135 }; 136 137 static u32 page_to_balloon_pfn(struct page *page) 138 { 139 unsigned long pfn = page_to_pfn(page); 140 141 BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT); 142 /* Convert pfn from Linux page size to balloon page size. */ 143 return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE; 144 } 145 146 static void start_wakeup_event(struct virtio_balloon *vb, u32 mask) 147 { 148 unsigned long flags; 149 150 spin_lock_irqsave(&vb->wakeup_lock, flags); 151 vb->wakeup_signal_mask |= mask; 152 if (!vb->processing_wakeup_event) { 153 vb->processing_wakeup_event = true; 154 pm_stay_awake(&vb->vdev->dev); 155 } 156 spin_unlock_irqrestore(&vb->wakeup_lock, flags); 157 } 158 159 static void process_wakeup_event(struct virtio_balloon *vb, u32 mask) 160 { 161 spin_lock_irq(&vb->wakeup_lock); 162 vb->wakeup_signal_mask &= ~mask; 163 spin_unlock_irq(&vb->wakeup_lock); 164 } 165 166 static void finish_wakeup_event(struct virtio_balloon *vb) 167 { 168 spin_lock_irq(&vb->wakeup_lock); 169 if (!vb->wakeup_signal_mask && vb->processing_wakeup_event) { 170 vb->processing_wakeup_event = false; 171 pm_relax(&vb->vdev->dev); 172 } 173 spin_unlock_irq(&vb->wakeup_lock); 174 } 175 176 static void balloon_ack(struct virtqueue *vq) 177 { 178 struct virtio_balloon *vb = vq->vdev->priv; 179 180 wake_up(&vb->acked); 181 } 182 183 static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) 184 { 185 struct scatterlist sg; 186 unsigned int len; 187 188 sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns); 189 190 /* We should always be able to add one buffer to an empty queue. */ 191 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); 192 virtqueue_kick(vq); 193 194 /* When host has read buffer, this completes via balloon_ack */ 195 wait_event(vb->acked, virtqueue_get_buf(vq, &len)); 196 197 } 198 199 static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info, 200 struct scatterlist *sg, unsigned int nents) 201 { 202 struct virtio_balloon *vb = 203 container_of(pr_dev_info, struct virtio_balloon, pr_dev_info); 204 struct virtqueue *vq = vb->reporting_vq; 205 unsigned int unused, err; 206 207 /* We should always be able to add these buffers to an empty queue. */ 208 err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN); 209 210 /* 211 * In the extremely unlikely case that something has occurred and we 212 * are able to trigger an error we will simply display a warning 213 * and exit without actually processing the pages. 214 */ 215 if (WARN_ON_ONCE(err)) 216 return err; 217 218 virtqueue_kick(vq); 219 220 /* When host has read buffer, this completes via balloon_ack */ 221 wait_event(vb->acked, virtqueue_get_buf(vq, &unused)); 222 223 return 0; 224 } 225 226 static void set_page_pfns(struct virtio_balloon *vb, 227 __virtio32 pfns[], struct page *page) 228 { 229 unsigned int i; 230 231 BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX); 232 233 /* 234 * Set balloon pfns pointing at this page. 235 * Note that the first pfn points at start of the page. 236 */ 237 for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++) 238 pfns[i] = cpu_to_virtio32(vb->vdev, 239 page_to_balloon_pfn(page) + i); 240 } 241 242 static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num) 243 { 244 unsigned int num_allocated_pages; 245 unsigned int num_pfns; 246 struct page *page; 247 LIST_HEAD(pages); 248 249 /* We can only do one array worth at a time. */ 250 num = min(num, ARRAY_SIZE(vb->pfns)); 251 252 for (num_pfns = 0; num_pfns < num; 253 num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { 254 struct page *page = balloon_page_alloc(); 255 256 if (!page) { 257 dev_info_ratelimited(&vb->vdev->dev, 258 "Out of puff! Can't get %u pages\n", 259 VIRTIO_BALLOON_PAGES_PER_PAGE); 260 /* Sleep for at least 1/5 of a second before retry. */ 261 msleep(200); 262 break; 263 } 264 265 balloon_page_push(&pages, page); 266 } 267 268 mutex_lock(&vb->balloon_lock); 269 270 vb->num_pfns = 0; 271 272 while ((page = balloon_page_pop(&pages))) { 273 balloon_page_enqueue(&vb->vb_dev_info, page); 274 275 set_page_pfns(vb, vb->pfns + vb->num_pfns, page); 276 vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE; 277 if (!virtio_has_feature(vb->vdev, 278 VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) 279 adjust_managed_page_count(page, -1); 280 vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE; 281 } 282 283 num_allocated_pages = vb->num_pfns; 284 /* Did we get any? */ 285 if (vb->num_pfns != 0) 286 tell_host(vb, vb->inflate_vq); 287 mutex_unlock(&vb->balloon_lock); 288 289 return num_allocated_pages; 290 } 291 292 static void release_pages_balloon(struct virtio_balloon *vb, 293 struct list_head *pages) 294 { 295 struct page *page, *next; 296 297 list_for_each_entry_safe(page, next, pages, lru) { 298 if (!virtio_has_feature(vb->vdev, 299 VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) 300 adjust_managed_page_count(page, 1); 301 list_del(&page->lru); 302 put_page(page); /* balloon reference */ 303 } 304 } 305 306 static unsigned int leak_balloon(struct virtio_balloon *vb, size_t num) 307 { 308 unsigned int num_freed_pages; 309 struct page *page; 310 struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info; 311 LIST_HEAD(pages); 312 313 /* We can only do one array worth at a time. */ 314 num = min(num, ARRAY_SIZE(vb->pfns)); 315 316 mutex_lock(&vb->balloon_lock); 317 /* We can't release more pages than taken */ 318 num = min(num, (size_t)vb->num_pages); 319 for (vb->num_pfns = 0; vb->num_pfns < num; 320 vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { 321 page = balloon_page_dequeue(vb_dev_info); 322 if (!page) 323 break; 324 set_page_pfns(vb, vb->pfns + vb->num_pfns, page); 325 list_add(&page->lru, &pages); 326 vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE; 327 } 328 329 num_freed_pages = vb->num_pfns; 330 /* 331 * Note that if 332 * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST); 333 * is true, we *have* to do it in this order 334 */ 335 if (vb->num_pfns != 0) 336 tell_host(vb, vb->deflate_vq); 337 release_pages_balloon(vb, &pages); 338 mutex_unlock(&vb->balloon_lock); 339 return num_freed_pages; 340 } 341 342 static inline void update_stat(struct virtio_balloon *vb, int idx, 343 u16 tag, u64 val) 344 { 345 BUG_ON(idx >= VIRTIO_BALLOON_S_NR); 346 vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag); 347 vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val); 348 } 349 350 #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) 351 352 #ifdef CONFIG_VM_EVENT_COUNTERS 353 /* Return the number of entries filled by vm events */ 354 static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb) 355 { 356 unsigned long events[NR_VM_EVENT_ITEMS]; 357 unsigned int idx = 0; 358 359 all_vm_events(events); 360 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, 361 pages_to_bytes(events[PSWPIN])); 362 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, 363 pages_to_bytes(events[PSWPOUT])); 364 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); 365 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); 366 367 #ifdef CONFIG_HUGETLB_PAGE 368 update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC, 369 events[HTLB_BUDDY_PGALLOC]); 370 update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGFAIL, 371 events[HTLB_BUDDY_PGALLOC_FAIL]); 372 #endif /* CONFIG_HUGETLB_PAGE */ 373 374 return idx; 375 } 376 #else /* CONFIG_VM_EVENT_COUNTERS */ 377 static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb) 378 { 379 return 0; 380 } 381 #endif /* CONFIG_VM_EVENT_COUNTERS */ 382 383 static unsigned int update_balloon_stats(struct virtio_balloon *vb) 384 { 385 struct sysinfo i; 386 unsigned int idx; 387 long available; 388 unsigned long caches; 389 390 idx = update_balloon_vm_stats(vb); 391 392 si_meminfo(&i); 393 available = si_mem_available(); 394 caches = global_node_page_state(NR_FILE_PAGES); 395 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, 396 pages_to_bytes(i.freeram)); 397 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, 398 pages_to_bytes(i.totalram)); 399 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL, 400 pages_to_bytes(available)); 401 update_stat(vb, idx++, VIRTIO_BALLOON_S_CACHES, 402 pages_to_bytes(caches)); 403 404 return idx; 405 } 406 407 /* 408 * While most virtqueues communicate guest-initiated requests to the hypervisor, 409 * the stats queue operates in reverse. The driver initializes the virtqueue 410 * with a single buffer. From that point forward, all conversations consist of 411 * a hypervisor request (a call to this function) which directs us to refill 412 * the virtqueue with a fresh stats buffer. Since stats collection can sleep, 413 * we delegate the job to a freezable workqueue that will do the actual work via 414 * stats_handle_request(). 415 */ 416 static void stats_request(struct virtqueue *vq) 417 { 418 struct virtio_balloon *vb = vq->vdev->priv; 419 420 spin_lock(&vb->stop_update_lock); 421 if (!vb->stop_update) { 422 start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS); 423 queue_work(system_freezable_wq, &vb->update_balloon_stats_work); 424 } 425 spin_unlock(&vb->stop_update_lock); 426 } 427 428 static void stats_handle_request(struct virtio_balloon *vb) 429 { 430 struct virtqueue *vq; 431 struct scatterlist sg; 432 unsigned int len, num_stats; 433 434 num_stats = update_balloon_stats(vb); 435 436 vq = vb->stats_vq; 437 if (!virtqueue_get_buf(vq, &len)) 438 return; 439 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats); 440 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); 441 virtqueue_kick(vq); 442 } 443 444 static inline s64 towards_target(struct virtio_balloon *vb) 445 { 446 s64 target; 447 u32 num_pages; 448 449 /* Legacy balloon config space is LE, unlike all other devices. */ 450 virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages, 451 &num_pages); 452 453 /* 454 * Aligned up to guest page size to avoid inflating and deflating 455 * balloon endlessly. 456 */ 457 target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE); 458 return target - vb->num_pages; 459 } 460 461 /* Gives back @num_to_return blocks of free pages to mm. */ 462 static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb, 463 unsigned long num_to_return) 464 { 465 struct page *page; 466 unsigned long num_returned; 467 468 spin_lock_irq(&vb->free_page_list_lock); 469 for (num_returned = 0; num_returned < num_to_return; num_returned++) { 470 page = balloon_page_pop(&vb->free_page_list); 471 if (!page) 472 break; 473 free_pages((unsigned long)page_address(page), 474 VIRTIO_BALLOON_HINT_BLOCK_ORDER); 475 } 476 vb->num_free_page_blocks -= num_returned; 477 spin_unlock_irq(&vb->free_page_list_lock); 478 479 return num_returned; 480 } 481 482 static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb) 483 { 484 if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) 485 return; 486 487 /* No need to queue the work if the bit was already set. */ 488 if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, 489 &vb->config_read_bitmap)) 490 return; 491 492 queue_work(vb->balloon_wq, &vb->report_free_page_work); 493 } 494 495 static void start_update_balloon_size(struct virtio_balloon *vb) 496 { 497 start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST); 498 queue_work(system_freezable_wq, &vb->update_balloon_size_work); 499 } 500 501 static void virtballoon_changed(struct virtio_device *vdev) 502 { 503 struct virtio_balloon *vb = vdev->priv; 504 unsigned long flags; 505 506 spin_lock_irqsave(&vb->stop_update_lock, flags); 507 if (!vb->stop_update) { 508 start_update_balloon_size(vb); 509 virtio_balloon_queue_free_page_work(vb); 510 } 511 spin_unlock_irqrestore(&vb->stop_update_lock, flags); 512 } 513 514 static void update_balloon_size(struct virtio_balloon *vb) 515 { 516 u32 actual = vb->num_pages; 517 518 /* Legacy balloon config space is LE, unlike all other devices. */ 519 virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual, 520 &actual); 521 } 522 523 static void update_balloon_stats_func(struct work_struct *work) 524 { 525 struct virtio_balloon *vb; 526 527 vb = container_of(work, struct virtio_balloon, 528 update_balloon_stats_work); 529 530 process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS); 531 stats_handle_request(vb); 532 finish_wakeup_event(vb); 533 } 534 535 static void update_balloon_size_func(struct work_struct *work) 536 { 537 struct virtio_balloon *vb; 538 s64 diff; 539 540 vb = container_of(work, struct virtio_balloon, 541 update_balloon_size_work); 542 543 process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST); 544 545 diff = towards_target(vb); 546 547 if (diff) { 548 if (diff > 0) 549 diff -= fill_balloon(vb, diff); 550 else 551 diff += leak_balloon(vb, -diff); 552 update_balloon_size(vb); 553 } 554 555 if (diff) 556 queue_work(system_freezable_wq, work); 557 else 558 finish_wakeup_event(vb); 559 } 560 561 static int init_vqs(struct virtio_balloon *vb) 562 { 563 struct virtqueue_info vqs_info[VIRTIO_BALLOON_VQ_MAX] = {}; 564 struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX]; 565 int err; 566 567 /* 568 * Inflateq and deflateq are used unconditionally. The names[] 569 * will be NULL if the related feature is not enabled, which will 570 * cause no allocation for the corresponding virtqueue in find_vqs. 571 */ 572 vqs_info[VIRTIO_BALLOON_VQ_INFLATE].callback = balloon_ack; 573 vqs_info[VIRTIO_BALLOON_VQ_INFLATE].name = "inflate"; 574 vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].callback = balloon_ack; 575 vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].name = "deflate"; 576 577 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { 578 vqs_info[VIRTIO_BALLOON_VQ_STATS].name = "stats"; 579 vqs_info[VIRTIO_BALLOON_VQ_STATS].callback = stats_request; 580 } 581 582 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) 583 vqs_info[VIRTIO_BALLOON_VQ_FREE_PAGE].name = "free_page_vq"; 584 585 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) { 586 vqs_info[VIRTIO_BALLOON_VQ_REPORTING].name = "reporting_vq"; 587 vqs_info[VIRTIO_BALLOON_VQ_REPORTING].callback = balloon_ack; 588 } 589 590 err = virtio_find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs, 591 vqs_info, NULL); 592 if (err) 593 return err; 594 595 vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE]; 596 vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE]; 597 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { 598 struct scatterlist sg; 599 unsigned int num_stats; 600 vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS]; 601 602 /* 603 * Prime this virtqueue with one buffer so the hypervisor can 604 * use it to signal us later (it can't be broken yet!). 605 */ 606 num_stats = update_balloon_stats(vb); 607 608 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats); 609 err = virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, 610 GFP_KERNEL); 611 if (err) { 612 dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n", 613 __func__); 614 return err; 615 } 616 virtqueue_kick(vb->stats_vq); 617 } 618 619 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) 620 vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE]; 621 622 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) 623 vb->reporting_vq = vqs[VIRTIO_BALLOON_VQ_REPORTING]; 624 625 return 0; 626 } 627 628 static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) 629 { 630 if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, 631 &vb->config_read_bitmap)) { 632 /* Legacy balloon config space is LE, unlike all other devices. */ 633 virtio_cread_le(vb->vdev, struct virtio_balloon_config, 634 free_page_hint_cmd_id, 635 &vb->cmd_id_received_cache); 636 } 637 638 return vb->cmd_id_received_cache; 639 } 640 641 static int send_cmd_id_start(struct virtio_balloon *vb) 642 { 643 struct scatterlist sg; 644 struct virtqueue *vq = vb->free_page_vq; 645 int err, unused; 646 647 /* Detach all the used buffers from the vq */ 648 while (virtqueue_get_buf(vq, &unused)) 649 ; 650 651 vb->cmd_id_active = cpu_to_virtio32(vb->vdev, 652 virtio_balloon_cmd_id_received(vb)); 653 sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); 654 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); 655 if (!err) 656 virtqueue_kick(vq); 657 return err; 658 } 659 660 static int send_cmd_id_stop(struct virtio_balloon *vb) 661 { 662 struct scatterlist sg; 663 struct virtqueue *vq = vb->free_page_vq; 664 int err, unused; 665 666 /* Detach all the used buffers from the vq */ 667 while (virtqueue_get_buf(vq, &unused)) 668 ; 669 670 sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop)); 671 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL); 672 if (!err) 673 virtqueue_kick(vq); 674 return err; 675 } 676 677 static int get_free_page_and_send(struct virtio_balloon *vb) 678 { 679 struct virtqueue *vq = vb->free_page_vq; 680 struct page *page; 681 struct scatterlist sg; 682 int err, unused; 683 void *p; 684 685 /* Detach all the used buffers from the vq */ 686 while (virtqueue_get_buf(vq, &unused)) 687 ; 688 689 page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG, 690 VIRTIO_BALLOON_HINT_BLOCK_ORDER); 691 /* 692 * When the allocation returns NULL, it indicates that we have got all 693 * the possible free pages, so return -EINTR to stop. 694 */ 695 if (!page) 696 return -EINTR; 697 698 p = page_address(page); 699 sg_init_one(&sg, p, VIRTIO_BALLOON_HINT_BLOCK_BYTES); 700 /* There is always 1 entry reserved for the cmd id to use. */ 701 if (vq->num_free > 1) { 702 err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL); 703 if (unlikely(err)) { 704 free_pages((unsigned long)p, 705 VIRTIO_BALLOON_HINT_BLOCK_ORDER); 706 return err; 707 } 708 virtqueue_kick(vq); 709 spin_lock_irq(&vb->free_page_list_lock); 710 balloon_page_push(&vb->free_page_list, page); 711 vb->num_free_page_blocks++; 712 spin_unlock_irq(&vb->free_page_list_lock); 713 } else { 714 /* 715 * The vq has no available entry to add this page block, so 716 * just free it. 717 */ 718 free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER); 719 } 720 721 return 0; 722 } 723 724 static int send_free_pages(struct virtio_balloon *vb) 725 { 726 int err; 727 u32 cmd_id_active; 728 729 while (1) { 730 /* 731 * If a stop id or a new cmd id was just received from host, 732 * stop the reporting. 733 */ 734 cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); 735 if (unlikely(cmd_id_active != 736 virtio_balloon_cmd_id_received(vb))) 737 break; 738 739 /* 740 * The free page blocks are allocated and sent to host one by 741 * one. 742 */ 743 err = get_free_page_and_send(vb); 744 if (err == -EINTR) 745 break; 746 else if (unlikely(err)) 747 return err; 748 } 749 750 return 0; 751 } 752 753 static void virtio_balloon_report_free_page(struct virtio_balloon *vb) 754 { 755 int err; 756 struct device *dev = &vb->vdev->dev; 757 758 /* Start by sending the received cmd id to host with an outbuf. */ 759 err = send_cmd_id_start(vb); 760 if (unlikely(err)) 761 dev_err(dev, "Failed to send a start id, err = %d\n", err); 762 763 err = send_free_pages(vb); 764 if (unlikely(err)) 765 dev_err(dev, "Failed to send a free page, err = %d\n", err); 766 767 /* End by sending a stop id to host with an outbuf. */ 768 err = send_cmd_id_stop(vb); 769 if (unlikely(err)) 770 dev_err(dev, "Failed to send a stop id, err = %d\n", err); 771 } 772 773 static void report_free_page_func(struct work_struct *work) 774 { 775 struct virtio_balloon *vb = container_of(work, struct virtio_balloon, 776 report_free_page_work); 777 u32 cmd_id_received; 778 779 cmd_id_received = virtio_balloon_cmd_id_received(vb); 780 if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { 781 /* Pass ULONG_MAX to give back all the free pages */ 782 return_free_pages_to_mm(vb, ULONG_MAX); 783 } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && 784 cmd_id_received != 785 virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) { 786 virtio_balloon_report_free_page(vb); 787 } 788 } 789 790 #ifdef CONFIG_BALLOON_COMPACTION 791 /* 792 * virtballoon_migratepage - perform the balloon page migration on behalf of 793 * a compaction thread. (called under page lock) 794 * @vb_dev_info: the balloon device 795 * @newpage: page that will replace the isolated page after migration finishes. 796 * @page : the isolated (old) page that is about to be migrated to newpage. 797 * @mode : compaction mode -- not used for balloon page migration. 798 * 799 * After a ballooned page gets isolated by compaction procedures, this is the 800 * function that performs the page migration on behalf of a compaction thread 801 * The page migration for virtio balloon is done in a simple swap fashion which 802 * follows these two macro steps: 803 * 1) insert newpage into vb->pages list and update the host about it; 804 * 2) update the host about the old page removed from vb->pages list; 805 * 806 * This function preforms the balloon page migration task. 807 * Called through movable_operations->migrate_page 808 */ 809 static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, 810 struct page *newpage, struct page *page, enum migrate_mode mode) 811 { 812 struct virtio_balloon *vb = container_of(vb_dev_info, 813 struct virtio_balloon, vb_dev_info); 814 unsigned long flags; 815 816 /* 817 * In order to avoid lock contention while migrating pages concurrently 818 * to leak_balloon() or fill_balloon() we just give up the balloon_lock 819 * this turn, as it is easier to retry the page migration later. 820 * This also prevents fill_balloon() getting stuck into a mutex 821 * recursion in the case it ends up triggering memory compaction 822 * while it is attempting to inflate the ballon. 823 */ 824 if (!mutex_trylock(&vb->balloon_lock)) 825 return -EAGAIN; 826 827 get_page(newpage); /* balloon reference */ 828 829 /* 830 * When we migrate a page to a different zone and adjusted the 831 * managed page count when inflating, we have to fixup the count of 832 * both involved zones. 833 */ 834 if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) && 835 page_zone(page) != page_zone(newpage)) { 836 adjust_managed_page_count(page, 1); 837 adjust_managed_page_count(newpage, -1); 838 } 839 840 /* balloon's page migration 1st step -- inflate "newpage" */ 841 spin_lock_irqsave(&vb_dev_info->pages_lock, flags); 842 balloon_page_insert(vb_dev_info, newpage); 843 vb_dev_info->isolated_pages--; 844 __count_vm_event(BALLOON_MIGRATE); 845 spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); 846 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; 847 set_page_pfns(vb, vb->pfns, newpage); 848 tell_host(vb, vb->inflate_vq); 849 850 /* balloon's page migration 2nd step -- deflate "page" */ 851 spin_lock_irqsave(&vb_dev_info->pages_lock, flags); 852 balloon_page_delete(page); 853 spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); 854 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; 855 set_page_pfns(vb, vb->pfns, page); 856 tell_host(vb, vb->deflate_vq); 857 858 mutex_unlock(&vb->balloon_lock); 859 860 put_page(page); /* balloon reference */ 861 862 return MIGRATEPAGE_SUCCESS; 863 } 864 #endif /* CONFIG_BALLOON_COMPACTION */ 865 866 static unsigned long shrink_free_pages(struct virtio_balloon *vb, 867 unsigned long pages_to_free) 868 { 869 unsigned long blocks_to_free, blocks_freed; 870 871 pages_to_free = round_up(pages_to_free, 872 VIRTIO_BALLOON_HINT_BLOCK_PAGES); 873 blocks_to_free = pages_to_free / VIRTIO_BALLOON_HINT_BLOCK_PAGES; 874 blocks_freed = return_free_pages_to_mm(vb, blocks_to_free); 875 876 return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES; 877 } 878 879 static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker, 880 struct shrink_control *sc) 881 { 882 struct virtio_balloon *vb = shrinker->private_data; 883 884 return shrink_free_pages(vb, sc->nr_to_scan); 885 } 886 887 static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, 888 struct shrink_control *sc) 889 { 890 struct virtio_balloon *vb = shrinker->private_data; 891 892 return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES; 893 } 894 895 static int virtio_balloon_oom_notify(struct notifier_block *nb, 896 unsigned long dummy, void *parm) 897 { 898 struct virtio_balloon *vb = container_of(nb, 899 struct virtio_balloon, oom_nb); 900 unsigned long *freed = parm; 901 902 *freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) / 903 VIRTIO_BALLOON_PAGES_PER_PAGE; 904 update_balloon_size(vb); 905 906 return NOTIFY_OK; 907 } 908 909 static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb) 910 { 911 shrinker_free(vb->shrinker); 912 } 913 914 static int virtio_balloon_register_shrinker(struct virtio_balloon *vb) 915 { 916 vb->shrinker = shrinker_alloc(0, "virtio-balloon"); 917 if (!vb->shrinker) 918 return -ENOMEM; 919 920 vb->shrinker->scan_objects = virtio_balloon_shrinker_scan; 921 vb->shrinker->count_objects = virtio_balloon_shrinker_count; 922 vb->shrinker->private_data = vb; 923 924 shrinker_register(vb->shrinker); 925 926 return 0; 927 } 928 929 static int virtballoon_probe(struct virtio_device *vdev) 930 { 931 struct virtio_balloon *vb; 932 int err; 933 934 if (!vdev->config->get) { 935 dev_err(&vdev->dev, "%s failure: config access disabled\n", 936 __func__); 937 return -EINVAL; 938 } 939 940 vdev->priv = vb = kzalloc(sizeof(*vb), GFP_KERNEL); 941 if (!vb) { 942 err = -ENOMEM; 943 goto out; 944 } 945 946 INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func); 947 INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func); 948 spin_lock_init(&vb->stop_update_lock); 949 mutex_init(&vb->balloon_lock); 950 init_waitqueue_head(&vb->acked); 951 vb->vdev = vdev; 952 953 balloon_devinfo_init(&vb->vb_dev_info); 954 955 err = init_vqs(vb); 956 if (err) 957 goto out_free_vb; 958 959 #ifdef CONFIG_BALLOON_COMPACTION 960 vb->vb_dev_info.migratepage = virtballoon_migratepage; 961 #endif 962 if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { 963 /* 964 * There is always one entry reserved for cmd id, so the ring 965 * size needs to be at least two to report free page hints. 966 */ 967 if (virtqueue_get_vring_size(vb->free_page_vq) < 2) { 968 err = -ENOSPC; 969 goto out_del_vqs; 970 } 971 vb->balloon_wq = alloc_workqueue("balloon-wq", 972 WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0); 973 if (!vb->balloon_wq) { 974 err = -ENOMEM; 975 goto out_del_vqs; 976 } 977 INIT_WORK(&vb->report_free_page_work, report_free_page_func); 978 vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP; 979 vb->cmd_id_active = cpu_to_virtio32(vb->vdev, 980 VIRTIO_BALLOON_CMD_ID_STOP); 981 vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, 982 VIRTIO_BALLOON_CMD_ID_STOP); 983 spin_lock_init(&vb->free_page_list_lock); 984 INIT_LIST_HEAD(&vb->free_page_list); 985 /* 986 * We're allowed to reuse any free pages, even if they are 987 * still to be processed by the host. 988 */ 989 err = virtio_balloon_register_shrinker(vb); 990 if (err) 991 goto out_del_balloon_wq; 992 } 993 994 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) { 995 vb->oom_nb.notifier_call = virtio_balloon_oom_notify; 996 vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY; 997 err = register_oom_notifier(&vb->oom_nb); 998 if (err < 0) 999 goto out_unregister_shrinker; 1000 } 1001 1002 if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) { 1003 /* Start with poison val of 0 representing general init */ 1004 __u32 poison_val = 0; 1005 1006 /* 1007 * Let the hypervisor know that we are expecting a 1008 * specific value to be written back in balloon pages. 1009 * 1010 * If the PAGE_POISON value was larger than a byte we would 1011 * need to byte swap poison_val here to guarantee it is 1012 * little-endian. However for now it is a single byte so we 1013 * can pass it as-is. 1014 */ 1015 if (!want_init_on_free()) 1016 memset(&poison_val, PAGE_POISON, sizeof(poison_val)); 1017 1018 virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, 1019 poison_val, &poison_val); 1020 } 1021 1022 vb->pr_dev_info.report = virtballoon_free_page_report; 1023 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) { 1024 unsigned int capacity; 1025 1026 capacity = virtqueue_get_vring_size(vb->reporting_vq); 1027 if (capacity < PAGE_REPORTING_CAPACITY) { 1028 err = -ENOSPC; 1029 goto out_unregister_oom; 1030 } 1031 1032 /* 1033 * The default page reporting order is @pageblock_order, which 1034 * corresponds to 512MB in size on ARM64 when 64KB base page 1035 * size is used. The page reporting won't be triggered if the 1036 * freeing page can't come up with a free area like that huge. 1037 * So we specify the page reporting order to 5, corresponding 1038 * to 2MB. It helps to avoid THP splitting if 4KB base page 1039 * size is used by host. 1040 * 1041 * Ideally, the page reporting order is selected based on the 1042 * host's base page size. However, it needs more work to report 1043 * that value. The hard-coded order would be fine currently. 1044 */ 1045 #if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_64K_PAGES) 1046 vb->pr_dev_info.order = 5; 1047 #endif 1048 1049 err = page_reporting_register(&vb->pr_dev_info); 1050 if (err) 1051 goto out_unregister_oom; 1052 } 1053 1054 spin_lock_init(&vb->wakeup_lock); 1055 1056 /* 1057 * The virtio balloon itself can't wake up the device, but it is 1058 * responsible for processing wakeup events passed up from the transport 1059 * layer. Wakeup sources don't support nesting/chaining calls, so we use 1060 * our own wakeup source to ensure wakeup events are properly handled 1061 * without trampling on the transport layer's wakeup source. 1062 */ 1063 device_set_wakeup_capable(&vb->vdev->dev, true); 1064 1065 virtio_device_ready(vdev); 1066 1067 if (towards_target(vb)) 1068 virtballoon_changed(vdev); 1069 return 0; 1070 1071 out_unregister_oom: 1072 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) 1073 unregister_oom_notifier(&vb->oom_nb); 1074 out_unregister_shrinker: 1075 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) 1076 virtio_balloon_unregister_shrinker(vb); 1077 out_del_balloon_wq: 1078 if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) 1079 destroy_workqueue(vb->balloon_wq); 1080 out_del_vqs: 1081 vdev->config->del_vqs(vdev); 1082 out_free_vb: 1083 kfree(vb); 1084 out: 1085 return err; 1086 } 1087 1088 static void remove_common(struct virtio_balloon *vb) 1089 { 1090 /* There might be pages left in the balloon: free them. */ 1091 while (vb->num_pages) 1092 leak_balloon(vb, vb->num_pages); 1093 update_balloon_size(vb); 1094 1095 /* There might be free pages that are being reported: release them. */ 1096 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) 1097 return_free_pages_to_mm(vb, ULONG_MAX); 1098 1099 /* Now we reset the device so we can clean up the queues. */ 1100 virtio_reset_device(vb->vdev); 1101 1102 vb->vdev->config->del_vqs(vb->vdev); 1103 } 1104 1105 static void virtballoon_remove(struct virtio_device *vdev) 1106 { 1107 struct virtio_balloon *vb = vdev->priv; 1108 1109 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) 1110 page_reporting_unregister(&vb->pr_dev_info); 1111 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) 1112 unregister_oom_notifier(&vb->oom_nb); 1113 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) 1114 virtio_balloon_unregister_shrinker(vb); 1115 spin_lock_irq(&vb->stop_update_lock); 1116 vb->stop_update = true; 1117 spin_unlock_irq(&vb->stop_update_lock); 1118 cancel_work_sync(&vb->update_balloon_size_work); 1119 cancel_work_sync(&vb->update_balloon_stats_work); 1120 1121 if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { 1122 cancel_work_sync(&vb->report_free_page_work); 1123 destroy_workqueue(vb->balloon_wq); 1124 } 1125 1126 remove_common(vb); 1127 kfree(vb); 1128 } 1129 1130 #ifdef CONFIG_PM_SLEEP 1131 static int virtballoon_freeze(struct virtio_device *vdev) 1132 { 1133 struct virtio_balloon *vb = vdev->priv; 1134 1135 /* 1136 * The workqueue is already frozen by the PM core before this 1137 * function is called. 1138 */ 1139 remove_common(vb); 1140 return 0; 1141 } 1142 1143 static int virtballoon_restore(struct virtio_device *vdev) 1144 { 1145 struct virtio_balloon *vb = vdev->priv; 1146 int ret; 1147 1148 ret = init_vqs(vdev->priv); 1149 if (ret) 1150 return ret; 1151 1152 virtio_device_ready(vdev); 1153 1154 if (towards_target(vb)) 1155 virtballoon_changed(vdev); 1156 update_balloon_size(vb); 1157 return 0; 1158 } 1159 #endif 1160 1161 static int virtballoon_validate(struct virtio_device *vdev) 1162 { 1163 /* 1164 * Inform the hypervisor that our pages are poisoned or 1165 * initialized. If we cannot do that then we should disable 1166 * page reporting as it could potentially change the contents 1167 * of our free pages. 1168 */ 1169 if (!want_init_on_free() && !page_poisoning_enabled_static()) 1170 __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON); 1171 else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) 1172 __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING); 1173 1174 __virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM); 1175 return 0; 1176 } 1177 1178 static unsigned int features[] = { 1179 VIRTIO_BALLOON_F_MUST_TELL_HOST, 1180 VIRTIO_BALLOON_F_STATS_VQ, 1181 VIRTIO_BALLOON_F_DEFLATE_ON_OOM, 1182 VIRTIO_BALLOON_F_FREE_PAGE_HINT, 1183 VIRTIO_BALLOON_F_PAGE_POISON, 1184 VIRTIO_BALLOON_F_REPORTING, 1185 }; 1186 1187 static struct virtio_driver virtio_balloon_driver = { 1188 .feature_table = features, 1189 .feature_table_size = ARRAY_SIZE(features), 1190 .driver.name = KBUILD_MODNAME, 1191 .id_table = id_table, 1192 .validate = virtballoon_validate, 1193 .probe = virtballoon_probe, 1194 .remove = virtballoon_remove, 1195 .config_changed = virtballoon_changed, 1196 #ifdef CONFIG_PM_SLEEP 1197 .freeze = virtballoon_freeze, 1198 .restore = virtballoon_restore, 1199 #endif 1200 }; 1201 1202 module_virtio_driver(virtio_balloon_driver); 1203 MODULE_DEVICE_TABLE(virtio, id_table); 1204 MODULE_DESCRIPTION("Virtio balloon driver"); 1205 MODULE_LICENSE("GPL"); 1206