// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio balloon implementation, inspired by Dor Laor and Marcelo
 * Tosatti's implementations.
 *
 * Copyright 2008 Rusty Russell IBM Corporation
 */

#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pseudo_fs.h>
#include <linux/page_reporting.h>

/*
 * Balloon device works in 4K page units.  So each page is pointed to by
 * multiple balloon pages.  All memory counters in this driver are in balloon
 * page units.
 */
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
/* Maximum number of (4k) pages to deflate on OOM notifications. */
#define VIRTIO_BALLOON_OOM_NR_PAGES 256
#define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80

#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
                                             __GFP_NOMEMALLOC)
/* The order of free page blocks to report to host */
#define VIRTIO_BALLOON_HINT_BLOCK_ORDER (MAX_ORDER - 1)
/* The size of a free page block in bytes */
#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
        (1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
#define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER)
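/*
 * With 4K pages and the common MAX_ORDER of 11, the hint block order above
 * works out to 10, so each free page block reported to the host covers
 * 1024 pages, i.e. 4 MiB.
 */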

#ifdef CONFIG_BALLOON_COMPACTION
static struct vfsmount *balloon_mnt;
#endif

enum virtio_balloon_vq {
        VIRTIO_BALLOON_VQ_INFLATE,
        VIRTIO_BALLOON_VQ_DEFLATE,
        VIRTIO_BALLOON_VQ_STATS,
        VIRTIO_BALLOON_VQ_FREE_PAGE,
        VIRTIO_BALLOON_VQ_REPORTING,
        VIRTIO_BALLOON_VQ_MAX
};

enum virtio_balloon_config_read {
        VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
};

struct virtio_balloon {
        struct virtio_device *vdev;
        struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;

        /* Balloon's own wq for cpu-intensive work items */
        struct workqueue_struct *balloon_wq;
        /* The free page reporting work item submitted to the balloon wq */
        struct work_struct report_free_page_work;

        /* The balloon servicing is delegated to a freezable workqueue. */
        struct work_struct update_balloon_stats_work;
        struct work_struct update_balloon_size_work;

        /* Prevent updating balloon when it is being canceled. */
        spinlock_t stop_update_lock;
        bool stop_update;
        /* Bitmap to indicate if reading the related config fields are needed */
        unsigned long config_read_bitmap;

        /* The list of allocated free pages, waiting to be given back to mm */
        struct list_head free_page_list;
        spinlock_t free_page_list_lock;
        /* The number of free page blocks on the above list */
        unsigned long num_free_page_blocks;
        /*
         * The cmd id received from host.
         * Read it via virtio_balloon_cmd_id_received to get the latest value
         * sent from host.
         */
        u32 cmd_id_received_cache;
        /* The cmd id that is actively in use */
        __virtio32 cmd_id_active;
        /* Buffer to store the stop sign */
        __virtio32 cmd_id_stop;

        /* Waiting for host to ack the pages we released. */
        wait_queue_head_t acked;

        /* Number of balloon pages we've told the Host we're not using. */
        unsigned int num_pages;
        /*
         * The pages we've told the Host we're not using are enqueued
         * at vb_dev_info->pages list.
         * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
         * to num_pages above.
         */
        struct balloon_dev_info vb_dev_info;

        /* Synchronize access/update to this struct virtio_balloon elements */
        struct mutex balloon_lock;

        /* The array of pfns we tell the Host about. */
        unsigned int num_pfns;
        __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];

        /* Memory statistics */
        struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];

        /* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
        struct shrinker shrinker;

        /* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
        struct notifier_block oom_nb;

        /* Free page reporting device */
        struct virtqueue *reporting_vq;
        struct page_reporting_dev_info pr_dev_info;
};

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static u32 page_to_balloon_pfn(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);

        BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
        /* Convert pfn from Linux page size to balloon page size. */
        return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}

static void balloon_ack(struct virtqueue *vq)
{
        struct virtio_balloon *vb = vq->vdev->priv;

        wake_up(&vb->acked);
}

static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
        struct scatterlist sg;
        unsigned int len;

        sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

        /* We should always be able to add one buffer to an empty queue. */
        virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
        virtqueue_kick(vq);

        /* When host has read buffer, this completes via balloon_ack */
        wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}

static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info,
                                        struct scatterlist *sg, unsigned int nents)
{
        struct virtio_balloon *vb =
                container_of(pr_dev_info, struct virtio_balloon, pr_dev_info);
        struct virtqueue *vq = vb->reporting_vq;
        unsigned int unused, err;

        /* We should always be able to add these buffers to an empty queue. */
        err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN);

        /*
         * In the extremely unlikely case that something has occurred and we
         * are able to trigger an error we will simply display a warning
         * and exit without actually processing the pages.
         */
        if (WARN_ON_ONCE(err))
                return err;

        virtqueue_kick(vq);

        /* When host has read buffer, this completes via balloon_ack */
        wait_event(vb->acked, virtqueue_get_buf(vq, &unused));

        return 0;
}

static void set_page_pfns(struct virtio_balloon *vb,
                          __virtio32 pfns[], struct page *page)
{
        unsigned int i;

        BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX);

        /*
         * Set balloon pfns pointing at this page.
         * Note that the first pfn points at start of the page.
         */
        for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
                pfns[i] = cpu_to_virtio32(vb->vdev,
                                          page_to_balloon_pfn(page) + i);
}

static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
{
        unsigned num_allocated_pages;
        unsigned num_pfns;
        struct page *page;
        LIST_HEAD(pages);

        /* We can only do one array worth at a time. */
        num = min(num, ARRAY_SIZE(vb->pfns));

        for (num_pfns = 0; num_pfns < num;
             num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
                struct page *page = balloon_page_alloc();

                if (!page) {
                        dev_info_ratelimited(&vb->vdev->dev,
                                             "Out of puff! Can't get %u pages\n",
                                             VIRTIO_BALLOON_PAGES_PER_PAGE);
                        /* Sleep for at least 1/5 of a second before retry. */
                        msleep(200);
                        break;
                }

                balloon_page_push(&pages, page);
        }

        mutex_lock(&vb->balloon_lock);

        vb->num_pfns = 0;

        while ((page = balloon_page_pop(&pages))) {
                balloon_page_enqueue(&vb->vb_dev_info, page);

                set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
                vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
                if (!virtio_has_feature(vb->vdev,
                                        VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                        adjust_managed_page_count(page, -1);
                vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
        }

        num_allocated_pages = vb->num_pfns;
        /* Did we get any? */
        if (vb->num_pfns != 0)
                tell_host(vb, vb->inflate_vq);
        mutex_unlock(&vb->balloon_lock);

        return num_allocated_pages;
}

static void release_pages_balloon(struct virtio_balloon *vb,
                                  struct list_head *pages)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, pages, lru) {
                if (!virtio_has_feature(vb->vdev,
                                        VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                        adjust_managed_page_count(page, 1);
                list_del(&page->lru);
                put_page(page); /* balloon reference */
        }
}

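/*
 * Like fill_balloon() above, leak_balloon() moves at most one pfn array
 * worth of pages per call (VIRTIO_BALLOON_ARRAY_PFNS_MAX = 256 balloon
 * pages, i.e. 1 MiB); update_balloon_size_func() requeues itself until
 * the target is reached.
 */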
static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
{
        unsigned num_freed_pages;
        struct page *page;
        struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
        LIST_HEAD(pages);

        /* We can only do one array worth at a time. */
        num = min(num, ARRAY_SIZE(vb->pfns));

        mutex_lock(&vb->balloon_lock);
        /* We can't release more pages than taken */
        num = min(num, (size_t)vb->num_pages);
        for (vb->num_pfns = 0; vb->num_pfns < num;
             vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
                page = balloon_page_dequeue(vb_dev_info);
                if (!page)
                        break;
                set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
                list_add(&page->lru, &pages);
                vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
        }

        num_freed_pages = vb->num_pfns;
        /*
         * Note that if
         * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
         * is true, we *have* to do it in this order
         */
        if (vb->num_pfns != 0)
                tell_host(vb, vb->deflate_vq);
        release_pages_balloon(vb, &pages);
        mutex_unlock(&vb->balloon_lock);
        return num_freed_pages;
}

static inline void update_stat(struct virtio_balloon *vb, int idx,
                               u16 tag, u64 val)
{
        BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
        vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
        vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
}

#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)

static unsigned int update_balloon_stats(struct virtio_balloon *vb)
{
        unsigned long events[NR_VM_EVENT_ITEMS];
        struct sysinfo i;
        unsigned int idx = 0;
        long available;
        unsigned long caches;

        all_vm_events(events);
        si_meminfo(&i);

        available = si_mem_available();
        caches = global_node_page_state(NR_FILE_PAGES);

#ifdef CONFIG_VM_EVENT_COUNTERS
        update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
                    pages_to_bytes(events[PSWPIN]));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
                    pages_to_bytes(events[PSWPOUT]));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
#ifdef CONFIG_HUGETLB_PAGE
        update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
                    events[HTLB_BUDDY_PGALLOC]);
        update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGFAIL,
                    events[HTLB_BUDDY_PGALLOC_FAIL]);
#endif
#endif
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
                    pages_to_bytes(i.freeram));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
                    pages_to_bytes(i.totalram));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
                    pages_to_bytes(available));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_CACHES,
                    pages_to_bytes(caches));

        return idx;
}

/*
 * While most virtqueues communicate guest-initiated requests to the hypervisor,
 * the stats queue operates in reverse.  The driver initializes the virtqueue
 * with a single buffer.  From that point forward, all conversations consist of
 * a hypervisor request (a call to this function) which directs us to refill
 * the virtqueue with a fresh stats buffer.  Since stats collection can sleep,
 * we delegate the job to a freezable workqueue that will do the actual work via
 * stats_handle_request().
 */
static void stats_request(struct virtqueue *vq)
{
        struct virtio_balloon *vb = vq->vdev->priv;

        spin_lock(&vb->stop_update_lock);
        if (!vb->stop_update)
                queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
        spin_unlock(&vb->stop_update_lock);
}

static void stats_handle_request(struct virtio_balloon *vb)
{
        struct virtqueue *vq;
        struct scatterlist sg;
        unsigned int len, num_stats;

        num_stats = update_balloon_stats(vb);

        vq = vb->stats_vq;
        if (!virtqueue_get_buf(vq, &len))
                return;
        sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
        virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
        virtqueue_kick(vq);
}

static inline s64 towards_target(struct virtio_balloon *vb)
{
        s64 target;
        u32 num_pages;

        /* Legacy balloon config space is LE, unlike all other devices. */
        virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
                        &num_pages);

        target = num_pages;
        return target - vb->num_pages;
}

/* Gives back @num_to_return blocks of free pages to mm. */
static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
                                             unsigned long num_to_return)
{
        struct page *page;
        unsigned long num_returned;

        spin_lock_irq(&vb->free_page_list_lock);
        for (num_returned = 0; num_returned < num_to_return; num_returned++) {
                page = balloon_page_pop(&vb->free_page_list);
                if (!page)
                        break;
                free_pages((unsigned long)page_address(page),
                           VIRTIO_BALLOON_HINT_BLOCK_ORDER);
        }
        vb->num_free_page_blocks -= num_returned;
        spin_unlock_irq(&vb->free_page_list_lock);

        return num_returned;
}

static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
{
        if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
                return;

        /* No need to queue the work if the bit was already set. */
        if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
                             &vb->config_read_bitmap))
                return;

        queue_work(vb->balloon_wq, &vb->report_free_page_work);
}

static void virtballoon_changed(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;
        unsigned long flags;

        spin_lock_irqsave(&vb->stop_update_lock, flags);
        if (!vb->stop_update) {
                queue_work(system_freezable_wq,
                           &vb->update_balloon_size_work);
                virtio_balloon_queue_free_page_work(vb);
        }
        spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}

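/*
 * Note that the config space 'actual' field written below, like the
 * 'num_pages' target read in towards_target(), is in 4K balloon page
 * units; vb->num_pages is kept in the same unit.
 */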
static void update_balloon_size(struct virtio_balloon *vb)
{
        u32 actual = vb->num_pages;

        /* Legacy balloon config space is LE, unlike all other devices. */
        virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual,
                         &actual);
}

static void update_balloon_stats_func(struct work_struct *work)
{
        struct virtio_balloon *vb;

        vb = container_of(work, struct virtio_balloon,
                          update_balloon_stats_work);
        stats_handle_request(vb);
}

static void update_balloon_size_func(struct work_struct *work)
{
        struct virtio_balloon *vb;
        s64 diff;

        vb = container_of(work, struct virtio_balloon,
                          update_balloon_size_work);
        diff = towards_target(vb);

        if (!diff)
                return;

        if (diff > 0)
                diff -= fill_balloon(vb, diff);
        else
                diff += leak_balloon(vb, -diff);
        update_balloon_size(vb);

        if (diff)
                queue_work(system_freezable_wq, work);
}

static int init_vqs(struct virtio_balloon *vb)
{
        struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
        vq_callback_t *callbacks[VIRTIO_BALLOON_VQ_MAX];
        const char *names[VIRTIO_BALLOON_VQ_MAX];
        int err;

        /*
         * Inflateq and deflateq are used unconditionally.  The names[]
         * will be NULL if the related feature is not enabled, which will
         * cause no allocation for the corresponding virtqueue in find_vqs.
         */
        callbacks[VIRTIO_BALLOON_VQ_INFLATE] = balloon_ack;
        names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
        callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
        names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
        callbacks[VIRTIO_BALLOON_VQ_STATS] = NULL;
        names[VIRTIO_BALLOON_VQ_STATS] = NULL;
        callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
        names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
        names[VIRTIO_BALLOON_VQ_REPORTING] = NULL;

        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
                names[VIRTIO_BALLOON_VQ_STATS] = "stats";
                callbacks[VIRTIO_BALLOON_VQ_STATS] = stats_request;
        }

        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
                names[VIRTIO_BALLOON_VQ_FREE_PAGE] = "free_page_vq";
                callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
        }

        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
                names[VIRTIO_BALLOON_VQ_REPORTING] = "reporting_vq";
                callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack;
        }

        err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
                                         vqs, callbacks, names, NULL, NULL);
        if (err)
                return err;

        vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
        vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
                struct scatterlist sg;
                unsigned int num_stats;
                vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS];

                /*
                 * Prime this virtqueue with one buffer so the hypervisor can
                 * use it to signal us later (it can't be broken yet!).
                 */
                num_stats = update_balloon_stats(vb);

                sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
                err = virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb,
                                           GFP_KERNEL);
                if (err) {
                        dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
                                 __func__);
                        return err;
                }
                virtqueue_kick(vb->stats_vq);
        }

        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
                vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];

        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
                vb->reporting_vq = vqs[VIRTIO_BALLOON_VQ_REPORTING];

        return 0;
}

static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
        if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
                               &vb->config_read_bitmap)) {
                /* Legacy balloon config space is LE, unlike all other devices. */
                virtio_cread_le(vb->vdev, struct virtio_balloon_config,
                                free_page_hint_cmd_id,
                                &vb->cmd_id_received_cache);
        }

        return vb->cmd_id_received_cache;
}

static int send_cmd_id_start(struct virtio_balloon *vb)
{
        struct scatterlist sg;
        struct virtqueue *vq = vb->free_page_vq;
        int err, unused;

        /* Detach all the used buffers from the vq */
        while (virtqueue_get_buf(vq, &unused))
                ;

        vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
                                        virtio_balloon_cmd_id_received(vb));
        sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
        err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
        if (!err)
                virtqueue_kick(vq);
        return err;
}

static int send_cmd_id_stop(struct virtio_balloon *vb)
{
        struct scatterlist sg;
        struct virtqueue *vq = vb->free_page_vq;
        int err, unused;

        /* Detach all the used buffers from the vq */
        while (virtqueue_get_buf(vq, &unused))
                ;

        sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop));
        err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL);
        if (!err)
                virtqueue_kick(vq);
        return err;
}

static int get_free_page_and_send(struct virtio_balloon *vb)
{
        struct virtqueue *vq = vb->free_page_vq;
        struct page *page;
        struct scatterlist sg;
        int err, unused;
        void *p;

        /* Detach all the used buffers from the vq */
        while (virtqueue_get_buf(vq, &unused))
                ;

        page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
                           VIRTIO_BALLOON_HINT_BLOCK_ORDER);
        /*
         * When the allocation returns NULL, it indicates that we have got all
         * the possible free pages, so return -EINTR to stop.
         */
        if (!page)
                return -EINTR;

        p = page_address(page);
        sg_init_one(&sg, p, VIRTIO_BALLOON_HINT_BLOCK_BYTES);
        /* There is always 1 entry reserved for the cmd id to use. */
        if (vq->num_free > 1) {
                err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
                if (unlikely(err)) {
                        free_pages((unsigned long)p,
                                   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
                        return err;
                }
                virtqueue_kick(vq);
                spin_lock_irq(&vb->free_page_list_lock);
                balloon_page_push(&vb->free_page_list, page);
                vb->num_free_page_blocks++;
                spin_unlock_irq(&vb->free_page_list_lock);
        } else {
                /*
                 * The vq has no available entry to add this page block, so
                 * just free it.
                 */
                free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
        }

        return 0;
}

static int send_free_pages(struct virtio_balloon *vb)
{
        int err;
        u32 cmd_id_active;

        while (1) {
                /*
                 * If a stop id or a new cmd id was just received from host,
                 * stop the reporting.
                 */
                cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
                if (unlikely(cmd_id_active !=
                             virtio_balloon_cmd_id_received(vb)))
                        break;

                /*
                 * The free page blocks are allocated and sent to host one by
                 * one.
                 */
                err = get_free_page_and_send(vb);
                if (err == -EINTR)
                        break;
                else if (unlikely(err))
                        return err;
        }

        return 0;
}

static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
{
        int err;
        struct device *dev = &vb->vdev->dev;

        /* Start by sending the received cmd id to host with an outbuf. */
        err = send_cmd_id_start(vb);
        if (unlikely(err))
                dev_err(dev, "Failed to send a start id, err = %d\n", err);

        err = send_free_pages(vb);
        if (unlikely(err))
                dev_err(dev, "Failed to send a free page, err = %d\n", err);

        /* End by sending a stop id to host with an outbuf. */
        err = send_cmd_id_stop(vb);
        if (unlikely(err))
                dev_err(dev, "Failed to send a stop id, err = %d\n", err);
}

static void report_free_page_func(struct work_struct *work)
{
        struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
                                                 report_free_page_work);
        u32 cmd_id_received;

        cmd_id_received = virtio_balloon_cmd_id_received(vb);
        if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
                /* Pass ULONG_MAX to give back all the free pages */
                return_free_pages_to_mm(vb, ULONG_MAX);
        } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
                   cmd_id_received !=
                   virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
                virtio_balloon_report_free_page(vb);
        }
}

#ifdef CONFIG_BALLOON_COMPACTION
/*
 * virtballoon_migratepage - perform the balloon page migration on behalf of
 *                           a compaction thread.      (called under page lock)
 * @vb_dev_info: the balloon device
 * @newpage: page that will replace the isolated page after migration finishes.
 * @page   : the isolated (old) page that is about to be migrated to newpage.
 * @mode   : compaction mode -- not used for balloon page migration.
 *
 * After a ballooned page gets isolated by compaction procedures, this is the
 * function that performs the page migration on behalf of a compaction thread.
 * The page migration for virtio balloon is done in a simple swap fashion which
 * follows these two macro steps:
 *  1) insert newpage into vb->pages list and update the host about it;
 *  2) update the host about the old page removed from vb->pages list;
 *
 * This function performs the balloon page migration task.
 * Called through balloon_mapping->a_ops->migratepage
 */
static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
                struct page *newpage, struct page *page, enum migrate_mode mode)
{
        struct virtio_balloon *vb = container_of(vb_dev_info,
                        struct virtio_balloon, vb_dev_info);
        unsigned long flags;

        /*
         * In order to avoid lock contention while migrating pages concurrently
         * to leak_balloon() or fill_balloon() we just give up the balloon_lock
         * this turn, as it is easier to retry the page migration later.
         * This also prevents fill_balloon() getting stuck into a mutex
         * recursion in the case it ends up triggering memory compaction
         * while it is attempting to inflate the balloon.
         */
        if (!mutex_trylock(&vb->balloon_lock))
                return -EAGAIN;

        get_page(newpage); /* balloon reference */

        /*
         * When we migrate a page to a different zone and adjusted the
         * managed page count when inflating, we have to fixup the count of
         * both involved zones.
         */
        if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) &&
            page_zone(page) != page_zone(newpage)) {
                adjust_managed_page_count(page, 1);
                adjust_managed_page_count(newpage, -1);
        }

        /* balloon's page migration 1st step -- inflate "newpage" */
        spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
        balloon_page_insert(vb_dev_info, newpage);
        vb_dev_info->isolated_pages--;
        __count_vm_event(BALLOON_MIGRATE);
        spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
        vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
        set_page_pfns(vb, vb->pfns, newpage);
        tell_host(vb, vb->inflate_vq);

        /* balloon's page migration 2nd step -- deflate "page" */
        spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
        balloon_page_delete(page);
        spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
        vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
        set_page_pfns(vb, vb->pfns, page);
        tell_host(vb, vb->deflate_vq);

        mutex_unlock(&vb->balloon_lock);

        put_page(page); /* balloon reference */

        return MIGRATEPAGE_SUCCESS;
}

static int balloon_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, BALLOON_KVM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type balloon_fs = {
        .name = "balloon-kvm",
        .init_fs_context = balloon_init_fs_context,
        .kill_sb = kill_anon_super,
};

#endif /* CONFIG_BALLOON_COMPACTION */

static unsigned long shrink_free_pages(struct virtio_balloon *vb,
                                       unsigned long pages_to_free)
{
        unsigned long blocks_to_free, blocks_freed;

        pages_to_free = round_up(pages_to_free,
                                 VIRTIO_BALLOON_HINT_BLOCK_PAGES);
        blocks_to_free = pages_to_free / VIRTIO_BALLOON_HINT_BLOCK_PAGES;
        blocks_freed = return_free_pages_to_mm(vb, blocks_to_free);

        return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}

static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
                                                  struct shrink_control *sc)
{
        struct virtio_balloon *vb = container_of(shrinker,
                                        struct virtio_balloon, shrinker);

        return shrink_free_pages(vb, sc->nr_to_scan);
}

static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
                                                   struct shrink_control *sc)
{
        struct virtio_balloon *vb = container_of(shrinker,
                                        struct virtio_balloon, shrinker);

        return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}

static int virtio_balloon_oom_notify(struct notifier_block *nb,
                                     unsigned long dummy, void *parm)
{
        struct virtio_balloon *vb = container_of(nb,
                                                 struct virtio_balloon, oom_nb);
        unsigned long *freed = parm;

        *freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) /
                  VIRTIO_BALLOON_PAGES_PER_PAGE;
        update_balloon_size(vb);

        return NOTIFY_OK;
}

static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
{
        unregister_shrinker(&vb->shrinker);
}

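/*
 * The free page blocks on free_page_list are only hints: the guest may
 * reuse them at any time, even before the host has processed them.  This
 * shrinker hands them back to the page allocator under memory pressure,
 * counting in units of VIRTIO_BALLOON_HINT_BLOCK_PAGES.
 */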
static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
{
        vb->shrinker.scan_objects = virtio_balloon_shrinker_scan;
        vb->shrinker.count_objects = virtio_balloon_shrinker_count;
        vb->shrinker.seeks = DEFAULT_SEEKS;

        return register_shrinker(&vb->shrinker);
}

static int virtballoon_probe(struct virtio_device *vdev)
{
        struct virtio_balloon *vb;
        int err;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        vdev->priv = vb = kzalloc(sizeof(*vb), GFP_KERNEL);
        if (!vb) {
                err = -ENOMEM;
                goto out;
        }

        INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
        INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
        spin_lock_init(&vb->stop_update_lock);
        mutex_init(&vb->balloon_lock);
        init_waitqueue_head(&vb->acked);
        vb->vdev = vdev;

        balloon_devinfo_init(&vb->vb_dev_info);

        err = init_vqs(vb);
        if (err)
                goto out_free_vb;

#ifdef CONFIG_BALLOON_COMPACTION
        balloon_mnt = kern_mount(&balloon_fs);
        if (IS_ERR(balloon_mnt)) {
                err = PTR_ERR(balloon_mnt);
                goto out_del_vqs;
        }

        vb->vb_dev_info.migratepage = virtballoon_migratepage;
        vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
        if (IS_ERR(vb->vb_dev_info.inode)) {
                err = PTR_ERR(vb->vb_dev_info.inode);
                goto out_kern_unmount;
        }
        vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif
        if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
                /*
                 * There is always one entry reserved for cmd id, so the ring
                 * size needs to be at least two to report free page hints.
                 */
                if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
                        err = -ENOSPC;
                        goto out_iput;
                }
                vb->balloon_wq = alloc_workqueue("balloon-wq",
                                        WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
                if (!vb->balloon_wq) {
                        err = -ENOMEM;
                        goto out_iput;
                }
                INIT_WORK(&vb->report_free_page_work, report_free_page_func);
                vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
                vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
                                                  VIRTIO_BALLOON_CMD_ID_STOP);
                vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
                                                  VIRTIO_BALLOON_CMD_ID_STOP);
                spin_lock_init(&vb->free_page_list_lock);
                INIT_LIST_HEAD(&vb->free_page_list);
                /*
                 * We're allowed to reuse any free pages, even if they are
                 * still to be processed by the host.
                 */
                err = virtio_balloon_register_shrinker(vb);
                if (err)
                        goto out_del_balloon_wq;
        }

        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
                vb->oom_nb.notifier_call = virtio_balloon_oom_notify;
                vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY;
                err = register_oom_notifier(&vb->oom_nb);
                if (err < 0)
                        goto out_unregister_shrinker;
        }

        if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
                /* Start with poison val of 0 representing general init */
                __u32 poison_val = 0;

                /*
                 * Let the hypervisor know that we are expecting a
                 * specific value to be written back in balloon pages.
                 *
                 * If the PAGE_POISON value was larger than a byte we would
                 * need to byte swap poison_val here to guarantee it is
                 * little-endian. However for now it is a single byte so we
                 * can pass it as-is.
                 */
                if (!want_init_on_free())
                        memset(&poison_val, PAGE_POISON, sizeof(poison_val));

                virtio_cwrite_le(vb->vdev, struct virtio_balloon_config,
                                 poison_val, &poison_val);
        }

        vb->pr_dev_info.report = virtballoon_free_page_report;
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
                unsigned int capacity;

                capacity = virtqueue_get_vring_size(vb->reporting_vq);
                if (capacity < PAGE_REPORTING_CAPACITY) {
                        err = -ENOSPC;
                        goto out_unregister_oom;
                }

                err = page_reporting_register(&vb->pr_dev_info);
                if (err)
                        goto out_unregister_oom;
        }

        virtio_device_ready(vdev);

        if (towards_target(vb))
                virtballoon_changed(vdev);
        return 0;

out_unregister_oom:
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                unregister_oom_notifier(&vb->oom_nb);
out_unregister_shrinker:
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
                virtio_balloon_unregister_shrinker(vb);
out_del_balloon_wq:
        if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
                destroy_workqueue(vb->balloon_wq);
out_iput:
#ifdef CONFIG_BALLOON_COMPACTION
        iput(vb->vb_dev_info.inode);
out_kern_unmount:
        kern_unmount(balloon_mnt);
out_del_vqs:
#endif
        vdev->config->del_vqs(vdev);
out_free_vb:
        kfree(vb);
out:
        return err;
}

static void remove_common(struct virtio_balloon *vb)
{
        /* There might be pages left in the balloon: free them. */
        while (vb->num_pages)
                leak_balloon(vb, vb->num_pages);
        update_balloon_size(vb);

        /* There might be free pages that are being reported: release them. */
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
                return_free_pages_to_mm(vb, ULONG_MAX);

        /* Now we reset the device so we can clean up the queues. */
        vb->vdev->config->reset(vb->vdev);

        vb->vdev->config->del_vqs(vb->vdev);
}

static void virtballoon_remove(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;

        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
                page_reporting_unregister(&vb->pr_dev_info);
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
                unregister_oom_notifier(&vb->oom_nb);
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
                virtio_balloon_unregister_shrinker(vb);
        spin_lock_irq(&vb->stop_update_lock);
        vb->stop_update = true;
        spin_unlock_irq(&vb->stop_update_lock);
        cancel_work_sync(&vb->update_balloon_size_work);
        cancel_work_sync(&vb->update_balloon_stats_work);

        if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
                cancel_work_sync(&vb->report_free_page_work);
                destroy_workqueue(vb->balloon_wq);
        }

        remove_common(vb);
#ifdef CONFIG_BALLOON_COMPACTION
        if (vb->vb_dev_info.inode)
                iput(vb->vb_dev_info.inode);

        kern_unmount(balloon_mnt);
#endif
        kfree(vb);
}

#ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;

        /*
         * The workqueue is already frozen by the PM core before this
         * function is called.
         */
        remove_common(vb);
        return 0;
}

static int virtballoon_restore(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;
        int ret;

        ret = init_vqs(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        if (towards_target(vb))
                virtballoon_changed(vdev);
        update_balloon_size(vb);
        return 0;
}
#endif

static int virtballoon_validate(struct virtio_device *vdev)
{
        /*
         * Inform the hypervisor that our pages are poisoned or
         * initialized. If we cannot do that then we should disable
         * page reporting as it could potentially change the contents
         * of our free pages.
         */
        if (!want_init_on_free() && !page_poisoning_enabled_static())
                __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
        else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
                __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);

        __virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM);
        return 0;
}

static unsigned int features[] = {
        VIRTIO_BALLOON_F_MUST_TELL_HOST,
        VIRTIO_BALLOON_F_STATS_VQ,
        VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
        VIRTIO_BALLOON_F_FREE_PAGE_HINT,
        VIRTIO_BALLOON_F_PAGE_POISON,
        VIRTIO_BALLOON_F_REPORTING,
};

static struct virtio_driver virtio_balloon_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .validate = virtballoon_validate,
        .probe = virtballoon_probe,
        .remove = virtballoon_remove,
        .config_changed = virtballoon_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtballoon_freeze,
        .restore = virtballoon_restore,
#endif
};

module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");