// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include <linux/zswap.h>
#include "swap.h"

static void __end_swap_bio_write(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		folio_mark_dirty(folio);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		folio_clear_reclaim(folio);
	}
	folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}

static void __end_swap_bio_read(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		folio_mark_uptodate(folio);
	}
	folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}

int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

static bool is_folio_zero_filled(struct folio *folio)
{
	unsigned int pos, last_pos;
	unsigned long *data;
	unsigned int i;

	last_pos = PAGE_SIZE / sizeof(*data) - 1;
	for (i = 0; i < folio_nr_pages(folio); i++) {
		data = kmap_local_folio(folio, i * PAGE_SIZE);
		/*
		 * Check last word first, in case the page is zero-filled at
		 * the start and has non-zero data at the end, which is common
		 * in real-world workloads.
		 */
		if (data[last_pos]) {
			kunmap_local(data);
			return false;
		}
		for (pos = 0; pos < last_pos; pos++) {
			if (data[pos]) {
				kunmap_local(data);
				return false;
			}
		}
		kunmap_local(data);
	}

	return true;
}

static void swap_zeromap_folio_set(struct folio *folio)
{
	struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	int nr_pages = folio_nr_pages(folio);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		set_bit(swp_offset(entry), sis->zeromap);
	}

	count_vm_events(SWPOUT_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPOUT_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}
}

static void swap_zeromap_folio_clear(struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		clear_bit(swp_offset(entry), sis->zeromap);
	}
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(folio);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}

	/*
	 * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
	 * The bits in zeromap are protected by the locked swapcache folio
	 * and atomic updates are used to protect against read-modify-write
	 * corruption due to other zero swap entries seeing concurrent updates.
	 */
	if (is_folio_zero_filled(folio)) {
		swap_zeromap_folio_set(folio);
		folio_unlock(folio);
		return 0;
	} else {
		/*
		 * Clear bits this folio occupies in the zeromap to prevent
		 * zero data being read in from any previous zero writes that
		 * occupied the same swap entries.
		 */
		swap_zeromap_folio_clear(folio);
	}
	if (zswap_store(folio)) {
		count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);
		folio_unlock(folio);
		return 0;
	}
	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
		folio_mark_dirty(folio);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	__swap_writepage(folio, wbc);
	return 0;
}

static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(folio_test_pmd_mappable(folio))) {
		count_memcg_folio_events(folio, THP_SWPOUT, 1);
		count_vm_event(THP_SWPOUT);
	}
#endif
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
	count_memcg_folio_events(folio, PSWPOUT, folio_nr_pages(folio));
	count_vm_events(PSWPOUT, folio_nr_pages(folio));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = folio_memcg(folio);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

/*
 * Lazily allocate the shared swap_iocb mempool; if two callers race,
 * the loser's pool is destroyed and the winner's kept.
 */
int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the page dirty and avoid
		 * folio_rotate_reclaimable but rate-limit the
		 * messages.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, swap_dev_pos(page_swap_entry(page)));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

/*
 * Write the folio out through the swap file's ->swap_rw(), batching
 * contiguous folios into a plugged swap_iocb when possible.
 */
static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct file *swap_file = sis->swap_file;
	loff_t pos = swap_dev_pos(folio->swap);

	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;
}

static void swap_writepage_bdev_sync(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1,
		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(&bio, folio);
	count_swpout_vm_event(folio);

	folio_start_writeback(folio);
	folio_unlock(folio);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}

static void swap_writepage_bdev_async(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(bio, folio);
	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	submit_bio(bio);
}

void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(folio, wbc);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
	 * is safe.
	 */
	else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
		swap_writepage_bdev_sync(folio, wbc, sis);
	else
		swap_writepage_bdev_async(folio, wbc, sis);
}

void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
			count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_unlock(folio);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

static bool swap_read_folio_zeromap(struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);
	struct obj_cgroup *objcg;
	bool is_zeromap;

	/*
	 * Swapping in a large folio that is partially in the zeromap is not
	 * currently handled. Return true without marking the folio uptodate so
	 * that an IO error is emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
			&is_zeromap) != nr_pages))
		return true;

	if (!is_zeromap)
		return false;

	objcg = get_obj_cgroup_from_folio(folio);
	count_vm_events(SWPIN_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPIN_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}

	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	return true;
}

static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct swap_iocb *sio = NULL;
	loff_t pos = swap_dev_pos(folio->swap);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

static void swap_read_folio_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}

static void swap_read_folio_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio(bio);
}

void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;
	bool workingset = folio_test_workingset(folio);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (swap_read_folio_zeromap(folio)) {
		folio_unlock(folio);
		goto finish;
	} else if (zswap_load(folio)) {
		folio_unlock(folio);
		goto finish;
	}

	/* We have to read from slower devices. Increase zswap protection. */
	zswap_folio_swapin(folio);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_read_folio_fs(folio, plug);
	} else if (synchronous) {
		swap_read_folio_bdev_sync(folio, sis);
	} else {
		swap_read_folio_bdev_async(folio, sis);
	}

finish:
	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}