// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include <linux/zswap.h>
#include "swap.h"

static void __end_swap_bio_write(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		folio_mark_dirty(folio);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		folio_clear_reclaim(folio);
	}
	folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}

static void __end_swap_bio_read(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		folio_mark_uptodate(folio);
	}
	folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}
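/*
 * Example (illustrative, not part of the original source): with
 * PAGE_SIZE == 4096 and a 1024-byte filesystem block size,
 * blocks_per_page == 4 in the function below, so a swapfile page is
 * usable only if bmap() reports four consecutive, page-aligned blocks:
 *
 *	first_block = 8;			// 8 & 3 == 0, aligned
 *	blocks 9, 10, 11 must follow		// else: discontiguity, reprobe
 *	first_block >>= (PAGE_SHIFT - blkbits);	// 8 >> 2 == page 2 on disk
 *
 * Runs failing either test are skipped one block at a time via "reprobe".
 */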
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

static bool is_folio_zero_filled(struct folio *folio)
{
	unsigned int pos, last_pos;
	unsigned long *data;
	unsigned int i;

	last_pos = PAGE_SIZE / sizeof(*data) - 1;
	for (i = 0; i < folio_nr_pages(folio); i++) {
		data = kmap_local_folio(folio, i * PAGE_SIZE);
		/*
		 * Check the last word first, in case the page is zero-filled
		 * at the start and has non-zero data at the end, which is
		 * common in real-world workloads.
		 */
		if (data[last_pos]) {
			kunmap_local(data);
			return false;
		}
		for (pos = 0; pos < last_pos; pos++) {
			if (data[pos]) {
				kunmap_local(data);
				return false;
			}
		}
		kunmap_local(data);
	}

	return true;
}

static void swap_zeromap_folio_set(struct folio *folio)
{
	struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
	int nr_pages = folio_nr_pages(folio);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		entry = page_swap_entry(folio_page(folio, i));
		set_bit(swp_offset(entry), sis->zeromap);
	}

	count_vm_events(SWPOUT_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPOUT_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}
}

static void swap_zeromap_folio_clear(struct folio *folio)
{
	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		clear_bit(swp_offset(entry), sis->zeromap);
	}
}
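/*
 * Example (illustrative): sis->zeromap holds one bit per swap slot. For a
 * zero-filled 64K folio backed by sixteen 4K slots starting at swap offset
 * 32, swap_zeromap_folio_set() sets bits 32..47, and a later swap-in of
 * any of those slots can be satisfied by zero-filling the folio without
 * touching the device at all.
 */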
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug)
{
	int ret = 0;

	if (folio_free_swap(folio))
		goto out_unlock;

	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(folio);
	if (ret) {
		folio_mark_dirty(folio);
		goto out_unlock;
	}

	/*
	 * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
	 * The bits in zeromap are protected by the locked swapcache folio
	 * and atomic updates are used to protect against read-modify-write
	 * corruption due to other zero swap entries seeing concurrent updates.
	 */
	if (is_folio_zero_filled(folio)) {
		swap_zeromap_folio_set(folio);
		goto out_unlock;
	}

	/*
	 * Clear bits this folio occupies in the zeromap to prevent zero data
	 * being read in from any previous zero writes that occupied the same
	 * swap entries.
	 */
	swap_zeromap_folio_clear(folio);

	if (zswap_store(folio)) {
		count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);
		goto out_unlock;
	}

	rcu_read_lock();
	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
		rcu_read_unlock();
		folio_mark_dirty(folio);
		return AOP_WRITEPAGE_ACTIVATE;
	}
	rcu_read_unlock();

	__swap_writepage(folio, swap_plug);
	return 0;
out_unlock:
	folio_unlock(folio);
	return ret;
}

static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(folio_test_pmd_mappable(folio))) {
		count_memcg_folio_events(folio, THP_SWPOUT, 1);
		count_vm_event(THP_SWPOUT);
	}
#endif
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
	count_memcg_folio_events(folio, PSWPOUT, folio_nr_pages(folio));
	count_vm_events(PSWPOUT, folio_nr_pages(folio));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	if (!folio_memcg_charged(folio))
		return;

	rcu_read_lock();
	memcg = folio_memcg(folio);
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}
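/*
 * Example (illustrative, mirroring how reclaim is expected to drive the
 * plug): a caller batches up to SWAP_CLUSTER_MAX folios into a single
 * swap_iocb so one swap_rw() call covers the whole run:
 *
 *	struct swap_iocb *plug = NULL;
 *
 *	for_each_folio_to_reclaim(folio)	// stand-in, not a real helper
 *		swap_writeout(folio, &plug);
 *	if (plug)
 *		swap_write_unplug(plug);
 *
 * Passing a NULL plug pointer instead makes each write submit immediately.
 */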
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the pages dirty and clear PG_reclaim to
		 * avoid folio_rotate_reclaimable(), but rate-limit
		 * the error messages.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, swap_dev_pos(page_swap_entry(page)));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

static void swap_writepage_fs(struct folio *folio, struct swap_iocb **swap_plug)
{
	struct swap_iocb *sio = swap_plug ? *swap_plug : NULL;
	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
	struct file *swap_file = sis->swap_file;
	loff_t pos = swap_dev_pos(folio->swap);

	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (swap_plug)
		*swap_plug = sio;
}

static void swap_writepage_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_WRITE | REQ_SWAP);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(&bio, folio);
	count_swpout_vm_event(folio);

	folio_start_writeback(folio);
	folio_unlock(folio);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}

static void swap_writepage_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_WRITE | REQ_SWAP, GFP_NOIO);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(bio, folio);
	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	submit_bio(bio);
}
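/*
 * Illustrative summary of the dispatch below; writes take one of three
 * paths depending on the backing store:
 *
 *	SWP_FS_OPS		-> swap_writepage_fs()		(swapfile on a
 *				   filesystem with ->swap_rw, e.g. NFS)
 *	SWP_SYNCHRONOUS_IO	-> swap_writepage_bdev_sync()	(fast block
 *				   device, e.g. zram; on-stack bio, waited on)
 *	otherwise		-> swap_writepage_bdev_async()	(regular block
 *				   device; completion via bi_end_io)
 */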
void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug)
{
	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	/*
	 * ->flags can be updated non-atomically,
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(folio, swap_plug);
	/*
	 * ->flags can be updated non-atomically,
	 * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
	 * is safe.
	 */
	else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
		swap_writepage_bdev_sync(folio, sis);
	else
		swap_writepage_bdev_async(folio, sis);
}

void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
			count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
		count_vm_events(PSWPIN, sio->len >> PAGE_SHIFT);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_unlock(folio);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

static bool swap_read_folio_zeromap(struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);
	struct obj_cgroup *objcg;
	bool is_zeromap;

	/*
	 * Swapping in a large folio that is partially in the zeromap is not
	 * currently handled. Return true without marking the folio uptodate so
	 * that an IO error is emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
			&is_zeromap) != nr_pages))
		return true;

	if (!is_zeromap)
		return false;

	objcg = get_obj_cgroup_from_folio(folio);
	count_vm_events(SWPIN_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPIN_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}

	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	return true;
}

static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
	struct swap_iocb *sio = NULL;
	loff_t pos = swap_dev_pos(folio->swap);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}
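/*
 * Illustrative note on the synchronous read path below: for
 * SWP_SYNCHRONOUS_IO devices (e.g. zram) the bio lives on the stack and is
 * waited on with submit_bio_wait(), avoiding a bio allocation and an
 * asynchronous completion for a request the caller would block on anyway.
 */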
static void swap_read_folio_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}

static void swap_read_folio_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio(bio);
}

void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = __swap_entry_to_info(folio->swap);
	bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;
	bool workingset = folio_test_workingset(folio);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup is IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (swap_read_folio_zeromap(folio)) {
		folio_unlock(folio);
		goto finish;
	}

	if (zswap_load(folio) != -ENOENT)
		goto finish;

	/* We have to read from slower devices. Increase zswap protection. */
	zswap_folio_swapin(folio);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_read_folio_fs(folio, plug);
	} else if (synchronous) {
		swap_read_folio_bdev_sync(folio, sis);
	} else {
		swap_read_folio_bdev_async(folio, sis);
	}

finish:
	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}
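/*
 * Example (illustrative): read-side plugging mirrors the write side. A
 * caller issuing several swap-ins ahead of a fault can batch them:
 *
 *	struct swap_iocb *splug = NULL;
 *
 *	swap_read_folio(folio1, &splug);
 *	swap_read_folio(folio2, &splug);
 *	swap_read_unplug(splug);	// flush any still-plugged I/O
 *
 * swap_read_unplug() is assumed here to be the NULL-tolerant wrapper
 * around __swap_read_unplug() provided by "swap.h".
 */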