// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_io.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Swap reorganised 29.12.95,
 * Asynchronous swapping added 30.12.95. Stephen Tweedie
 * Removed race in async swapping. 14.4.1996. Bruno Haible
 * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 * Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include <linux/zswap.h>
#include "swap.h"

static void __end_swap_bio_write(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		folio_mark_dirty(folio);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		folio_clear_reclaim(folio);
	}
	folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}

static void __end_swap_bio_read(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		folio_mark_uptodate(folio);
	}
	folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}

int generic_swapfile_activate(struct swap_info_struct *sis,
			      struct file *swap_file,
			      sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
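	/*
	 * A worked example of the probing below (illustrative commentary, not
	 * from the original source): with PAGE_SIZE == 4096 and a 1KiB block
	 * size (blkbits == 10), blocks_per_page == 4. A page-sized run is
	 * accepted only when bmap() reports a first block with
	 * (first_block & 3) == 0 and the next three blocks map to
	 * first_block + 1, + 2, + 3; any hole or discontiguity bumps
	 * probe_block by one and reprobes.
	 */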
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
	       page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
		     block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

static bool is_folio_zero_filled(struct folio *folio)
{
	unsigned int pos, last_pos;
	unsigned long *data;
	unsigned int i;

	last_pos = PAGE_SIZE / sizeof(*data) - 1;
	for (i = 0; i < folio_nr_pages(folio); i++) {
		data = kmap_local_folio(folio, i * PAGE_SIZE);
		/*
		 * Check last word first, in case the page is zero-filled at
		 * the start and has non-zero data at the end, which is common
		 * in real-world workloads.
		 */
		if (data[last_pos]) {
			kunmap_local(data);
			return false;
		}
		for (pos = 0; pos < last_pos; pos++) {
			if (data[pos]) {
				kunmap_local(data);
				return false;
			}
		}
		kunmap_local(data);
	}

	return true;
}

static void swap_zeromap_folio_set(struct folio *folio)
{
	struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	int nr_pages = folio_nr_pages(folio);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		set_bit(swp_offset(entry), sis->zeromap);
	}

	count_vm_events(SWPOUT_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPOUT_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}
}

static void swap_zeromap_folio_clear(struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		clear_bit(swp_offset(entry), sis->zeromap);
	}
}
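/*
 * Illustrative sketch (commentary, not part of the original source):
 * sis->zeromap holds one bit per swap slot, indexed by swp_offset(). For an
 * order-2 folio whose first entry sits at offset 64, swap_zeromap_folio_set()
 * sets bits 64..67, so a later swap_read_folio() can fill those pages with
 * zeroes instead of issuing IO; swap_zeromap_folio_clear() undoes exactly
 * those bits before a real write goes out.
 */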
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug)
{
	int ret = 0;

	if (folio_free_swap(folio))
		goto out_unlock;

	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(folio);
	if (ret) {
		folio_mark_dirty(folio);
		goto out_unlock;
	}

	/*
	 * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
	 * The bits in zeromap are protected by the locked swapcache folio
	 * and atomic updates are used to protect against read-modify-write
	 * corruption due to other zero swap entries seeing concurrent updates.
	 */
	if (is_folio_zero_filled(folio)) {
		swap_zeromap_folio_set(folio);
		goto out_unlock;
	}

	/*
	 * Clear bits this folio occupies in the zeromap to prevent zero data
	 * being read in from any previous zero writes that occupied the same
	 * swap entries.
	 */
	swap_zeromap_folio_clear(folio);

	if (zswap_store(folio)) {
		count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);
		goto out_unlock;
	}
	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
		folio_mark_dirty(folio);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	__swap_writepage(folio, swap_plug);
	return 0;
out_unlock:
	folio_unlock(folio);
	return ret;
}

static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(folio_test_pmd_mappable(folio))) {
		count_memcg_folio_events(folio, THP_SWPOUT, 1);
		count_vm_event(THP_SWPOUT);
	}
#endif
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
	count_memcg_folio_events(folio, PSWPOUT, folio_nr_pages(folio));
	count_vm_events(PSWPOUT, folio_nr_pages(folio));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = folio_memcg(folio);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}
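/*
 * Note on the once-only init above (commentary, not from the original
 * source): the cmpxchg() lets concurrent callers race to install the pool;
 * every loser frees its own allocation, so sio_pool is written at most once
 * and never torn down. If mempool_create_kmalloc_pool() fails, sio_pool
 * simply stays NULL and the function returns -ENOMEM.
 */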
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the pages dirty, clear PG_reclaim to avoid
		 * folio_rotate_reclaimable(), and rate-limit the
		 * error messages.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, swap_dev_pos(page_swap_entry(page)));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

static void swap_writepage_fs(struct folio *folio, struct swap_iocb **swap_plug)
{
	struct swap_iocb *sio = swap_plug ? *swap_plug : NULL;
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct file *swap_file = sis->swap_file;
	loff_t pos = swap_dev_pos(folio->swap);

	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (swap_plug)
		*swap_plug = sio;
}

static void swap_writepage_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_WRITE | REQ_SWAP);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(&bio, folio);
	count_swpout_vm_event(folio);

	folio_start_writeback(folio);
	folio_unlock(folio);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}

static void swap_writepage_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_WRITE | REQ_SWAP, GFP_NOIO);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(bio, folio);
	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	submit_bio(bio);
}
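/*
 * Hedged sketch of how a caller might drive the write plug above (assumed
 * usage pattern, not a quote from any actual caller): batching adjacent
 * folios lets swap_writepage_fs() coalesce up to SWAP_CLUSTER_MAX pages into
 * one swap_iocb before issuing ->swap_rw():
 *
 *	struct swap_iocb *plug = NULL;
 *
 *	for each dirty swapcache folio being reclaimed:
 *		swap_writeout(folio, &plug);
 *	swap_write_unplug(plug);	// flush any partially filled sio
 */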
void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(folio, swap_plug);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
	 * is safe.
	 */
	else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
		swap_writepage_bdev_sync(folio, sis);
	else
		swap_writepage_bdev_async(folio, sis);
}

void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
			count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_unlock(folio);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

static bool swap_read_folio_zeromap(struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);
	struct obj_cgroup *objcg;
	bool is_zeromap;

	/*
	 * Swapping in a large folio that is partially in the zeromap is not
	 * currently handled. Return true without marking the folio uptodate so
	 * that an IO error is emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
			&is_zeromap) != nr_pages))
		return true;

	if (!is_zeromap)
		return false;

	objcg = get_obj_cgroup_from_folio(folio);
	count_vm_events(SWPIN_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPIN_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}

	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	return true;
}

static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct swap_iocb *sio = NULL;
	loff_t pos = swap_dev_pos(folio->swap);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}
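/*
 * Commentary (not from the original source): the read side mirrors the write
 * side. swap_read_folio_fs() keeps appending to a plugged swap_iocb while
 * requests stay file- and offset-contiguous, then flushes through
 * swap_read_unplug(), which calls ->swap_rw() and completes the sio
 * synchronously unless the filesystem returns -EIOCBQUEUED.
 */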
static void swap_read_folio_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}

static void swap_read_folio_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio(bio);
}

void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;
	bool workingset = folio_test_workingset(folio);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (swap_read_folio_zeromap(folio)) {
		folio_unlock(folio);
		goto finish;
	}

	if (zswap_load(folio) != -ENOENT)
		goto finish;

	/* We have to read from slower devices. Increase zswap protection. */
	zswap_folio_swapin(folio);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_read_folio_fs(folio, plug);
	} else if (synchronous) {
		swap_read_folio_bdev_sync(folio, sis);
	} else {
		swap_read_folio_bdev_async(folio, sis);
	}

finish:
	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}
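/*
 * Hedged usage sketch (assumed caller pattern, not part of this file): a
 * readahead-style caller batches reads under one plug and flushes once at
 * the end.
 *
 *	struct swap_iocb *splug = NULL;
 *
 *	for each folio to be swapped in:
 *		swap_read_folio(folio, &splug);
 *	swap_read_unplug(splug);
 *
 * where swap_read_unplug() is assumed to be the NULL-tolerant wrapper around
 * __swap_read_unplug() declared in "swap.h".
 */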