// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023-2025 Christoph Hellwig.
 * Copyright (c) 2024-2025, Western Digital Corporation or its affiliates.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_zone_alloc.h"
#include "xfs_zone_priv.h"
#include "xfs_zones.h"
#include "xfs_trace.h"

/*
 * Implement Garbage Collection (GC) of partially used zones.
 *
 * To support the purely sequential writes in each zone, zoned XFS needs to be
 * able to move data remaining in a zone out of it to reset the zone to prepare
 * for writing to it again.
 *
 * This is done by the GC thread implemented in this file. To support that, a
 * number of zones (XFS_GC_ZONES) is reserved from the user visible capacity to
 * write the garbage collected data into.
 *
 * Whenever the available space is below the chosen threshold, the GC thread
 * looks for potential non-empty but not fully used zones that are worth
 * reclaiming. Once found, the rmap for the victim zone is queried, and after
 * a bit of sorting to reduce fragmentation, the still live extents are read
 * into memory and written to the GC target zone, and the bmap btree of the
 * files is updated to point to the new location. To avoid taking the IOLOCK
 * and MMAPLOCK for the entire GC process and thus affecting the latency of
 * user reads and writes to the files, the GC writes are speculative and the
 * I/O completion checks that no other writes happened for the affected regions
 * before remapping.
 *
 * Once a zone does not contain any valid data, be that through GC or user
 * block removal, it is queued for a zone reset. The reset operation
 * carefully ensures that the RT device cache is flushed and all transactions
 * referencing the rmap have been committed to disk.
 */

/*
 * Size of each GC scratch pad. This is also the upper bound for each
 * GC I/O, which helps to keep latency down.
 */
#define XFS_GC_CHUNK_SIZE	SZ_1M

/*
 * Scratchpad data to read GCed data into.
 *
 * The offset member tracks where the next allocation starts, and freed tracks
 * the amount of space that is not used anymore.
 */
#define XFS_ZONE_GC_NR_SCRATCH	2
struct xfs_zone_scratch {
	struct folio		*folio;
	unsigned int		offset;
	unsigned int		freed;
};

/*
 * Chunk that is read and written for each GC operation.
 *
 * Note that for writes to actual zoned devices, the chunk can be split when
 * reaching the hardware limit.
 */
struct xfs_gc_bio {
	struct xfs_zone_gc_data	*data;

	/*
	 * Entry into the reading/writing/resetting list. Only accessed from
	 * the GC thread, so no locking needed.
	 */
	struct list_head	entry;

	/*
	 * State of this gc_bio. Done means the current I/O completed.
	 * Set from the bio end I/O handler, read from the GC thread.
	 */
	enum {
		XFS_GC_BIO_NEW,
		XFS_GC_BIO_DONE,
	} state;

	/*
	 * Pointer to the inode and byte range in the inode that this
	 * GC chunk is operating on.
	 */
	struct xfs_inode	*ip;
	loff_t			offset;
	unsigned int		len;

	/*
	 * Existing startblock (in the zone to be freed) and newly assigned
	 * daddr in the zone GCed into.
	 */
	xfs_fsblock_t		old_startblock;
	xfs_daddr_t		new_daddr;
	struct xfs_zone_scratch	*scratch;

	/* Are we writing to a sequential write required zone? */
	bool			is_seq;

	/* Open Zone being written to */
	struct xfs_open_zone	*oz;

	/* Bio used for reads and writes, including the bvec used by it */
	struct bio_vec		bv;
	struct bio		bio;	/* must be last */
};

#define XFS_ZONE_GC_RECS	1024

/* iterator, needs to be reinitialized for each victim zone */
struct xfs_zone_gc_iter {
	struct xfs_rtgroup	*victim_rtg;
	unsigned int		rec_count;
	unsigned int		rec_idx;
	xfs_agblock_t		next_startblock;
	struct xfs_rmap_irec	*recs;
};

/*
 * Per-mount GC state.
 */
struct xfs_zone_gc_data {
	struct xfs_mount	*mp;

	/* bioset used to allocate the gc_bios */
	struct bio_set		bio_set;

	/*
	 * Scratch pads used, and index to indicate which one is currently
	 * in use.
	 */
	struct xfs_zone_scratch	scratch[XFS_ZONE_GC_NR_SCRATCH];
	unsigned int		scratch_idx;

	/*
	 * List of bios currently being read, written and reset.
	 * These lists are only accessed by the GC thread itself, and must only
	 * be processed in order.
	 */
	struct list_head	reading;
	struct list_head	writing;
	struct list_head	resetting;

	/*
	 * Iterator for the victim zone.
	 */
	struct xfs_zone_gc_iter	iter;
};

/*
 * We aim to keep enough zones free in stock to fully use the open zone limit
 * for data placement purposes. Additionally, the m_zonegc_low_space tunable
 * can be set to make sure a fraction of the unused blocks are available for
 * writing.
 */
bool
xfs_zoned_need_gc(
	struct xfs_mount	*mp)
{
	s64			available, free, threshold;
	s32			remainder;

	if (!xfs_group_marked(mp, XG_TYPE_RTG, XFS_RTG_RECLAIMABLE))
		return false;

	available = xfs_estimate_freecounter(mp, XC_FREE_RTAVAILABLE);

	if (available <
	    mp->m_groups[XG_TYPE_RTG].blocks *
	    (mp->m_max_open_zones - XFS_OPEN_GC_ZONES))
		return true;

	free = xfs_estimate_freecounter(mp, XC_FREE_RTEXTENTS);

	threshold = div_s64_rem(free, 100, &remainder);
	threshold = threshold * mp->m_zonegc_low_space +
		remainder * div_s64(mp->m_zonegc_low_space, 100);

	if (available < threshold)
		return true;

	return false;
}
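
/*
 * Worked example for the threshold above (illustrative numbers only, not
 * taken from any real configuration): with m_zonegc_low_space set to 25
 * percent and free = 1,000,050 RT blocks, the two-step computation gives
 *
 *	threshold = (1000050 / 100) * 25 + (1000050 % 100) * (25 / 100)
 *	          = 10000 * 25 + 50 * 0
 *	          = 250000 blocks
 *
 * i.e. roughly free * m_zonegc_low_space / 100, so xfs_zoned_need_gc() also
 * returns true once the XC_FREE_RTAVAILABLE estimate drops below 250,000
 * blocks even while enough free zones are still in stock.
 */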

static struct xfs_zone_gc_data *
xfs_zone_gc_data_alloc(
	struct xfs_mount	*mp)
{
	struct xfs_zone_gc_data	*data;
	int			i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;
	data->iter.recs = kcalloc(XFS_ZONE_GC_RECS, sizeof(*data->iter.recs),
			GFP_KERNEL);
	if (!data->iter.recs)
		goto out_free_data;

	/*
	 * We actually only need a single bio_vec. It would be nice to have
	 * a flag that only allocates the inline bvecs and not the separate
	 * bvec pool.
	 */
	if (bioset_init(&data->bio_set, 16, offsetof(struct xfs_gc_bio, bio),
			BIOSET_NEED_BVECS))
		goto out_free_recs;
	for (i = 0; i < XFS_ZONE_GC_NR_SCRATCH; i++) {
		data->scratch[i].folio =
			folio_alloc(GFP_KERNEL, get_order(XFS_GC_CHUNK_SIZE));
		if (!data->scratch[i].folio)
			goto out_free_scratch;
	}
	INIT_LIST_HEAD(&data->reading);
	INIT_LIST_HEAD(&data->writing);
	INIT_LIST_HEAD(&data->resetting);
	data->mp = mp;
	return data;

out_free_scratch:
	while (--i >= 0)
		folio_put(data->scratch[i].folio);
	bioset_exit(&data->bio_set);
out_free_recs:
	kfree(data->iter.recs);
out_free_data:
	kfree(data);
	return NULL;
}

static void
xfs_zone_gc_data_free(
	struct xfs_zone_gc_data	*data)
{
	int			i;

	for (i = 0; i < XFS_ZONE_GC_NR_SCRATCH; i++)
		folio_put(data->scratch[i].folio);
	bioset_exit(&data->bio_set);
	kfree(data->iter.recs);
	kfree(data);
}

static void
xfs_zone_gc_iter_init(
	struct xfs_zone_gc_iter	*iter,
	struct xfs_rtgroup	*victim_rtg)
{
	iter->next_startblock = 0;
	iter->rec_count = 0;
	iter->rec_idx = 0;
	iter->victim_rtg = victim_rtg;
}

/*
 * Query the rmap of the victim zone to gather the records to evacuate.
 */
static int
xfs_zone_gc_query_cb(
	struct xfs_btree_cur	*cur,
	const struct xfs_rmap_irec *irec,
	void			*private)
{
	struct xfs_zone_gc_iter	*iter = private;

	ASSERT(!XFS_RMAP_NON_INODE_OWNER(irec->rm_owner));
	ASSERT(!xfs_is_sb_inum(cur->bc_mp, irec->rm_owner));
	ASSERT(!(irec->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK)));

	iter->recs[iter->rec_count] = *irec;
	if (++iter->rec_count == XFS_ZONE_GC_RECS) {
		iter->next_startblock =
			irec->rm_startblock + irec->rm_blockcount;
		return 1;
	}
	return 0;
}

static int
xfs_zone_gc_rmap_rec_cmp(
	const void		*a,
	const void		*b)
{
	const struct xfs_rmap_irec *reca = a;
	const struct xfs_rmap_irec *recb = b;
	int			diff;

	diff = cmp_int(reca->rm_owner, recb->rm_owner);
	if (diff)
		return diff;
	return cmp_int(reca->rm_offset, recb->rm_offset);
}

static int
xfs_zone_gc_query(
	struct xfs_mount	*mp,
	struct xfs_zone_gc_iter	*iter)
{
	struct xfs_rtgroup	*rtg = iter->victim_rtg;
	struct xfs_rmap_irec	ri_low = { };
	struct xfs_rmap_irec	ri_high;
	struct xfs_btree_cur	*cur;
	struct xfs_trans	*tp;
	int			error;

	ASSERT(iter->next_startblock <= rtg_blocks(rtg));
	if (iter->next_startblock == rtg_blocks(rtg))
		goto done;

	ASSERT(iter->next_startblock < rtg_blocks(rtg));
	ri_low.rm_startblock = iter->next_startblock;
	memset(&ri_high, 0xFF, sizeof(ri_high));

	iter->rec_idx = 0;
	iter->rec_count = 0;

	tp = xfs_trans_alloc_empty(mp);
	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
	cur = xfs_rtrmapbt_init_cursor(tp, rtg);
	error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
			xfs_zone_gc_query_cb, iter);
	xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
	xfs_btree_del_cursor(cur, error < 0 ? error : 0);
	xfs_trans_cancel(tp);

	if (error < 0)
		return error;

	/*
	 * Sort the rmap records by inode number and increasing offset to
	 * defragment the mappings.
	 *
	 * This could be further enhanced by an even bigger look ahead window,
	 * but that's better left until we have better detection of changes to
	 * inode mapping to avoid the potential of GCing already dead data.
	 */
	sort(iter->recs, iter->rec_count, sizeof(iter->recs[0]),
			xfs_zone_gc_rmap_rec_cmp, NULL);

	if (error == 0) {
		/*
		 * We finished iterating through the zone.
		 */
		iter->next_startblock = rtg_blocks(rtg);
		if (iter->rec_count == 0)
			goto done;
	}

	return 0;
done:
	xfs_rtgroup_rele(iter->victim_rtg);
	iter->victim_rtg = NULL;
	return 0;
}

static bool
xfs_zone_gc_iter_next(
	struct xfs_mount	*mp,
	struct xfs_zone_gc_iter	*iter,
	struct xfs_rmap_irec	*chunk_rec,
	struct xfs_inode	**ipp)
{
	struct xfs_rmap_irec	*irec;
	int			error;

	if (!iter->victim_rtg)
		return false;

retry:
	if (iter->rec_idx == iter->rec_count) {
		error = xfs_zone_gc_query(mp, iter);
		if (error)
			goto fail;
		if (!iter->victim_rtg)
			return false;
	}

	irec = &iter->recs[iter->rec_idx];
	error = xfs_iget(mp, NULL, irec->rm_owner,
			XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, ipp);
	if (error) {
		/*
		 * If the inode was already deleted, skip over it.
		 */
		if (error == -ENOENT) {
			iter->rec_idx++;
			goto retry;
		}
		goto fail;
	}

	if (!S_ISREG(VFS_I(*ipp)->i_mode) || !XFS_IS_REALTIME_INODE(*ipp)) {
		iter->rec_idx++;
		xfs_irele(*ipp);
		goto retry;
	}

	*chunk_rec = *irec;
	return true;

fail:
	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
	return false;
}

static void
xfs_zone_gc_iter_advance(
	struct xfs_zone_gc_iter	*iter,
	xfs_extlen_t		count_fsb)
{
	struct xfs_rmap_irec	*irec = &iter->recs[iter->rec_idx];

	irec->rm_offset += count_fsb;
	irec->rm_startblock += count_fsb;
	irec->rm_blockcount -= count_fsb;
	if (!irec->rm_blockcount)
		iter->rec_idx++;
}

static struct xfs_rtgroup *
xfs_zone_gc_pick_victim_from(
	struct xfs_mount	*mp,
	uint32_t		bucket)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint32_t		victim_used = U32_MAX;
	struct xfs_rtgroup	*victim_rtg = NULL;
	uint32_t		bit;

	if (!zi->zi_used_bucket_entries[bucket])
		return NULL;

	for_each_set_bit(bit, zi->zi_used_bucket_bitmap[bucket],
			mp->m_sb.sb_rgcount) {
		struct xfs_rtgroup *rtg = xfs_rtgroup_grab(mp, bit);

		if (!rtg)
			continue;

		/* skip zones that are just waiting for a reset */
		if (rtg_rmap(rtg)->i_used_blocks == 0 ||
		    rtg_rmap(rtg)->i_used_blocks >= victim_used) {
			xfs_rtgroup_rele(rtg);
			continue;
		}

		if (victim_rtg)
			xfs_rtgroup_rele(victim_rtg);
		victim_rtg = rtg;
		victim_used = rtg_rmap(rtg)->i_used_blocks;

		/*
		 * Any zone that is less than 1 percent used is fair game for
		 * instant reclaim. All of these zones are in the last
		 * bucket, so avoid the expensive division for the zones
		 * in the other buckets.
		 */
		if (bucket == 0 &&
		    rtg_rmap(rtg)->i_used_blocks < rtg_blocks(rtg) / 100)
			break;
	}

	return victim_rtg;
}

/*
 * Iterate through all zones marked as reclaimable and find a candidate to
 * reclaim.
 */
static bool
xfs_zone_gc_select_victim(
	struct xfs_zone_gc_data	*data)
{
	struct xfs_zone_gc_iter	*iter = &data->iter;
	struct xfs_mount	*mp = data->mp;
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_rtgroup	*victim_rtg = NULL;
	unsigned int		bucket;

	if (xfs_is_shutdown(mp))
		return false;

	if (iter->victim_rtg)
		return true;

	/*
	 * Don't start new work if we are asked to stop or park.
	 */
	if (kthread_should_stop() || kthread_should_park())
		return false;

	if (!xfs_zoned_need_gc(mp))
		return false;

	spin_lock(&zi->zi_used_buckets_lock);
	for (bucket = 0; bucket < XFS_ZONE_USED_BUCKETS; bucket++) {
		victim_rtg = xfs_zone_gc_pick_victim_from(mp, bucket);
		if (victim_rtg)
			break;
	}
	spin_unlock(&zi->zi_used_buckets_lock);

	if (!victim_rtg)
		return false;

	trace_xfs_zone_gc_select_victim(victim_rtg, bucket);
	xfs_zone_gc_iter_init(iter, victim_rtg);
	return true;
}

static struct xfs_open_zone *
xfs_zone_gc_steal_open(
	struct xfs_zone_info	*zi)
{
	struct xfs_open_zone	*oz, *found = NULL;

	spin_lock(&zi->zi_open_zones_lock);
	list_for_each_entry(oz, &zi->zi_open_zones, oz_entry) {
		if (!found || oz->oz_allocated < found->oz_allocated)
			found = oz;
	}

	if (found) {
		found->oz_is_gc = true;
		list_del_init(&found->oz_entry);
		zi->zi_nr_open_zones--;
	}

	spin_unlock(&zi->zi_open_zones_lock);
	return found;
}

static struct xfs_open_zone *
xfs_zone_gc_select_target(
	struct xfs_mount	*mp)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_open_zone	*oz = zi->zi_open_gc_zone;

	/*
	 * We need to wait for pending writes to finish.
	 */
	if (oz && oz->oz_written < rtg_blocks(oz->oz_rtg))
		return NULL;

	ASSERT(zi->zi_nr_open_zones <=
		mp->m_max_open_zones - XFS_OPEN_GC_ZONES);
	oz = xfs_open_zone(mp, WRITE_LIFE_NOT_SET, true);
	if (oz)
		trace_xfs_zone_gc_target_opened(oz->oz_rtg);
	spin_lock(&zi->zi_open_zones_lock);
	zi->zi_open_gc_zone = oz;
	spin_unlock(&zi->zi_open_zones_lock);
	return oz;
}

/*
 * Ensure we have a valid open zone to write the GC data to.
 *
 * If the current target zone has space, keep writing to it; otherwise first
 * wait for all pending writes to finish and then pick a new one.
 */
static struct xfs_open_zone *
xfs_zone_gc_ensure_target(
	struct xfs_mount	*mp)
{
	struct xfs_open_zone	*oz = mp->m_zone_info->zi_open_gc_zone;

	if (!oz || oz->oz_allocated == rtg_blocks(oz->oz_rtg))
		return xfs_zone_gc_select_target(mp);
	return oz;
}

static unsigned int
xfs_zone_gc_scratch_available(
	struct xfs_zone_gc_data	*data)
{
	return XFS_GC_CHUNK_SIZE - data->scratch[data->scratch_idx].offset;
}

static bool
xfs_zone_gc_space_available(
	struct xfs_zone_gc_data	*data)
{
	struct xfs_open_zone	*oz;

	oz = xfs_zone_gc_ensure_target(data->mp);
	if (!oz)
		return false;
	return oz->oz_allocated < rtg_blocks(oz->oz_rtg) &&
		xfs_zone_gc_scratch_available(data);
}

static void
xfs_zone_gc_end_io(
	struct bio		*bio)
{
	struct xfs_gc_bio	*chunk =
		container_of(bio, struct xfs_gc_bio, bio);
	struct xfs_zone_gc_data	*data = chunk->data;

	WRITE_ONCE(chunk->state, XFS_GC_BIO_DONE);
	wake_up_process(data->mp->m_zone_info->zi_gc_thread);
}

static struct xfs_open_zone *
xfs_zone_gc_alloc_blocks(
	struct xfs_zone_gc_data	*data,
	xfs_extlen_t		*count_fsb,
	xfs_daddr_t		*daddr,
	bool			*is_seq)
{
	struct xfs_mount	*mp = data->mp;
	struct xfs_open_zone	*oz;

	oz = xfs_zone_gc_ensure_target(mp);
	if (!oz)
		return NULL;

	*count_fsb = min(*count_fsb,
		XFS_B_TO_FSB(mp, xfs_zone_gc_scratch_available(data)));

	/*
	 * Directly allocate GC blocks from the reserved pool.
	 *
	 * If we'd take them from the normal pool we could be stealing blocks
	 * from a regular writer, which would then have to wait for GC and
	 * deadlock.
	 */
	spin_lock(&mp->m_sb_lock);
	*count_fsb = min(*count_fsb,
			rtg_blocks(oz->oz_rtg) - oz->oz_allocated);
	*count_fsb = min3(*count_fsb,
			mp->m_free[XC_FREE_RTEXTENTS].res_avail,
			mp->m_free[XC_FREE_RTAVAILABLE].res_avail);
	mp->m_free[XC_FREE_RTEXTENTS].res_avail -= *count_fsb;
	mp->m_free[XC_FREE_RTAVAILABLE].res_avail -= *count_fsb;
	spin_unlock(&mp->m_sb_lock);

	if (!*count_fsb)
		return NULL;

	*daddr = xfs_gbno_to_daddr(&oz->oz_rtg->rtg_group, 0);
	*is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *daddr);
	if (!*is_seq)
		*daddr += XFS_FSB_TO_BB(mp, oz->oz_allocated);
	oz->oz_allocated += *count_fsb;
	atomic_inc(&oz->oz_ref);
	return oz;
}
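
/*
 * A short note on the addressing chosen above: for a conventional zone the
 * final write location is known at allocation time, so the returned daddr is
 * the zone start plus the blocks already allocated and the copy is issued as
 * a regular write. For a sequential write required zone the daddr stays at
 * the zone start and xfs_zone_gc_submit_write() turns the copy into a zone
 * append, so the device picks the actual sector and reports it back in
 * bi_sector, which xfs_zone_gc_finish_chunk() reads before remapping.
 */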

static bool
xfs_zone_gc_start_chunk(
	struct xfs_zone_gc_data	*data)
{
	struct xfs_zone_gc_iter	*iter = &data->iter;
	struct xfs_mount	*mp = data->mp;
	struct block_device	*bdev = mp->m_rtdev_targp->bt_bdev;
	struct xfs_open_zone	*oz;
	struct xfs_rmap_irec	irec;
	struct xfs_gc_bio	*chunk;
	struct xfs_inode	*ip;
	struct bio		*bio;
	xfs_daddr_t		daddr;
	bool			is_seq;

	if (xfs_is_shutdown(mp))
		return false;

	if (!xfs_zone_gc_iter_next(mp, iter, &irec, &ip))
		return false;
	oz = xfs_zone_gc_alloc_blocks(data, &irec.rm_blockcount, &daddr,
			&is_seq);
	if (!oz) {
		xfs_irele(ip);
		return false;
	}

	bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOFS, &data->bio_set);

	chunk = container_of(bio, struct xfs_gc_bio, bio);
	chunk->ip = ip;
	chunk->offset = XFS_FSB_TO_B(mp, irec.rm_offset);
	chunk->len = XFS_FSB_TO_B(mp, irec.rm_blockcount);
	chunk->old_startblock =
		xfs_rgbno_to_rtb(iter->victim_rtg, irec.rm_startblock);
	chunk->new_daddr = daddr;
	chunk->is_seq = is_seq;
	chunk->scratch = &data->scratch[data->scratch_idx];
	chunk->data = data;
	chunk->oz = oz;

	bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock);
	bio->bi_end_io = xfs_zone_gc_end_io;
	bio_add_folio_nofail(bio, chunk->scratch->folio, chunk->len,
			chunk->scratch->offset);
	chunk->scratch->offset += chunk->len;
	if (chunk->scratch->offset == XFS_GC_CHUNK_SIZE) {
		data->scratch_idx =
			(data->scratch_idx + 1) % XFS_ZONE_GC_NR_SCRATCH;
	}
	WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
	list_add_tail(&chunk->entry, &data->reading);
	xfs_zone_gc_iter_advance(iter, irec.rm_blockcount);

	submit_bio(bio);
	return true;
}

static void
xfs_zone_gc_free_chunk(
	struct xfs_gc_bio	*chunk)
{
	list_del(&chunk->entry);
	xfs_open_zone_put(chunk->oz);
	xfs_irele(chunk->ip);
	bio_put(&chunk->bio);
}

static void
xfs_zone_gc_submit_write(
	struct xfs_zone_gc_data	*data,
	struct xfs_gc_bio	*chunk)
{
	if (chunk->is_seq) {
		chunk->bio.bi_opf &= ~REQ_OP_WRITE;
		chunk->bio.bi_opf |= REQ_OP_ZONE_APPEND;
	}
	chunk->bio.bi_iter.bi_sector = chunk->new_daddr;
	chunk->bio.bi_end_io = xfs_zone_gc_end_io;
	submit_bio(&chunk->bio);
}

static struct xfs_gc_bio *
xfs_zone_gc_split_write(
	struct xfs_zone_gc_data	*data,
	struct xfs_gc_bio	*chunk)
{
	struct queue_limits	*lim =
		&bdev_get_queue(chunk->bio.bi_bdev)->limits;
	struct xfs_gc_bio	*split_chunk;
	int			split_sectors;
	unsigned int		split_len;
	struct bio		*split;
	unsigned int		nsegs;

	if (!chunk->is_seq)
		return NULL;

	split_sectors = bio_split_rw_at(&chunk->bio, lim, &nsegs,
			lim->max_zone_append_sectors << SECTOR_SHIFT);
	if (!split_sectors)
		return NULL;

	/* ensure the split chunk is still block size aligned */
	split_sectors = ALIGN_DOWN(split_sectors << SECTOR_SHIFT,
			data->mp->m_sb.sb_blocksize) >> SECTOR_SHIFT;
	split_len = split_sectors << SECTOR_SHIFT;

	split = bio_split(&chunk->bio, split_sectors, GFP_NOFS, &data->bio_set);
	split_chunk = container_of(split, struct xfs_gc_bio, bio);
	split_chunk->data = data;
	ihold(VFS_I(chunk->ip));
	split_chunk->ip = chunk->ip;
	split_chunk->is_seq = chunk->is_seq;
	split_chunk->scratch = chunk->scratch;
	split_chunk->offset = chunk->offset;
	split_chunk->len = split_len;
	split_chunk->old_startblock = chunk->old_startblock;
	split_chunk->new_daddr = chunk->new_daddr;
	split_chunk->oz = chunk->oz;
	atomic_inc(&chunk->oz->oz_ref);

	chunk->offset += split_len;
	chunk->len -= split_len;
	chunk->old_startblock += XFS_B_TO_FSB(data->mp, split_len);

	/* add right before the original chunk */
	WRITE_ONCE(split_chunk->state, XFS_GC_BIO_NEW);
	list_add_tail(&split_chunk->entry, &chunk->entry);
	return split_chunk;
}

static void
xfs_zone_gc_write_chunk(
	struct xfs_gc_bio	*chunk)
{
	struct xfs_zone_gc_data	*data = chunk->data;
	struct xfs_mount	*mp = chunk->ip->i_mount;
	phys_addr_t		bvec_paddr =
		bvec_phys(bio_first_bvec_all(&chunk->bio));
	struct xfs_gc_bio	*split_chunk;

	if (chunk->bio.bi_status)
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
	if (xfs_is_shutdown(mp)) {
		xfs_zone_gc_free_chunk(chunk);
		return;
	}

	WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
	list_move_tail(&chunk->entry, &data->writing);

	bio_reset(&chunk->bio, mp->m_rtdev_targp->bt_bdev, REQ_OP_WRITE);
	bio_add_folio_nofail(&chunk->bio, chunk->scratch->folio, chunk->len,
			offset_in_folio(chunk->scratch->folio, bvec_paddr));

	while ((split_chunk = xfs_zone_gc_split_write(data, chunk)))
		xfs_zone_gc_submit_write(data, split_chunk);
	xfs_zone_gc_submit_write(data, chunk);
}

static void
xfs_zone_gc_finish_chunk(
	struct xfs_gc_bio	*chunk)
{
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	struct xfs_inode	*ip = chunk->ip;
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	if (chunk->bio.bi_status)
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
	if (xfs_is_shutdown(mp)) {
		xfs_zone_gc_free_chunk(chunk);
		return;
	}

	chunk->scratch->freed += chunk->len;
	if (chunk->scratch->freed == chunk->scratch->offset) {
		chunk->scratch->offset = 0;
		chunk->scratch->freed = 0;
	}

	/*
	 * Cycle through the iolock and wait for direct I/O and layouts to
	 * ensure no one is reading from the old mapping before it goes away.
	 *
	 * Note that xfs_zoned_end_io() below checks that no other writer raced
	 * with us to update the mapping by checking that the old startblock
	 * didn't change.
	 */
	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(VFS_I(ip), &iolock, BREAK_UNMAP);
	if (!error)
		inode_dio_wait(VFS_I(ip));
	xfs_iunlock(ip, iolock);
	if (error)
		goto free;

	if (chunk->is_seq)
		chunk->new_daddr = chunk->bio.bi_iter.bi_sector;
	error = xfs_zoned_end_io(ip, chunk->offset, chunk->len,
			chunk->new_daddr, chunk->oz, chunk->old_startblock);
free:
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
	xfs_zone_gc_free_chunk(chunk);
}

static void
xfs_zone_gc_finish_reset(
	struct xfs_gc_bio	*chunk)
{
	struct xfs_rtgroup	*rtg = chunk->bio.bi_private;
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;

	if (chunk->bio.bi_status) {
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		goto out;
	}

	xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE);
	atomic_inc(&zi->zi_nr_free_zones);

	xfs_zoned_add_available(mp, rtg_blocks(rtg));

	wake_up_all(&zi->zi_zone_wait);
out:
	list_del(&chunk->entry);
	bio_put(&chunk->bio);
}

static bool
xfs_zone_gc_prepare_reset(
	struct bio		*bio,
	struct xfs_rtgroup	*rtg)
{
	trace_xfs_zone_reset(rtg);

	ASSERT(rtg_rmap(rtg)->i_used_blocks == 0);
	bio->bi_iter.bi_sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
	if (!bdev_zone_is_seq(bio->bi_bdev, bio->bi_iter.bi_sector)) {
		if (!bdev_max_discard_sectors(bio->bi_bdev))
			return false;
		bio->bi_opf = REQ_OP_DISCARD | REQ_SYNC;
		bio->bi_iter.bi_size =
			XFS_FSB_TO_B(rtg_mount(rtg), rtg_blocks(rtg));
	}

	return true;
}

int
xfs_zone_gc_reset_sync(
	struct xfs_rtgroup	*rtg)
{
	int			error = 0;
	struct bio		bio;

	bio_init(&bio, rtg_mount(rtg)->m_rtdev_targp->bt_bdev, NULL, 0,
			REQ_OP_ZONE_RESET);
	if (xfs_zone_gc_prepare_reset(&bio, rtg))
		error = submit_bio_wait(&bio);
	bio_uninit(&bio);

	return error;
}

static void
xfs_zone_gc_reset_zones(
	struct xfs_zone_gc_data	*data,
	struct xfs_group	*reset_list)
{
	struct xfs_group	*next = reset_list;

	if (blkdev_issue_flush(data->mp->m_rtdev_targp->bt_bdev) < 0) {
		xfs_force_shutdown(data->mp, SHUTDOWN_META_IO_ERROR);
		return;
	}

	do {
		struct xfs_rtgroup	*rtg = to_rtg(next);
		struct xfs_gc_bio	*chunk;
		struct bio		*bio;

		xfs_log_force_inode(rtg_rmap(rtg));

		next = rtg_group(rtg)->xg_next_reset;
		rtg_group(rtg)->xg_next_reset = NULL;

		bio = bio_alloc_bioset(rtg_mount(rtg)->m_rtdev_targp->bt_bdev,
				0, REQ_OP_ZONE_RESET, GFP_NOFS, &data->bio_set);
		bio->bi_private = rtg;
		bio->bi_end_io = xfs_zone_gc_end_io;

		chunk = container_of(bio, struct xfs_gc_bio, bio);
		chunk->data = data;
		WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
		list_add_tail(&chunk->entry, &data->resetting);

		/*
		 * Also use the bio to drive the state machine when neither
		 * zone reset nor discard is supported to keep things simple.
		 */
		if (xfs_zone_gc_prepare_reset(bio, rtg))
			submit_bio(bio);
		else
			bio_endio(bio);
	} while (next);
}

/*
 * Handle the work to read and write data for GC and to reset the zones,
 * including handling all completions.
 *
 * Note that the order of the chunks is preserved so that we don't undo the
 * optimal order established by xfs_zone_gc_query().
 */
static bool
xfs_zone_gc_handle_work(
	struct xfs_zone_gc_data	*data)
{
	struct xfs_zone_info	*zi = data->mp->m_zone_info;
	struct xfs_gc_bio	*chunk, *next;
	struct xfs_group	*reset_list;
	struct blk_plug		plug;

	spin_lock(&zi->zi_reset_list_lock);
	reset_list = zi->zi_reset_list;
	zi->zi_reset_list = NULL;
	spin_unlock(&zi->zi_reset_list_lock);

	if (!xfs_zone_gc_select_victim(data) ||
	    !xfs_zone_gc_space_available(data)) {
		if (list_empty(&data->reading) &&
		    list_empty(&data->writing) &&
		    list_empty(&data->resetting) &&
		    !reset_list)
			return false;
	}

	__set_current_state(TASK_RUNNING);
	try_to_freeze();

	if (reset_list)
		xfs_zone_gc_reset_zones(data, reset_list);

	list_for_each_entry_safe(chunk, next, &data->resetting, entry) {
		if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
			break;
		xfs_zone_gc_finish_reset(chunk);
	}

	list_for_each_entry_safe(chunk, next, &data->writing, entry) {
		if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
			break;
		xfs_zone_gc_finish_chunk(chunk);
	}

	blk_start_plug(&plug);
	list_for_each_entry_safe(chunk, next, &data->reading, entry) {
		if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
			break;
		xfs_zone_gc_write_chunk(chunk);
	}
	blk_finish_plug(&plug);

	blk_start_plug(&plug);
	while (xfs_zone_gc_start_chunk(data))
		;
	blk_finish_plug(&plug);
	return true;
}
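
/*
 * Roughly, each pass of xfs_zone_gc_handle_work() above drains the pipeline
 * from back to front: issue pending zone resets, retire completed resets and
 * writes, turn completed reads into writes, and only then start new read
 * chunks. Completions are consumed in list order and processing stops at the
 * first chunk still in flight, which keeps the order established by
 * xfs_zone_gc_query() intact.
 */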

/*
 * Note that the current GC algorithm would break reflinks and thus duplicate
 * data that was shared by multiple owners before. Because of that, zoned
 * file systems do not support reflinks: they cannot be created on such file
 * systems, and file systems containing them cannot be mounted.
 */
static int
xfs_zoned_gcd(
	void			*private)
{
	struct xfs_zone_gc_data	*data = private;
	struct xfs_mount	*mp = data->mp;
	struct xfs_zone_info	*zi = mp->m_zone_info;
	unsigned int		nofs_flag;

	nofs_flag = memalloc_nofs_save();
	set_freezable();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE);
		xfs_set_zonegc_running(mp);
		if (xfs_zone_gc_handle_work(data))
			continue;

		if (list_empty(&data->reading) &&
		    list_empty(&data->writing) &&
		    list_empty(&data->resetting) &&
		    !zi->zi_reset_list) {
			xfs_clear_zonegc_running(mp);
			xfs_zoned_resv_wake_all(mp);

			if (kthread_should_stop()) {
				__set_current_state(TASK_RUNNING);
				break;
			}

			if (kthread_should_park()) {
				__set_current_state(TASK_RUNNING);
				kthread_parkme();
				continue;
			}
		}

		schedule();
	}
	xfs_clear_zonegc_running(mp);

	if (data->iter.victim_rtg)
		xfs_rtgroup_rele(data->iter.victim_rtg);

	memalloc_nofs_restore(nofs_flag);
	xfs_zone_gc_data_free(data);
	return 0;
}

void
xfs_zone_gc_start(
	struct xfs_mount	*mp)
{
	if (xfs_has_zoned(mp))
		kthread_unpark(mp->m_zone_info->zi_gc_thread);
}

void
xfs_zone_gc_stop(
	struct xfs_mount	*mp)
{
	if (xfs_has_zoned(mp))
		kthread_park(mp->m_zone_info->zi_gc_thread);
}

int
xfs_zone_gc_mount(
	struct xfs_mount	*mp)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_zone_gc_data	*data;
	struct xfs_open_zone	*oz;
	int			error;

	/*
	 * If there are no free zones available for GC, pick the open zone with
	 * the least used space to GC into. This should only happen after an
	 * unclean shutdown near ENOSPC while GC was ongoing.
	 *
	 * We also need to do this for the first gc zone allocation if we
	 * unmounted while at the open limit.
	 */
	if (!xfs_group_marked(mp, XG_TYPE_RTG, XFS_RTG_FREE) ||
	    zi->zi_nr_open_zones == mp->m_max_open_zones)
		oz = xfs_zone_gc_steal_open(zi);
	else
		oz = xfs_open_zone(mp, WRITE_LIFE_NOT_SET, true);
	if (!oz) {
		xfs_warn(mp, "unable to allocate a zone for gc");
		error = -EIO;
		goto out;
	}

	trace_xfs_zone_gc_target_opened(oz->oz_rtg);
	zi->zi_open_gc_zone = oz;

	data = xfs_zone_gc_data_alloc(mp);
	if (!data) {
		error = -ENOMEM;
		goto out_put_gc_zone;
	}

	mp->m_zone_info->zi_gc_thread = kthread_create(xfs_zoned_gcd, data,
			"xfs-zone-gc/%s", mp->m_super->s_id);
	if (IS_ERR(mp->m_zone_info->zi_gc_thread)) {
		xfs_warn(mp, "unable to create zone gc thread");
		error = PTR_ERR(mp->m_zone_info->zi_gc_thread);
		goto out_free_gc_data;
	}

	/* xfs_zone_gc_start will unpark for rw mounts */
	kthread_park(mp->m_zone_info->zi_gc_thread);
	return 0;

out_free_gc_data:
	kfree(data);
out_put_gc_zone:
	xfs_open_zone_put(zi->zi_open_gc_zone);
out:
	return error;
}

void
xfs_zone_gc_unmount(
	struct xfs_mount	*mp)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;

	kthread_stop(zi->zi_gc_thread);
	if (zi->zi_open_gc_zone)
		xfs_open_zone_put(zi->zi_open_gc_zone);
}