1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_mount.h" 13 #include "xfs_inode.h" 14 #include "xfs_trans.h" 15 #include "xfs_trans_priv.h" 16 #include "xfs_inode_item.h" 17 #include "xfs_quota.h" 18 #include "xfs_trace.h" 19 #include "xfs_icache.h" 20 #include "xfs_bmap_util.h" 21 #include "xfs_dquot_item.h" 22 #include "xfs_dquot.h" 23 #include "xfs_reflink.h" 24 #include "xfs_ialloc.h" 25 #include "xfs_ag.h" 26 #include "xfs_log_priv.h" 27 #include "xfs_health.h" 28 #include "xfs_da_format.h" 29 #include "xfs_dir2.h" 30 #include "xfs_metafile.h" 31 32 #include <linux/iversion.h> 33 34 /* Radix tree tags for incore inode tree. */ 35 36 /* inode is to be reclaimed */ 37 #define XFS_ICI_RECLAIM_TAG 0 38 /* Inode has speculative preallocations (posteof or cow) to clean. */ 39 #define XFS_ICI_BLOCKGC_TAG 1 40 41 /* 42 * The goal for walking incore inodes. These can correspond with incore inode 43 * radix tree tags when convenient. Avoid existing XFS_IWALK namespace. 44 */ 45 enum xfs_icwalk_goal { 46 /* Goals directly associated with tagged inodes. */ 47 XFS_ICWALK_BLOCKGC = XFS_ICI_BLOCKGC_TAG, 48 XFS_ICWALK_RECLAIM = XFS_ICI_RECLAIM_TAG, 49 }; 50 51 static int xfs_icwalk(struct xfs_mount *mp, 52 enum xfs_icwalk_goal goal, struct xfs_icwalk *icw); 53 static int xfs_icwalk_ag(struct xfs_perag *pag, 54 enum xfs_icwalk_goal goal, struct xfs_icwalk *icw); 55 56 /* 57 * Private inode cache walk flags for struct xfs_icwalk. Must not 58 * coincide with XFS_ICWALK_FLAGS_VALID. 59 */ 60 61 /* Stop scanning after icw_scan_limit inodes. */ 62 #define XFS_ICWALK_FLAG_SCAN_LIMIT (1U << 28) 63 64 #define XFS_ICWALK_FLAG_RECLAIM_SICK (1U << 27) 65 #define XFS_ICWALK_FLAG_UNION (1U << 26) /* union filter algorithm */ 66 67 #define XFS_ICWALK_PRIVATE_FLAGS (XFS_ICWALK_FLAG_SCAN_LIMIT | \ 68 XFS_ICWALK_FLAG_RECLAIM_SICK | \ 69 XFS_ICWALK_FLAG_UNION) 70 71 /* Marks for the perag xarray */ 72 #define XFS_PERAG_RECLAIM_MARK XA_MARK_0 73 #define XFS_PERAG_BLOCKGC_MARK XA_MARK_1 74 75 static inline xa_mark_t ici_tag_to_mark(unsigned int tag) 76 { 77 if (tag == XFS_ICI_RECLAIM_TAG) 78 return XFS_PERAG_RECLAIM_MARK; 79 ASSERT(tag == XFS_ICI_BLOCKGC_TAG); 80 return XFS_PERAG_BLOCKGC_MARK; 81 } 82 83 /* 84 * Allocate and initialise an xfs_inode. 85 */ 86 struct xfs_inode * 87 xfs_inode_alloc( 88 struct xfs_mount *mp, 89 xfs_ino_t ino) 90 { 91 struct xfs_inode *ip; 92 93 /* 94 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL 95 * and return NULL here on ENOMEM. 96 */ 97 ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL); 98 99 if (inode_init_always(mp->m_super, VFS_I(ip))) { 100 kmem_cache_free(xfs_inode_cache, ip); 101 return NULL; 102 } 103 104 /* VFS doesn't initialise i_mode! 
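* Zero it here so that a zero i_mode continues to mean a free or not-yet-set-up incore inode, which the free-state checks in xfs_iget_check_free_state() rely on.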
*/ 105 VFS_I(ip)->i_mode = 0; 106 mapping_set_folio_min_order(VFS_I(ip)->i_mapping, 107 M_IGEO(mp)->min_folio_order); 108 109 XFS_STATS_INC(mp, vn_active); 110 ASSERT(atomic_read(&ip->i_pincount) == 0); 111 ASSERT(ip->i_ino == 0); 112 113 /* initialise the xfs inode */ 114 ip->i_ino = ino; 115 ip->i_mount = mp; 116 memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); 117 ip->i_cowfp = NULL; 118 memset(&ip->i_af, 0, sizeof(ip->i_af)); 119 ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS; 120 memset(&ip->i_df, 0, sizeof(ip->i_df)); 121 ip->i_flags = 0; 122 ip->i_delayed_blks = 0; 123 ip->i_diflags2 = mp->m_ino_geo.new_diflags2; 124 ip->i_nblocks = 0; 125 ip->i_forkoff = 0; 126 ip->i_sick = 0; 127 ip->i_checked = 0; 128 INIT_WORK(&ip->i_ioend_work, xfs_end_io); 129 INIT_LIST_HEAD(&ip->i_ioend_list); 130 spin_lock_init(&ip->i_ioend_lock); 131 ip->i_next_unlinked = NULLAGINO; 132 ip->i_prev_unlinked = 0; 133 134 return ip; 135 } 136 137 STATIC void 138 xfs_inode_free_callback( 139 struct rcu_head *head) 140 { 141 struct inode *inode = container_of(head, struct inode, i_rcu); 142 struct xfs_inode *ip = XFS_I(inode); 143 144 switch (VFS_I(ip)->i_mode & S_IFMT) { 145 case S_IFREG: 146 case S_IFDIR: 147 case S_IFLNK: 148 xfs_idestroy_fork(&ip->i_df); 149 break; 150 } 151 152 xfs_ifork_zap_attr(ip); 153 154 if (ip->i_cowfp) { 155 xfs_idestroy_fork(ip->i_cowfp); 156 kmem_cache_free(xfs_ifork_cache, ip->i_cowfp); 157 } 158 if (ip->i_itemp) { 159 ASSERT(!test_bit(XFS_LI_IN_AIL, 160 &ip->i_itemp->ili_item.li_flags)); 161 xfs_inode_item_destroy(ip); 162 ip->i_itemp = NULL; 163 } 164 165 kmem_cache_free(xfs_inode_cache, ip); 166 } 167 168 static void 169 __xfs_inode_free( 170 struct xfs_inode *ip) 171 { 172 /* asserts to verify all state is correct here */ 173 ASSERT(atomic_read(&ip->i_pincount) == 0); 174 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list)); 175 XFS_STATS_DEC(ip->i_mount, vn_active); 176 177 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); 178 } 179 180 void 181 xfs_inode_free( 182 struct xfs_inode *ip) 183 { 184 ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING)); 185 186 /* 187 * Because we use RCU freeing we need to ensure the inode always 188 * appears to be reclaimed with an invalid inode number when in the 189 * free state. The ip->i_flags_lock provides the barrier against lookup 190 * races. 191 */ 192 spin_lock(&ip->i_flags_lock); 193 ip->i_flags = XFS_IRECLAIM; 194 ip->i_ino = 0; 195 spin_unlock(&ip->i_flags_lock); 196 197 __xfs_inode_free(ip); 198 } 199 200 /* 201 * Queue background inode reclaim work if there are reclaimable inodes and there 202 * isn't reclaim work already scheduled or in progress. 203 */ 204 static void 205 xfs_reclaim_work_queue( 206 struct xfs_mount *mp) 207 { 208 209 rcu_read_lock(); 210 if (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) { 211 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, 212 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); 213 } 214 rcu_read_unlock(); 215 } 216 217 /* 218 * Background scanning to trim preallocated space. This is queued based on the 219 * 'speculative_prealloc_lifetime' tunable (5m by default). 
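* The worker is (re)queued xfs_blockgc_secs seconds into the future; see the msecs_to_jiffies(xfs_blockgc_secs * 1000) conversion below.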
220 */ 221 static inline void 222 xfs_blockgc_queue( 223 struct xfs_perag *pag) 224 { 225 struct xfs_mount *mp = pag_mount(pag); 226 227 if (!xfs_is_blockgc_enabled(mp)) 228 return; 229 230 rcu_read_lock(); 231 if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) 232 queue_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work, 233 msecs_to_jiffies(xfs_blockgc_secs * 1000)); 234 rcu_read_unlock(); 235 } 236 237 /* Set a tag on both the AG incore inode tree and the AG radix tree. */ 238 static void 239 xfs_perag_set_inode_tag( 240 struct xfs_perag *pag, 241 xfs_agino_t agino, 242 unsigned int tag) 243 { 244 bool was_tagged; 245 246 lockdep_assert_held(&pag->pag_ici_lock); 247 248 was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag); 249 radix_tree_tag_set(&pag->pag_ici_root, agino, tag); 250 251 if (tag == XFS_ICI_RECLAIM_TAG) 252 pag->pag_ici_reclaimable++; 253 254 if (was_tagged) 255 return; 256 257 /* propagate the tag up into the pag xarray tree */ 258 xfs_group_set_mark(pag_group(pag), ici_tag_to_mark(tag)); 259 260 /* start background work */ 261 switch (tag) { 262 case XFS_ICI_RECLAIM_TAG: 263 xfs_reclaim_work_queue(pag_mount(pag)); 264 break; 265 case XFS_ICI_BLOCKGC_TAG: 266 xfs_blockgc_queue(pag); 267 break; 268 } 269 270 trace_xfs_perag_set_inode_tag(pag, _RET_IP_); 271 } 272 273 /* Clear a tag on both the AG incore inode tree and the AG radix tree. */ 274 static void 275 xfs_perag_clear_inode_tag( 276 struct xfs_perag *pag, 277 xfs_agino_t agino, 278 unsigned int tag) 279 { 280 lockdep_assert_held(&pag->pag_ici_lock); 281 282 /* 283 * Reclaim can signal (with a null agino) that it cleared its own tag 284 * by removing the inode from the radix tree. 285 */ 286 if (agino != NULLAGINO) 287 radix_tree_tag_clear(&pag->pag_ici_root, agino, tag); 288 else 289 ASSERT(tag == XFS_ICI_RECLAIM_TAG); 290 291 if (tag == XFS_ICI_RECLAIM_TAG) 292 pag->pag_ici_reclaimable--; 293 294 if (radix_tree_tagged(&pag->pag_ici_root, tag)) 295 return; 296 297 /* clear the tag from the pag xarray */ 298 xfs_group_clear_mark(pag_group(pag), ici_tag_to_mark(tag)); 299 trace_xfs_perag_clear_inode_tag(pag, _RET_IP_); 300 } 301 302 /* 303 * Find the next AG after @pag, or the first AG if @pag is NULL. 304 */ 305 static struct xfs_perag * 306 xfs_perag_grab_next_tag( 307 struct xfs_mount *mp, 308 struct xfs_perag *pag, 309 int tag) 310 { 311 return to_perag(xfs_group_grab_next_mark(mp, 312 pag ? pag_group(pag) : NULL, 313 ici_tag_to_mark(tag), XG_TYPE_AG)); 314 } 315 316 /* 317 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode 318 * part of the structure. This is made more complex by the fact we store 319 * information about the on-disk values in the VFS inode and so we can't just 320 * overwrite the values unconditionally. Hence we save the parameters we 321 * need to retain across reinitialisation, and rewrite them into the VFS inode 322 * after reinitialisation even if it fails. 
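* The values saved below (nlink, generation, i_version, mode, rdev, uid, gid and i_state) are rewritten into the VFS inode whether or not inode_init_always() succeeds.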
323 */ 324 static int 325 xfs_reinit_inode( 326 struct xfs_mount *mp, 327 struct inode *inode) 328 { 329 int error; 330 uint32_t nlink = inode->i_nlink; 331 uint32_t generation = inode->i_generation; 332 uint64_t version = inode_peek_iversion(inode); 333 umode_t mode = inode->i_mode; 334 dev_t dev = inode->i_rdev; 335 kuid_t uid = inode->i_uid; 336 kgid_t gid = inode->i_gid; 337 unsigned long state = inode->i_state; 338 339 error = inode_init_always(mp->m_super, inode); 340 341 set_nlink(inode, nlink); 342 inode->i_generation = generation; 343 inode_set_iversion_queried(inode, version); 344 inode->i_mode = mode; 345 inode->i_rdev = dev; 346 inode->i_uid = uid; 347 inode->i_gid = gid; 348 inode->i_state = state; 349 mapping_set_folio_min_order(inode->i_mapping, 350 M_IGEO(mp)->min_folio_order); 351 return error; 352 } 353 354 /* 355 * Carefully nudge an inode whose VFS state has been torn down back into a 356 * usable state. Drops the i_flags_lock and the rcu read lock. 357 */ 358 static int 359 xfs_iget_recycle( 360 struct xfs_perag *pag, 361 struct xfs_inode *ip) __releases(&ip->i_flags_lock) 362 { 363 struct xfs_mount *mp = ip->i_mount; 364 struct inode *inode = VFS_I(ip); 365 int error; 366 367 trace_xfs_iget_recycle(ip); 368 369 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) 370 return -EAGAIN; 371 372 /* 373 * We need to make it look like the inode is being reclaimed to prevent 374 * the actual reclaim workers from stomping over us while we recycle 375 * the inode. We can't clear the radix tree tag yet as it requires 376 * pag_ici_lock to be held exclusive. 377 */ 378 ip->i_flags |= XFS_IRECLAIM; 379 380 spin_unlock(&ip->i_flags_lock); 381 rcu_read_unlock(); 382 383 ASSERT(!rwsem_is_locked(&inode->i_rwsem)); 384 error = xfs_reinit_inode(mp, inode); 385 xfs_iunlock(ip, XFS_ILOCK_EXCL); 386 if (error) { 387 /* 388 * Re-initializing the inode failed, and we are in deep 389 * trouble. Try to re-add it to the reclaim list. 390 */ 391 rcu_read_lock(); 392 spin_lock(&ip->i_flags_lock); 393 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); 394 ASSERT(ip->i_flags & XFS_IRECLAIMABLE); 395 spin_unlock(&ip->i_flags_lock); 396 rcu_read_unlock(); 397 398 trace_xfs_iget_recycle_fail(ip); 399 return error; 400 } 401 402 spin_lock(&pag->pag_ici_lock); 403 spin_lock(&ip->i_flags_lock); 404 405 /* 406 * Clear the per-lifetime state in the inode as we are now effectively 407 * a new inode and need to return to the initial state before reuse 408 * occurs. 409 */ 410 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; 411 ip->i_flags |= XFS_INEW; 412 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), 413 XFS_ICI_RECLAIM_TAG); 414 inode->i_state = I_NEW; 415 spin_unlock(&ip->i_flags_lock); 416 spin_unlock(&pag->pag_ici_lock); 417 418 return 0; 419 } 420 421 /* 422 * If we are allocating a new inode, then check what was returned is 423 * actually a free, empty inode. If we are not allocating an inode, 424 * then check we didn't find a free inode. 425 * 426 * Returns: 427 * 0 if the inode free state matches the lookup context 428 * -ENOENT if the inode is free and we are not allocating 429 * -EFSCORRUPTED if there is any state mismatch at all 430 */ 431 static int 432 xfs_iget_check_free_state( 433 struct xfs_inode *ip, 434 int flags) 435 { 436 if (flags & XFS_IGET_CREATE) { 437 /* should be a free inode */ 438 if (VFS_I(ip)->i_mode != 0) { 439 xfs_warn(ip->i_mount, 440 "Corruption detected! Free inode 0x%llx not marked free! 
(mode 0x%x)", 441 ip->i_ino, VFS_I(ip)->i_mode); 442 xfs_agno_mark_sick(ip->i_mount, 443 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), 444 XFS_SICK_AG_INOBT); 445 return -EFSCORRUPTED; 446 } 447 448 if (ip->i_nblocks != 0) { 449 xfs_warn(ip->i_mount, 450 "Corruption detected! Free inode 0x%llx has blocks allocated!", 451 ip->i_ino); 452 xfs_agno_mark_sick(ip->i_mount, 453 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), 454 XFS_SICK_AG_INOBT); 455 return -EFSCORRUPTED; 456 } 457 return 0; 458 } 459 460 /* should be an allocated inode */ 461 if (VFS_I(ip)->i_mode == 0) 462 return -ENOENT; 463 464 return 0; 465 } 466 467 /* Make all pending inactivation work start immediately. */ 468 static bool 469 xfs_inodegc_queue_all( 470 struct xfs_mount *mp) 471 { 472 struct xfs_inodegc *gc; 473 int cpu; 474 bool ret = false; 475 476 for_each_cpu(cpu, &mp->m_inodegc_cpumask) { 477 gc = per_cpu_ptr(mp->m_inodegc, cpu); 478 if (!llist_empty(&gc->list)) { 479 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0); 480 ret = true; 481 } 482 } 483 484 return ret; 485 } 486 487 /* Wait for all queued work and collect errors */ 488 static int 489 xfs_inodegc_wait_all( 490 struct xfs_mount *mp) 491 { 492 int cpu; 493 int error = 0; 494 495 flush_workqueue(mp->m_inodegc_wq); 496 for_each_cpu(cpu, &mp->m_inodegc_cpumask) { 497 struct xfs_inodegc *gc; 498 499 gc = per_cpu_ptr(mp->m_inodegc, cpu); 500 if (gc->error && !error) 501 error = gc->error; 502 gc->error = 0; 503 } 504 505 return error; 506 } 507 508 /* 509 * Check the validity of the inode we just found it the cache 510 */ 511 static int 512 xfs_iget_cache_hit( 513 struct xfs_perag *pag, 514 struct xfs_inode *ip, 515 xfs_ino_t ino, 516 int flags, 517 int lock_flags) __releases(RCU) 518 { 519 struct inode *inode = VFS_I(ip); 520 struct xfs_mount *mp = ip->i_mount; 521 int error; 522 523 /* 524 * check for re-use of an inode within an RCU grace period due to the 525 * radix tree nodes not being updated yet. We monitor for this by 526 * setting the inode number to zero before freeing the inode structure. 527 * If the inode has been reallocated and set up, then the inode number 528 * will not match, so check for that, too. 529 */ 530 spin_lock(&ip->i_flags_lock); 531 if (ip->i_ino != ino) 532 goto out_skip; 533 534 /* 535 * If we are racing with another cache hit that is currently 536 * instantiating this inode or currently recycling it out of 537 * reclaimable state, wait for the initialisation to complete 538 * before continuing. 539 * 540 * If we're racing with the inactivation worker we also want to wait. 541 * If we're creating a new file, it's possible that the worker 542 * previously marked the inode as free on disk but hasn't finished 543 * updating the incore state yet. The AGI buffer will be dirty and 544 * locked to the icreate transaction, so a synchronous push of the 545 * inodegc workers would result in deadlock. For a regular iget, the 546 * worker is running already, so we might as well wait. 547 * 548 * XXX(hch): eventually we should do something equivalent to 549 * wait_on_inode to wait for these flags to be cleared 550 * instead of polling for it. 551 */ 552 if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING)) 553 goto out_skip; 554 555 if (ip->i_flags & XFS_NEED_INACTIVE) { 556 /* Unlinked inodes cannot be re-grabbed. */ 557 if (VFS_I(ip)->i_nlink == 0) { 558 error = -ENOENT; 559 goto out_error; 560 } 561 goto out_inodegc_flush; 562 } 563 564 /* 565 * Check the inode free state is valid. This also detects lookup 566 * racing with unlinks. 
567 */ 568 error = xfs_iget_check_free_state(ip, flags); 569 if (error) 570 goto out_error; 571 572 /* Skip inodes that have no vfs state. */ 573 if ((flags & XFS_IGET_INCORE) && 574 (ip->i_flags & XFS_IRECLAIMABLE)) 575 goto out_skip; 576 577 /* The inode fits the selection criteria; process it. */ 578 if (ip->i_flags & XFS_IRECLAIMABLE) { 579 /* Drops i_flags_lock and RCU read lock. */ 580 error = xfs_iget_recycle(pag, ip); 581 if (error == -EAGAIN) 582 goto out_skip; 583 if (error) 584 return error; 585 } else { 586 /* If the VFS inode is being torn down, pause and try again. */ 587 if (!igrab(inode)) 588 goto out_skip; 589 590 /* We've got a live one. */ 591 spin_unlock(&ip->i_flags_lock); 592 rcu_read_unlock(); 593 trace_xfs_iget_hit(ip); 594 } 595 596 if (lock_flags != 0) 597 xfs_ilock(ip, lock_flags); 598 599 if (!(flags & XFS_IGET_INCORE)) 600 xfs_iflags_clear(ip, XFS_ISTALE); 601 XFS_STATS_INC(mp, xs_ig_found); 602 603 return 0; 604 605 out_skip: 606 trace_xfs_iget_skip(ip); 607 XFS_STATS_INC(mp, xs_ig_frecycle); 608 error = -EAGAIN; 609 out_error: 610 spin_unlock(&ip->i_flags_lock); 611 rcu_read_unlock(); 612 return error; 613 614 out_inodegc_flush: 615 spin_unlock(&ip->i_flags_lock); 616 rcu_read_unlock(); 617 /* 618 * Do not wait for the workers, because the caller could hold an AGI 619 * buffer lock. We're just going to sleep in a loop anyway. 620 */ 621 if (xfs_is_inodegc_enabled(mp)) 622 xfs_inodegc_queue_all(mp); 623 return -EAGAIN; 624 } 625 626 static int 627 xfs_iget_cache_miss( 628 struct xfs_mount *mp, 629 struct xfs_perag *pag, 630 xfs_trans_t *tp, 631 xfs_ino_t ino, 632 struct xfs_inode **ipp, 633 int flags, 634 int lock_flags) 635 { 636 struct xfs_inode *ip; 637 int error; 638 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); 639 640 ip = xfs_inode_alloc(mp, ino); 641 if (!ip) 642 return -ENOMEM; 643 644 error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags); 645 if (error) 646 goto out_destroy; 647 648 /* 649 * For version 5 superblocks, if we are initialising a new inode and we 650 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can 651 * simply build the new inode core with a random generation number. 652 * 653 * For version 4 (and older) superblocks, log recovery is dependent on 654 * the i_flushiter field being initialised from the current on-disk 655 * value and hence we must also read the inode off disk even when 656 * initializing new inodes. 657 */ 658 if (xfs_has_v3inodes(mp) && 659 (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) { 660 VFS_I(ip)->i_generation = get_random_u32(); 661 } else { 662 struct xfs_buf *bp; 663 664 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp); 665 if (error) 666 goto out_destroy; 667 668 error = xfs_inode_from_disk(ip, 669 xfs_buf_offset(bp, ip->i_imap.im_boffset)); 670 if (!error) 671 xfs_buf_set_ref(bp, XFS_INO_REF); 672 else 673 xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); 674 xfs_trans_brelse(tp, bp); 675 676 if (error) 677 goto out_destroy; 678 } 679 680 trace_xfs_iget_miss(ip); 681 682 /* 683 * Check the inode free state is valid. This also detects lookup 684 * racing with unlinks. 685 */ 686 error = xfs_iget_check_free_state(ip, flags); 687 if (error) 688 goto out_destroy; 689 690 /* 691 * Preload the radix tree so we can insert safely under the 692 * write spinlock. Note that we cannot sleep inside the preload 693 * region. 
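* The preload is paired with radix_tree_preload_end() in both the success and error paths below.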
694 */ 695 if (radix_tree_preload(GFP_KERNEL | __GFP_NOLOCKDEP)) { 696 error = -EAGAIN; 697 goto out_destroy; 698 } 699 700 /* 701 * Because the inode hasn't been added to the radix-tree yet it can't 702 * be found by another thread, so we can do the non-sleeping lock here. 703 */ 704 if (lock_flags) { 705 if (!xfs_ilock_nowait(ip, lock_flags)) 706 BUG(); 707 } 708 709 /* 710 * These values must be set before inserting the inode into the radix 711 * tree as the moment it is inserted a concurrent lookup (allowed by the 712 * RCU locking mechanism) can find it and that lookup must see that this 713 * is an inode currently under construction (i.e. that XFS_INEW is set). 714 * The ip->i_flags_lock that protects the XFS_INEW flag forms the 715 * memory barrier that ensures this detection works correctly at lookup 716 * time. 717 */ 718 if (flags & XFS_IGET_DONTCACHE) 719 d_mark_dontcache(VFS_I(ip)); 720 ip->i_udquot = NULL; 721 ip->i_gdquot = NULL; 722 ip->i_pdquot = NULL; 723 xfs_iflags_set(ip, XFS_INEW); 724 725 /* insert the new inode */ 726 spin_lock(&pag->pag_ici_lock); 727 error = radix_tree_insert(&pag->pag_ici_root, agino, ip); 728 if (unlikely(error)) { 729 WARN_ON(error != -EEXIST); 730 XFS_STATS_INC(mp, xs_ig_dup); 731 error = -EAGAIN; 732 goto out_preload_end; 733 } 734 spin_unlock(&pag->pag_ici_lock); 735 radix_tree_preload_end(); 736 737 *ipp = ip; 738 return 0; 739 740 out_preload_end: 741 spin_unlock(&pag->pag_ici_lock); 742 radix_tree_preload_end(); 743 if (lock_flags) 744 xfs_iunlock(ip, lock_flags); 745 out_destroy: 746 __destroy_inode(VFS_I(ip)); 747 xfs_inode_free(ip); 748 return error; 749 } 750 751 /* 752 * Look up an inode by number in the given file system. The inode is looked up 753 * in the cache held in each AG. If the inode is found in the cache, initialise 754 * the vfs inode if necessary. 755 * 756 * If it is not in core, read it in from the file system's device, add it to the 757 * cache and initialise the vfs inode. 758 * 759 * The inode is locked according to the value of the lock_flags parameter. 760 * Inode lookup is only done during metadata operations and not as part of the 761 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup. 762 */ 763 int 764 xfs_iget( 765 struct xfs_mount *mp, 766 struct xfs_trans *tp, 767 xfs_ino_t ino, 768 uint flags, 769 uint lock_flags, 770 struct xfs_inode **ipp) 771 { 772 struct xfs_inode *ip; 773 struct xfs_perag *pag; 774 xfs_agino_t agino; 775 int error; 776 777 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0); 778 779 /* reject inode numbers outside existing AGs */ 780 if (!xfs_verify_ino(mp, ino)) 781 return -EINVAL; 782 783 XFS_STATS_INC(mp, xs_ig_attempts); 784 785 /* get the perag structure and ensure that it's inode capable */ 786 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); 787 agino = XFS_INO_TO_AGINO(mp, ino); 788 789 again: 790 error = 0; 791 rcu_read_lock(); 792 ip = radix_tree_lookup(&pag->pag_ici_root, agino); 793 794 if (ip) { 795 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); 796 if (error) 797 goto out_error_or_again; 798 } else { 799 rcu_read_unlock(); 800 if (flags & XFS_IGET_INCORE) { 801 error = -ENODATA; 802 goto out_error_or_again; 803 } 804 XFS_STATS_INC(mp, xs_ig_missed); 805 806 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, 807 flags, lock_flags); 808 if (error) 809 goto out_error_or_again; 810 } 811 xfs_perag_put(pag); 812 813 *ipp = ip; 814 815 /* 816 * If we have a real type for an on-disk inode, we can setup the inode 817 * now. 
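* That is the XFS_INEW && i_mode != 0 case tested just below.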
If it's a new inode being created, xfs_init_new_inode will 818 * handle it. 819 */ 820 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0) 821 xfs_setup_existing_inode(ip); 822 return 0; 823 824 out_error_or_again: 825 if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) && 826 error == -EAGAIN) { 827 delay(1); 828 goto again; 829 } 830 xfs_perag_put(pag); 831 return error; 832 } 833 834 /* 835 * Get a metadata inode. 836 * 837 * The metafile type must match the file mode exactly, and for files in the 838 * metadata directory tree, it must match the inode's metatype exactly. 839 */ 840 int 841 xfs_trans_metafile_iget( 842 struct xfs_trans *tp, 843 xfs_ino_t ino, 844 enum xfs_metafile_type metafile_type, 845 struct xfs_inode **ipp) 846 { 847 struct xfs_mount *mp = tp->t_mountp; 848 struct xfs_inode *ip; 849 umode_t mode; 850 int error; 851 852 error = xfs_iget(mp, tp, ino, 0, 0, &ip); 853 if (error == -EFSCORRUPTED || error == -EINVAL) 854 goto whine; 855 if (error) 856 return error; 857 858 if (VFS_I(ip)->i_nlink == 0) 859 goto bad_rele; 860 861 if (metafile_type == XFS_METAFILE_DIR) 862 mode = S_IFDIR; 863 else 864 mode = S_IFREG; 865 if (inode_wrong_type(VFS_I(ip), mode)) 866 goto bad_rele; 867 if (xfs_has_metadir(mp)) { 868 if (!xfs_is_metadir_inode(ip)) 869 goto bad_rele; 870 if (metafile_type != ip->i_metatype) 871 goto bad_rele; 872 } 873 874 *ipp = ip; 875 return 0; 876 bad_rele: 877 xfs_irele(ip); 878 whine: 879 xfs_err(mp, "metadata inode 0x%llx type %u is corrupt", ino, 880 metafile_type); 881 xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR); 882 return -EFSCORRUPTED; 883 } 884 885 /* Grab a metadata file if the caller doesn't already have a transaction. */ 886 int 887 xfs_metafile_iget( 888 struct xfs_mount *mp, 889 xfs_ino_t ino, 890 enum xfs_metafile_type metafile_type, 891 struct xfs_inode **ipp) 892 { 893 struct xfs_trans *tp; 894 int error; 895 896 error = xfs_trans_alloc_empty(mp, &tp); 897 if (error) 898 return error; 899 900 error = xfs_trans_metafile_iget(tp, ino, metafile_type, ipp); 901 xfs_trans_cancel(tp); 902 return error; 903 } 904 905 /* 906 * Grab the inode for reclaim exclusively. 907 * 908 * We have found this inode via a lookup under RCU, so the inode may have 909 * already been freed, or it may be in the process of being recycled by 910 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode 911 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE 912 * will not be set. Hence we need to check for both these flag conditions to 913 * avoid inodes that are no longer reclaim candidates. 914 * 915 * Note: checking for other state flags here, under the i_flags_lock or not, is 916 * racy and should be avoided. Those races should be resolved only after we have 917 * ensured that we are able to reclaim this inode and the world can see that we 918 * are going to reclaim it. 919 * 920 * Return true if we grabbed it, false otherwise. 921 */ 922 static bool 923 xfs_reclaim_igrab( 924 struct xfs_inode *ip, 925 struct xfs_icwalk *icw) 926 { 927 ASSERT(rcu_read_lock_held()); 928 929 spin_lock(&ip->i_flags_lock); 930 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || 931 __xfs_iflags_test(ip, XFS_IRECLAIM)) { 932 /* not a reclaim candidate. */ 933 spin_unlock(&ip->i_flags_lock); 934 return false; 935 } 936 937 /* Don't reclaim a sick inode unless the caller asked for it. 
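* Callers opt in by setting XFS_ICWALK_FLAG_RECLAIM_SICK, which xfs_reclaim_inodes() and xfs_reclaim_inodes_nr() do when xfs_want_reclaim_sick() is true (unmount, norecovery or shutdown).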
*/ 938 if (ip->i_sick && 939 (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) { 940 spin_unlock(&ip->i_flags_lock); 941 return false; 942 } 943 944 __xfs_iflags_set(ip, XFS_IRECLAIM); 945 spin_unlock(&ip->i_flags_lock); 946 return true; 947 } 948 949 /* 950 * Inode reclaim is non-blocking, so the default action if progress cannot be 951 * made is to "requeue" the inode for reclaim by unlocking it and clearing the 952 * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about 953 * blocking anymore and hence we can wait for the inode to be able to reclaim 954 * it. 955 * 956 * We do no IO here - if callers require inodes to be cleaned they must push the 957 * AIL first to trigger writeback of dirty inodes. This enables writeback to be 958 * done in the background in a non-blocking manner, and enables memory reclaim 959 * to make progress without blocking. 960 */ 961 static void 962 xfs_reclaim_inode( 963 struct xfs_inode *ip, 964 struct xfs_perag *pag) 965 { 966 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ 967 968 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) 969 goto out; 970 if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING)) 971 goto out_iunlock; 972 973 /* 974 * Check for log shutdown because aborting the inode can move the log 975 * tail and corrupt in memory state. This is fine if the log is shut 976 * down, but if the log is still active and only the mount is shut down 977 * then the in-memory log tail movement caused by the abort can be 978 * incorrectly propagated to disk. 979 */ 980 if (xlog_is_shutdown(ip->i_mount->m_log)) { 981 xfs_iunpin_wait(ip); 982 xfs_iflush_shutdown_abort(ip); 983 goto reclaim; 984 } 985 if (xfs_ipincount(ip)) 986 goto out_clear_flush; 987 if (!xfs_inode_clean(ip)) 988 goto out_clear_flush; 989 990 xfs_iflags_clear(ip, XFS_IFLUSHING); 991 reclaim: 992 trace_xfs_inode_reclaiming(ip); 993 994 /* 995 * Because we use RCU freeing we need to ensure the inode always appears 996 * to be reclaimed with an invalid inode number when in the free state. 997 * We do this as early as possible under the ILOCK so that 998 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to 999 * detect races with us here. By doing this, we guarantee that once 1000 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that 1001 * it will see either a valid inode that will serialise correctly, or it 1002 * will see an invalid inode that it can skip. 1003 */ 1004 spin_lock(&ip->i_flags_lock); 1005 ip->i_flags = XFS_IRECLAIM; 1006 ip->i_ino = 0; 1007 ip->i_sick = 0; 1008 ip->i_checked = 0; 1009 spin_unlock(&ip->i_flags_lock); 1010 1011 ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL); 1012 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1013 1014 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); 1015 /* 1016 * Remove the inode from the per-AG radix tree. 1017 * 1018 * Because radix_tree_delete won't complain even if the item was never 1019 * added to the tree assert that it's been there before to catch 1020 * problems with the inode life time early on. 1021 */ 1022 spin_lock(&pag->pag_ici_lock); 1023 if (!radix_tree_delete(&pag->pag_ici_root, 1024 XFS_INO_TO_AGINO(ip->i_mount, ino))) 1025 ASSERT(0); 1026 xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG); 1027 spin_unlock(&pag->pag_ici_lock); 1028 1029 /* 1030 * Here we do an (almost) spurious inode lock in order to coordinate 1031 * with inode cache radix tree lookups. This is because the lookup 1032 * can reference the inodes in the cache without taking references. 
1033 * 1034 * We make that OK here by ensuring that we wait until the inode is 1035 * unlocked after the lookup before we go ahead and free it. 1036 */ 1037 xfs_ilock(ip, XFS_ILOCK_EXCL); 1038 ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot); 1039 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1040 ASSERT(xfs_inode_clean(ip)); 1041 1042 __xfs_inode_free(ip); 1043 return; 1044 1045 out_clear_flush: 1046 xfs_iflags_clear(ip, XFS_IFLUSHING); 1047 out_iunlock: 1048 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1049 out: 1050 xfs_iflags_clear(ip, XFS_IRECLAIM); 1051 } 1052 1053 /* Reclaim sick inodes if we're unmounting or the fs went down. */ 1054 static inline bool 1055 xfs_want_reclaim_sick( 1056 struct xfs_mount *mp) 1057 { 1058 return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) || 1059 xfs_is_shutdown(mp); 1060 } 1061 1062 void 1063 xfs_reclaim_inodes( 1064 struct xfs_mount *mp) 1065 { 1066 struct xfs_icwalk icw = { 1067 .icw_flags = 0, 1068 }; 1069 1070 if (xfs_want_reclaim_sick(mp)) 1071 icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK; 1072 1073 while (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) { 1074 xfs_ail_push_all_sync(mp->m_ail); 1075 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw); 1076 } 1077 } 1078 1079 /* 1080 * The shrinker infrastructure determines how many inodes we should scan for 1081 * reclaim. We want as many clean inodes ready to reclaim as possible, so we 1082 * push the AIL here. We also want to proactively free up memory if we can to 1083 * minimise the amount of work memory reclaim has to do so we kick the 1084 * background reclaim if it isn't already scheduled. 1085 */ 1086 long 1087 xfs_reclaim_inodes_nr( 1088 struct xfs_mount *mp, 1089 unsigned long nr_to_scan) 1090 { 1091 struct xfs_icwalk icw = { 1092 .icw_flags = XFS_ICWALK_FLAG_SCAN_LIMIT, 1093 .icw_scan_limit = min_t(unsigned long, LONG_MAX, nr_to_scan), 1094 }; 1095 1096 if (xfs_want_reclaim_sick(mp)) 1097 icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK; 1098 1099 /* kick background reclaimer and push the AIL */ 1100 xfs_reclaim_work_queue(mp); 1101 xfs_ail_push_all(mp->m_ail); 1102 1103 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw); 1104 return 0; 1105 } 1106 1107 /* 1108 * Return the number of reclaimable inodes in the filesystem for 1109 * the shrinker to determine how much to reclaim. 1110 */ 1111 long 1112 xfs_reclaim_inodes_count( 1113 struct xfs_mount *mp) 1114 { 1115 XA_STATE (xas, &mp->m_groups[XG_TYPE_AG].xa, 0); 1116 long reclaimable = 0; 1117 struct xfs_perag *pag; 1118 1119 rcu_read_lock(); 1120 xas_for_each_marked(&xas, pag, ULONG_MAX, XFS_PERAG_RECLAIM_MARK) { 1121 trace_xfs_reclaim_inodes_count(pag, _THIS_IP_); 1122 reclaimable += pag->pag_ici_reclaimable; 1123 } 1124 rcu_read_unlock(); 1125 1126 return reclaimable; 1127 } 1128 1129 STATIC bool 1130 xfs_icwalk_match_id( 1131 struct xfs_inode *ip, 1132 struct xfs_icwalk *icw) 1133 { 1134 if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) && 1135 !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid)) 1136 return false; 1137 1138 if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) && 1139 !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid)) 1140 return false; 1141 1142 if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) && 1143 ip->i_projid != icw->icw_prid) 1144 return false; 1145 1146 return true; 1147 } 1148 1149 /* 1150 * A union-based inode filtering algorithm. Process the inode if any of the 1151 * criteria match. This is for global/internal scans only. 
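* Compare xfs_icwalk_match_id() above, which requires all of the specified uid/gid/prid criteria to match.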
1152 */ 1153 STATIC bool 1154 xfs_icwalk_match_id_union( 1155 struct xfs_inode *ip, 1156 struct xfs_icwalk *icw) 1157 { 1158 if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) && 1159 uid_eq(VFS_I(ip)->i_uid, icw->icw_uid)) 1160 return true; 1161 1162 if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) && 1163 gid_eq(VFS_I(ip)->i_gid, icw->icw_gid)) 1164 return true; 1165 1166 if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) && 1167 ip->i_projid == icw->icw_prid) 1168 return true; 1169 1170 return false; 1171 } 1172 1173 /* 1174 * Is this inode @ip eligible for eof/cow block reclamation, given some 1175 * filtering parameters @icw? The inode is eligible if @icw is null or 1176 * if the predicate functions match. 1177 */ 1178 static bool 1179 xfs_icwalk_match( 1180 struct xfs_inode *ip, 1181 struct xfs_icwalk *icw) 1182 { 1183 bool match; 1184 1185 if (!icw) 1186 return true; 1187 1188 if (icw->icw_flags & XFS_ICWALK_FLAG_UNION) 1189 match = xfs_icwalk_match_id_union(ip, icw); 1190 else 1191 match = xfs_icwalk_match_id(ip, icw); 1192 if (!match) 1193 return false; 1194 1195 /* skip the inode if the file size is too small */ 1196 if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) && 1197 XFS_ISIZE(ip) < icw->icw_min_file_size) 1198 return false; 1199 1200 return true; 1201 } 1202 1203 /* 1204 * This is a fast pass over the inode cache to try to get reclaim moving on as 1205 * many inodes as possible in a short period of time. It kicks itself every few 1206 * seconds, as well as being kicked by the inode cache shrinker when memory 1207 * goes low. 1208 */ 1209 void 1210 xfs_reclaim_worker( 1211 struct work_struct *work) 1212 { 1213 struct xfs_mount *mp = container_of(to_delayed_work(work), 1214 struct xfs_mount, m_reclaim_work); 1215 1216 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL); 1217 xfs_reclaim_work_queue(mp); 1218 } 1219 1220 STATIC int 1221 xfs_inode_free_eofblocks( 1222 struct xfs_inode *ip, 1223 struct xfs_icwalk *icw, 1224 unsigned int *lockflags) 1225 { 1226 bool wait; 1227 1228 wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC); 1229 1230 if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS)) 1231 return 0; 1232 1233 /* 1234 * If the mapping is dirty the operation can block and wait for some 1235 * time. Unless we are waiting, skip it. 1236 */ 1237 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) 1238 return 0; 1239 1240 if (!xfs_icwalk_match(ip, icw)) 1241 return 0; 1242 1243 /* 1244 * If the caller is waiting, return -EAGAIN to keep the background 1245 * scanner moving and revisit the inode in a subsequent pass. 1246 */ 1247 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { 1248 if (wait) 1249 return -EAGAIN; 1250 return 0; 1251 } 1252 *lockflags |= XFS_IOLOCK_EXCL; 1253 1254 if (xfs_can_free_eofblocks(ip)) 1255 return xfs_free_eofblocks(ip); 1256 1257 /* inode could be preallocated */ 1258 trace_xfs_inode_free_eofblocks_invalid(ip); 1259 xfs_inode_clear_eofblocks_tag(ip); 1260 return 0; 1261 } 1262 1263 static void 1264 xfs_blockgc_set_iflag( 1265 struct xfs_inode *ip, 1266 unsigned long iflag) 1267 { 1268 struct xfs_mount *mp = ip->i_mount; 1269 struct xfs_perag *pag; 1270 1271 ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0); 1272 1273 /* 1274 * Don't bother locking the AG and looking up in the radix trees 1275 * if we already know that we have the tag set. 
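* The unlocked i_flags test below is only an optimisation; if the flag is not already set we take i_flags_lock and pag_ici_lock to set both the inode flag and the radix tree tag.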
1276 */ 1277 if (ip->i_flags & iflag) 1278 return; 1279 spin_lock(&ip->i_flags_lock); 1280 ip->i_flags |= iflag; 1281 spin_unlock(&ip->i_flags_lock); 1282 1283 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 1284 spin_lock(&pag->pag_ici_lock); 1285 1286 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), 1287 XFS_ICI_BLOCKGC_TAG); 1288 1289 spin_unlock(&pag->pag_ici_lock); 1290 xfs_perag_put(pag); 1291 } 1292 1293 void 1294 xfs_inode_set_eofblocks_tag( 1295 xfs_inode_t *ip) 1296 { 1297 trace_xfs_inode_set_eofblocks_tag(ip); 1298 return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS); 1299 } 1300 1301 static void 1302 xfs_blockgc_clear_iflag( 1303 struct xfs_inode *ip, 1304 unsigned long iflag) 1305 { 1306 struct xfs_mount *mp = ip->i_mount; 1307 struct xfs_perag *pag; 1308 bool clear_tag; 1309 1310 ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0); 1311 1312 spin_lock(&ip->i_flags_lock); 1313 ip->i_flags &= ~iflag; 1314 clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0; 1315 spin_unlock(&ip->i_flags_lock); 1316 1317 if (!clear_tag) 1318 return; 1319 1320 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 1321 spin_lock(&pag->pag_ici_lock); 1322 1323 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), 1324 XFS_ICI_BLOCKGC_TAG); 1325 1326 spin_unlock(&pag->pag_ici_lock); 1327 xfs_perag_put(pag); 1328 } 1329 1330 void 1331 xfs_inode_clear_eofblocks_tag( 1332 xfs_inode_t *ip) 1333 { 1334 trace_xfs_inode_clear_eofblocks_tag(ip); 1335 return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS); 1336 } 1337 1338 /* 1339 * Prepare to free COW fork blocks from an inode. 1340 */ 1341 static bool 1342 xfs_prep_free_cowblocks( 1343 struct xfs_inode *ip, 1344 struct xfs_icwalk *icw) 1345 { 1346 bool sync; 1347 1348 sync = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC); 1349 1350 /* 1351 * Just clear the tag if we have an empty cow fork or none at all. It's 1352 * possible the inode was fully unshared since it was originally tagged. 1353 */ 1354 if (!xfs_inode_has_cow_data(ip)) { 1355 trace_xfs_inode_free_cowblocks_invalid(ip); 1356 xfs_inode_clear_cowblocks_tag(ip); 1357 return false; 1358 } 1359 1360 /* 1361 * A cowblocks trim of an inode can have a significant effect on 1362 * fragmentation even when a reasonable COW extent size hint is set. 1363 * Therefore, we prefer to not process cowblocks unless they are clean 1364 * and idle. We can never process a cowblocks inode that is dirty or has 1365 * in-flight I/O under any circumstances, because outstanding writeback 1366 * or dio expects targeted COW fork blocks exist through write 1367 * completion where they can be remapped into the data fork. 1368 * 1369 * Therefore, the heuristic used here is to never process inodes 1370 * currently opened for write from background (i.e. non-sync) scans. For 1371 * sync scans, use the pagecache/dio state of the inode to ensure we 1372 * never free COW fork blocks out from under pending I/O. 1373 */ 1374 if (!sync && inode_is_open_for_write(VFS_I(ip))) 1375 return false; 1376 return xfs_can_free_cowblocks(ip); 1377 } 1378 1379 /* 1380 * Automatic CoW Reservation Freeing 1381 * 1382 * These functions automatically garbage collect leftover CoW reservations 1383 * that were made on behalf of a cowextsize hint when we start to run out 1384 * of quota or when the reservations sit around for too long. If the file 1385 * has dirty pages or is undergoing writeback, its CoW reservations will 1386 * be retained. 
1387 * 1388 * The actual garbage collection piggybacks off the same code that runs 1389 * the speculative EOF preallocation garbage collector. 1390 */ 1391 STATIC int 1392 xfs_inode_free_cowblocks( 1393 struct xfs_inode *ip, 1394 struct xfs_icwalk *icw, 1395 unsigned int *lockflags) 1396 { 1397 bool wait; 1398 int ret = 0; 1399 1400 wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC); 1401 1402 if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS)) 1403 return 0; 1404 1405 if (!xfs_prep_free_cowblocks(ip, icw)) 1406 return 0; 1407 1408 if (!xfs_icwalk_match(ip, icw)) 1409 return 0; 1410 1411 /* 1412 * If the caller is waiting, return -EAGAIN to keep the background 1413 * scanner moving and revisit the inode in a subsequent pass. 1414 */ 1415 if (!(*lockflags & XFS_IOLOCK_EXCL) && 1416 !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { 1417 if (wait) 1418 return -EAGAIN; 1419 return 0; 1420 } 1421 *lockflags |= XFS_IOLOCK_EXCL; 1422 1423 if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) { 1424 if (wait) 1425 return -EAGAIN; 1426 return 0; 1427 } 1428 *lockflags |= XFS_MMAPLOCK_EXCL; 1429 1430 /* 1431 * Check again, nobody else should be able to dirty blocks or change 1432 * the reflink iflag now that we have the first two locks held. 1433 */ 1434 if (xfs_prep_free_cowblocks(ip, icw)) 1435 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false); 1436 return ret; 1437 } 1438 1439 void 1440 xfs_inode_set_cowblocks_tag( 1441 xfs_inode_t *ip) 1442 { 1443 trace_xfs_inode_set_cowblocks_tag(ip); 1444 return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS); 1445 } 1446 1447 void 1448 xfs_inode_clear_cowblocks_tag( 1449 xfs_inode_t *ip) 1450 { 1451 trace_xfs_inode_clear_cowblocks_tag(ip); 1452 return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS); 1453 } 1454 1455 /* Disable post-EOF and CoW block auto-reclamation. */ 1456 void 1457 xfs_blockgc_stop( 1458 struct xfs_mount *mp) 1459 { 1460 struct xfs_perag *pag = NULL; 1461 1462 if (!xfs_clear_blockgc_enabled(mp)) 1463 return; 1464 1465 while ((pag = xfs_perag_next(mp, pag))) 1466 cancel_delayed_work_sync(&pag->pag_blockgc_work); 1467 trace_xfs_blockgc_stop(mp, __return_address); 1468 } 1469 1470 /* Enable post-EOF and CoW block auto-reclamation. */ 1471 void 1472 xfs_blockgc_start( 1473 struct xfs_mount *mp) 1474 { 1475 struct xfs_perag *pag = NULL; 1476 1477 if (xfs_set_blockgc_enabled(mp)) 1478 return; 1479 1480 trace_xfs_blockgc_start(mp, __return_address); 1481 while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG))) 1482 xfs_blockgc_queue(pag); 1483 } 1484 1485 /* Don't try to run block gc on an inode that's in any of these states. */ 1486 #define XFS_BLOCKGC_NOGRAB_IFLAGS (XFS_INEW | \ 1487 XFS_NEED_INACTIVE | \ 1488 XFS_INACTIVATING | \ 1489 XFS_IRECLAIMABLE | \ 1490 XFS_IRECLAIM) 1491 /* 1492 * Decide if the given @ip is eligible for garbage collection of speculative 1493 * preallocations, and grab it if so. Returns true if it's ready to go or 1494 * false if we should just ignore it. 
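* A successful grab holds an igrab() reference which xfs_blockgc_scan_inode() later drops with xfs_irele().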
1495 */ 1496 static bool 1497 xfs_blockgc_igrab( 1498 struct xfs_inode *ip) 1499 { 1500 struct inode *inode = VFS_I(ip); 1501 1502 ASSERT(rcu_read_lock_held()); 1503 1504 /* Check for stale RCU freed inode */ 1505 spin_lock(&ip->i_flags_lock); 1506 if (!ip->i_ino) 1507 goto out_unlock_noent; 1508 1509 if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS) 1510 goto out_unlock_noent; 1511 spin_unlock(&ip->i_flags_lock); 1512 1513 /* nothing to sync during shutdown */ 1514 if (xfs_is_shutdown(ip->i_mount)) 1515 return false; 1516 1517 /* If we can't grab the inode, it must on it's way to reclaim. */ 1518 if (!igrab(inode)) 1519 return false; 1520 1521 /* inode is valid */ 1522 return true; 1523 1524 out_unlock_noent: 1525 spin_unlock(&ip->i_flags_lock); 1526 return false; 1527 } 1528 1529 /* Scan one incore inode for block preallocations that we can remove. */ 1530 static int 1531 xfs_blockgc_scan_inode( 1532 struct xfs_inode *ip, 1533 struct xfs_icwalk *icw) 1534 { 1535 unsigned int lockflags = 0; 1536 int error; 1537 1538 error = xfs_inode_free_eofblocks(ip, icw, &lockflags); 1539 if (error) 1540 goto unlock; 1541 1542 error = xfs_inode_free_cowblocks(ip, icw, &lockflags); 1543 unlock: 1544 if (lockflags) 1545 xfs_iunlock(ip, lockflags); 1546 xfs_irele(ip); 1547 return error; 1548 } 1549 1550 /* Background worker that trims preallocated space. */ 1551 void 1552 xfs_blockgc_worker( 1553 struct work_struct *work) 1554 { 1555 struct xfs_perag *pag = container_of(to_delayed_work(work), 1556 struct xfs_perag, pag_blockgc_work); 1557 struct xfs_mount *mp = pag_mount(pag); 1558 int error; 1559 1560 trace_xfs_blockgc_worker(mp, __return_address); 1561 1562 error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL); 1563 if (error) 1564 xfs_info(mp, "AG %u preallocation gc worker failed, err=%d", 1565 pag_agno(pag), error); 1566 xfs_blockgc_queue(pag); 1567 } 1568 1569 /* 1570 * Try to free space in the filesystem by purging inactive inodes, eofblocks 1571 * and cowblocks. 1572 */ 1573 int 1574 xfs_blockgc_free_space( 1575 struct xfs_mount *mp, 1576 struct xfs_icwalk *icw) 1577 { 1578 int error; 1579 1580 trace_xfs_blockgc_free_space(mp, icw, _RET_IP_); 1581 1582 error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw); 1583 if (error) 1584 return error; 1585 1586 return xfs_inodegc_flush(mp); 1587 } 1588 1589 /* 1590 * Reclaim all the free space that we can by scheduling the background blockgc 1591 * and inodegc workers immediately and waiting for them all to clear. 1592 */ 1593 int 1594 xfs_blockgc_flush_all( 1595 struct xfs_mount *mp) 1596 { 1597 struct xfs_perag *pag = NULL; 1598 1599 trace_xfs_blockgc_flush_all(mp, __return_address); 1600 1601 /* 1602 * For each blockgc worker, move its queue time up to now. If it wasn't 1603 * queued, it will not be requeued. Then flush whatever is left. 1604 */ 1605 while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG))) 1606 mod_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work, 0); 1607 1608 while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG))) 1609 flush_delayed_work(&pag->pag_blockgc_work); 1610 1611 return xfs_inodegc_flush(mp); 1612 } 1613 1614 /* 1615 * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which 1616 * quota caused an allocation failure, so we make a best effort by including 1617 * each quota under low free space conditions (less than 1% free space) in the 1618 * scan. 1619 * 1620 * Callers must not hold any inode's ILOCK. 
If requesting a synchronous scan 1621 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or 1622 * MMAPLOCK. 1623 */ 1624 int 1625 xfs_blockgc_free_dquots( 1626 struct xfs_mount *mp, 1627 struct xfs_dquot *udqp, 1628 struct xfs_dquot *gdqp, 1629 struct xfs_dquot *pdqp, 1630 unsigned int iwalk_flags) 1631 { 1632 struct xfs_icwalk icw = {0}; 1633 bool do_work = false; 1634 1635 if (!udqp && !gdqp && !pdqp) 1636 return 0; 1637 1638 /* 1639 * Run a scan to free blocks using the union filter to cover all 1640 * applicable quotas in a single scan. 1641 */ 1642 icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags; 1643 1644 if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) { 1645 icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id); 1646 icw.icw_flags |= XFS_ICWALK_FLAG_UID; 1647 do_work = true; 1648 } 1649 1650 if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) { 1651 icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id); 1652 icw.icw_flags |= XFS_ICWALK_FLAG_GID; 1653 do_work = true; 1654 } 1655 1656 if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) { 1657 icw.icw_prid = pdqp->q_id; 1658 icw.icw_flags |= XFS_ICWALK_FLAG_PRID; 1659 do_work = true; 1660 } 1661 1662 if (!do_work) 1663 return 0; 1664 1665 return xfs_blockgc_free_space(mp, &icw); 1666 } 1667 1668 /* Run cow/eofblocks scans on the quotas attached to the inode. */ 1669 int 1670 xfs_blockgc_free_quota( 1671 struct xfs_inode *ip, 1672 unsigned int iwalk_flags) 1673 { 1674 return xfs_blockgc_free_dquots(ip->i_mount, 1675 xfs_inode_dquot(ip, XFS_DQTYPE_USER), 1676 xfs_inode_dquot(ip, XFS_DQTYPE_GROUP), 1677 xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags); 1678 } 1679 1680 /* XFS Inode Cache Walking Code */ 1681 1682 /* 1683 * The inode lookup is done in batches to keep the amount of lock traffic and 1684 * radix tree lookups to a minimum. The batch size is a trade off between 1685 * lookup reduction and stack usage. This is in the reclaim path, so we can't 1686 * be too greedy. 1687 */ 1688 #define XFS_LOOKUP_BATCH 32 1689 1690 1691 /* 1692 * Decide if we want to grab this inode in anticipation of doing work towards 1693 * the goal. 1694 */ 1695 static inline bool 1696 xfs_icwalk_igrab( 1697 enum xfs_icwalk_goal goal, 1698 struct xfs_inode *ip, 1699 struct xfs_icwalk *icw) 1700 { 1701 switch (goal) { 1702 case XFS_ICWALK_BLOCKGC: 1703 return xfs_blockgc_igrab(ip); 1704 case XFS_ICWALK_RECLAIM: 1705 return xfs_reclaim_igrab(ip, icw); 1706 default: 1707 return false; 1708 } 1709 } 1710 1711 /* 1712 * Process an inode. Each processing function must handle any state changes 1713 * made by the icwalk igrab function. Return -EAGAIN to skip an inode. 1714 */ 1715 static inline int 1716 xfs_icwalk_process_inode( 1717 enum xfs_icwalk_goal goal, 1718 struct xfs_inode *ip, 1719 struct xfs_perag *pag, 1720 struct xfs_icwalk *icw) 1721 { 1722 int error = 0; 1723 1724 switch (goal) { 1725 case XFS_ICWALK_BLOCKGC: 1726 error = xfs_blockgc_scan_inode(ip, icw); 1727 break; 1728 case XFS_ICWALK_RECLAIM: 1729 xfs_reclaim_inode(ip, pag); 1730 break; 1731 } 1732 return error; 1733 } 1734 1735 /* 1736 * For a given per-AG structure @pag and a goal, grab qualifying inodes and 1737 * process them in some manner. 
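* Inodes are found in batches of XFS_LOOKUP_BATCH with a tagged radix tree gang lookup under rcu_read_lock(), grabbed, and then processed after the RCU read lock has been dropped.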
1738 */
1739 static int
1740 xfs_icwalk_ag(
1741 struct xfs_perag *pag,
1742 enum xfs_icwalk_goal goal,
1743 struct xfs_icwalk *icw)
1744 {
1745 struct xfs_mount *mp = pag_mount(pag);
1746 uint32_t first_index;
1747 int last_error = 0;
1748 int skipped;
1749 bool done;
1750 int nr_found;
1751
1752 restart:
1753 done = false;
1754 skipped = 0;
1755 if (goal == XFS_ICWALK_RECLAIM)
1756 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1757 else
1758 first_index = 0;
1759 nr_found = 0;
1760 do {
1761 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1762 int error = 0;
1763 int i;
1764
1765 rcu_read_lock();
1766
1767 nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1768 (void **) batch, first_index,
1769 XFS_LOOKUP_BATCH, goal);
1770 if (!nr_found) {
1771 done = true;
1772 rcu_read_unlock();
1773 break;
1774 }
1775
1776 /*
1777 * Grab the inodes before we drop the lock. If we found
1778 * nothing, nr == 0 and the loop will be skipped.
1779 */
1780 for (i = 0; i < nr_found; i++) {
1781 struct xfs_inode *ip = batch[i];
1782
1783 if (done || !xfs_icwalk_igrab(goal, ip, icw))
1784 batch[i] = NULL;
1785
1786 /*
1787 * Update the index for the next lookup. Catch
1788 * overflows into the next AG range which can occur if
1789 * we have inodes in the last block of the AG and we
1790 * are currently pointing to the last inode.
1791 *
1792 * Because we may see inodes that are from the wrong AG
1793 * due to RCU freeing and reallocation, only update the
1794 * index if it lies in this AG. It was a race that led
1795 * us to see this inode, so another lookup from the
1796 * same index will not find it again.
1797 */
1798 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag_agno(pag))
1799 continue;
1800 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1801 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1802 done = true;
1803 }
1804
1805 /* unlock now that we've grabbed the inodes. */
1806 rcu_read_unlock();
1807
1808 for (i = 0; i < nr_found; i++) {
1809 if (!batch[i])
1810 continue;
1811 error = xfs_icwalk_process_inode(goal, batch[i], pag,
1812 icw);
1813 if (error == -EAGAIN) {
1814 skipped++;
1815 continue;
1816 }
1817 if (error && last_error != -EFSCORRUPTED)
1818 last_error = error;
1819 }
1820
1821 /* bail out if the filesystem is corrupted. */
1822 if (error == -EFSCORRUPTED)
1823 break;
1824
1825 cond_resched();
1826
1827 if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1828 icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1829 if (icw->icw_scan_limit <= 0)
1830 break;
1831 }
1832 } while (nr_found && !done);
1833
1834 if (goal == XFS_ICWALK_RECLAIM) {
1835 if (done)
1836 first_index = 0;
1837 WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1838 }
1839
1840 if (skipped) {
1841 delay(1);
1842 goto restart;
1843 }
1844 return last_error;
1845 }
1846
1847 /* Walk all incore inodes to achieve a given goal.
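* This iterates every per-AG structure marked for the goal and calls xfs_icwalk_ag() on each, stopping early if an AG reports -EFSCORRUPTED.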
*/ 1848 static int 1849 xfs_icwalk( 1850 struct xfs_mount *mp, 1851 enum xfs_icwalk_goal goal, 1852 struct xfs_icwalk *icw) 1853 { 1854 struct xfs_perag *pag = NULL; 1855 int error = 0; 1856 int last_error = 0; 1857 1858 while ((pag = xfs_perag_grab_next_tag(mp, pag, goal))) { 1859 error = xfs_icwalk_ag(pag, goal, icw); 1860 if (error) { 1861 last_error = error; 1862 if (error == -EFSCORRUPTED) { 1863 xfs_perag_rele(pag); 1864 break; 1865 } 1866 } 1867 } 1868 return last_error; 1869 BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID); 1870 } 1871 1872 #ifdef DEBUG 1873 static void 1874 xfs_check_delalloc( 1875 struct xfs_inode *ip, 1876 int whichfork) 1877 { 1878 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 1879 struct xfs_bmbt_irec got; 1880 struct xfs_iext_cursor icur; 1881 1882 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got)) 1883 return; 1884 do { 1885 if (isnullstartblock(got.br_startblock)) { 1886 xfs_warn(ip->i_mount, 1887 "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]", 1888 ip->i_ino, 1889 whichfork == XFS_DATA_FORK ? "data" : "cow", 1890 got.br_startoff, got.br_blockcount); 1891 } 1892 } while (xfs_iext_next_extent(ifp, &icur, &got)); 1893 } 1894 #else 1895 #define xfs_check_delalloc(ip, whichfork) do { } while (0) 1896 #endif 1897 1898 /* Schedule the inode for reclaim. */ 1899 static void 1900 xfs_inodegc_set_reclaimable( 1901 struct xfs_inode *ip) 1902 { 1903 struct xfs_mount *mp = ip->i_mount; 1904 struct xfs_perag *pag; 1905 1906 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) { 1907 xfs_check_delalloc(ip, XFS_DATA_FORK); 1908 xfs_check_delalloc(ip, XFS_COW_FORK); 1909 ASSERT(0); 1910 } 1911 1912 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 1913 spin_lock(&pag->pag_ici_lock); 1914 spin_lock(&ip->i_flags_lock); 1915 1916 trace_xfs_inode_set_reclaimable(ip); 1917 ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING); 1918 ip->i_flags |= XFS_IRECLAIMABLE; 1919 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), 1920 XFS_ICI_RECLAIM_TAG); 1921 1922 spin_unlock(&ip->i_flags_lock); 1923 spin_unlock(&pag->pag_ici_lock); 1924 xfs_perag_put(pag); 1925 } 1926 1927 /* 1928 * Free all speculative preallocations and possibly even the inode itself. 1929 * This is the last chance to make changes to an otherwise unreferenced file 1930 * before incore reclamation happens. 1931 */ 1932 static int 1933 xfs_inodegc_inactivate( 1934 struct xfs_inode *ip) 1935 { 1936 int error; 1937 1938 trace_xfs_inode_inactivating(ip); 1939 error = xfs_inactive(ip); 1940 xfs_inodegc_set_reclaimable(ip); 1941 return error; 1942 1943 } 1944 1945 void 1946 xfs_inodegc_worker( 1947 struct work_struct *work) 1948 { 1949 struct xfs_inodegc *gc = container_of(to_delayed_work(work), 1950 struct xfs_inodegc, work); 1951 struct llist_node *node = llist_del_all(&gc->list); 1952 struct xfs_inode *ip, *n; 1953 struct xfs_mount *mp = gc->mp; 1954 unsigned int nofs_flag; 1955 1956 /* 1957 * Clear the cpu mask bit and ensure that we have seen the latest 1958 * update of the gc structure associated with this CPU. This matches 1959 * with the release semantics used when setting the cpumask bit in 1960 * xfs_inodegc_queue. 1961 */ 1962 cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask); 1963 smp_mb__after_atomic(); 1964 1965 WRITE_ONCE(gc->items, 0); 1966 1967 if (!node) 1968 return; 1969 1970 /* 1971 * We can allocate memory here while doing writeback on behalf of 1972 * memory reclaim. 
To avoid memory allocation deadlocks set the 1973 * task-wide nofs context for the following operations. 1974 */ 1975 nofs_flag = memalloc_nofs_save(); 1976 1977 ip = llist_entry(node, struct xfs_inode, i_gclist); 1978 trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits)); 1979 1980 WRITE_ONCE(gc->shrinker_hits, 0); 1981 llist_for_each_entry_safe(ip, n, node, i_gclist) { 1982 int error; 1983 1984 xfs_iflags_set(ip, XFS_INACTIVATING); 1985 error = xfs_inodegc_inactivate(ip); 1986 if (error && !gc->error) 1987 gc->error = error; 1988 } 1989 1990 memalloc_nofs_restore(nofs_flag); 1991 } 1992 1993 /* 1994 * Expedite all pending inodegc work to run immediately. This does not wait for 1995 * completion of the work. 1996 */ 1997 void 1998 xfs_inodegc_push( 1999 struct xfs_mount *mp) 2000 { 2001 if (!xfs_is_inodegc_enabled(mp)) 2002 return; 2003 trace_xfs_inodegc_push(mp, __return_address); 2004 xfs_inodegc_queue_all(mp); 2005 } 2006 2007 /* 2008 * Force all currently queued inode inactivation work to run immediately and 2009 * wait for the work to finish. 2010 */ 2011 int 2012 xfs_inodegc_flush( 2013 struct xfs_mount *mp) 2014 { 2015 xfs_inodegc_push(mp); 2016 trace_xfs_inodegc_flush(mp, __return_address); 2017 return xfs_inodegc_wait_all(mp); 2018 } 2019 2020 /* 2021 * Flush all the pending work and then disable the inode inactivation background 2022 * workers and wait for them to stop. Caller must hold sb->s_umount to 2023 * coordinate changes in the inodegc_enabled state. 2024 */ 2025 void 2026 xfs_inodegc_stop( 2027 struct xfs_mount *mp) 2028 { 2029 bool rerun; 2030 2031 if (!xfs_clear_inodegc_enabled(mp)) 2032 return; 2033 2034 /* 2035 * Drain all pending inodegc work, including inodes that could be 2036 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan 2037 * threads that sample the inodegc state just prior to us clearing it. 2038 * The inodegc flag state prevents new threads from queuing more 2039 * inodes, so we queue pending work items and flush the workqueue until 2040 * all inodegc lists are empty. IOWs, we cannot use drain_workqueue 2041 * here because it does not allow other unserialized mechanisms to 2042 * reschedule inodegc work while this draining is in progress. 2043 */ 2044 xfs_inodegc_queue_all(mp); 2045 do { 2046 flush_workqueue(mp->m_inodegc_wq); 2047 rerun = xfs_inodegc_queue_all(mp); 2048 } while (rerun); 2049 2050 trace_xfs_inodegc_stop(mp, __return_address); 2051 } 2052 2053 /* 2054 * Enable the inode inactivation background workers and schedule deferred inode 2055 * inactivation work if there is any. Caller must hold sb->s_umount to 2056 * coordinate changes in the inodegc_enabled state. 2057 */ 2058 void 2059 xfs_inodegc_start( 2060 struct xfs_mount *mp) 2061 { 2062 if (xfs_set_inodegc_enabled(mp)) 2063 return; 2064 2065 trace_xfs_inodegc_start(mp, __return_address); 2066 xfs_inodegc_queue_all(mp); 2067 } 2068 2069 #ifdef CONFIG_XFS_RT 2070 static inline bool 2071 xfs_inodegc_want_queue_rt_file( 2072 struct xfs_inode *ip) 2073 { 2074 struct xfs_mount *mp = ip->i_mount; 2075 2076 if (!XFS_IS_REALTIME_INODE(ip)) 2077 return false; 2078 2079 if (__percpu_counter_compare(&mp->m_frextents, 2080 mp->m_low_rtexts[XFS_LOWSP_5_PCNT], 2081 XFS_FDBLOCKS_BATCH) < 0) 2082 return true; 2083 2084 return false; 2085 } 2086 #else 2087 # define xfs_inodegc_want_queue_rt_file(ip) (false) 2088 #endif /* CONFIG_XFS_RT */ 2089 2090 /* 2091 * Schedule the inactivation worker when: 2092 * 2093 * - We've accumulated more than one inode cluster buffer's worth of inodes. 
/*
 * Schedule the inactivation worker when:
 *
 *  - We've accumulated more than one inode cluster buffer's worth of inodes.
 *  - There is less than 5% free space left.
 *  - Any of the quotas for this inode are near an enforcement limit.
 */
static inline bool
xfs_inodegc_want_queue_work(
	struct xfs_inode	*ip,
	unsigned int		items)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (items > mp->m_ino_geo.inodes_per_cluster)
		return true;

	if (__percpu_counter_compare(&mp->m_fdblocks,
				mp->m_low_space[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	if (xfs_inodegc_want_queue_rt_file(ip))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
		return true;

	return false;
}

/*
 * Upper bound on the number of inodes on each per-cpu queue that can be
 * queued for inactivation at any given time, to avoid monopolizing the
 * workqueue.
 */
#define XFS_INODEGC_MAX_BACKLOG	(4 * XFS_INODES_PER_CHUNK)

/*
 * Make the frontend wait for inactivations when:
 *
 *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
 *  - The queue depth exceeds the maximum allowable percpu backlog.
 *
 * Note: If we are in a NOFS context here (e.g. current thread is running a
 * transaction) then we don't want to block here as inodegc progress may
 * require filesystem resources we hold to make progress and that could
 * result in a deadlock.  Hence we skip out of here if we are in a scoped
 * NOFS context.
 */
static inline bool
xfs_inodegc_want_flush_work(
	struct xfs_inode	*ip,
	unsigned int		items,
	unsigned int		shrinker_hits)
{
	if (current->flags & PF_MEMALLOC_NOFS)
		return false;

	if (shrinker_hits > 0)
		return true;

	if (items > XFS_INODEGC_MAX_BACKLOG)
		return true;

	return false;
}
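/*
 * Example of the NOFS interaction above (a sketch, not an actual call site):
 * a task that cannot afford to wait for inodegc can enter a scoped NOFS
 * section before dropping its inode reference:
 *
 *	unsigned int	nofs_flag = memalloc_nofs_save();
 *
 *	...release the last reference to the inode...
 *	memalloc_nofs_restore(nofs_flag);
 *
 * memalloc_nofs_save() sets PF_MEMALLOC_NOFS on the task, so
 * xfs_inodegc_want_flush_work() returns false and xfs_inodegc_queue() below
 * will not block in flush_delayed_work().  Note that with
 * XFS_INODES_PER_CHUNK == 64, the backlog cap above works out to 256 queued
 * inodes per queue.
 */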
/*
 * Queue a background inactivation worker if there are inodes that need to be
 * inactivated and higher level xfs code hasn't disabled the background
 * workers.
 */
static void
xfs_inodegc_queue(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inodegc	*gc;
	int			items;
	unsigned int		shrinker_hits;
	unsigned int		cpu_nr;
	unsigned long		queue_delay = 1;

	trace_xfs_inode_set_need_inactive(ip);
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_NEED_INACTIVE;
	spin_unlock(&ip->i_flags_lock);

	cpu_nr = get_cpu();
	gc = this_cpu_ptr(mp->m_inodegc);
	llist_add(&ip->i_gclist, &gc->list);
	items = READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, items + 1);
	shrinker_hits = READ_ONCE(gc->shrinker_hits);

	/*
	 * Ensure the list add is always seen by anyone who finds the cpumask
	 * bit set. This effectively gives the cpumask bit set operation
	 * release ordering semantics.
	 */
	smp_mb__before_atomic();
	if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask))
		cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);

	/*
	 * We queue the work while holding the current CPU so that the work
	 * is scheduled to run on this CPU.
	 */
	if (!xfs_is_inodegc_enabled(mp)) {
		put_cpu();
		return;
	}

	if (xfs_inodegc_want_queue_work(ip, items))
		queue_delay = 0;

	trace_xfs_inodegc_queue(mp, __return_address);
	mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
			queue_delay);
	put_cpu();

	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
		trace_xfs_inodegc_throttle(mp, __return_address);
		flush_delayed_work(&gc->work);
	}
}

/*
 * We set the inode flag atomically with the radix tree tag.  Once we get tag
 * lookups on the radix tree, this inode flag can go away.
 *
 * We always use background reclaim here because even if the inode is clean,
 * it still may be under IO and hence we have to wait for IO completion to
 * occur before we can reclaim the inode.  The background reclaim path
 * handles this more efficiently than we can here, so simply let background
 * reclaim tear down all inodes.
 */
void
xfs_inode_mark_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			need_inactive;

	XFS_STATS_INC(mp, vn_reclaim);

	/*
	 * We should never get here with any of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));

	need_inactive = xfs_inode_needs_inactive(ip);
	if (need_inactive) {
		xfs_inodegc_queue(ip);
		return;
	}

	/* Going straight to reclaim, so drop the dquots. */
	xfs_qm_dqdetach(ip);
	xfs_inodegc_set_reclaimable(ip);
}

/*
 * Register a phony shrinker so that we can run background inodegc sooner
 * when there's memory pressure.  Inactivation does not itself free any
 * memory but it does make inodes reclaimable, which eventually frees memory.
 *
 * The count function, seek value, and batch value are crafted to trigger the
 * scan function during the second round of scanning.  Hopefully this means
 * that we reclaimed enough memory that initiating metadata transactions
 * won't make things worse.
 */
#define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
#define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
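/*
 * Rough arithmetic behind the values above, assuming the usual
 * do_shrink_slab() behaviour: with DEF_PRIORITY == 12 the count is 4096 and
 * the batch is 2049.  A shrinker with ->seeks == 0 is asked to trim about
 * half of the reported count per pass (~2048 objects), which stays just
 * under the batch size, so the first pass only accrues deferred work and it
 * is the second pass that crosses the batch threshold and invokes
 * ->scan_objects().
 */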
static unsigned long
xfs_inodegc_shrinker_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = shrink->private_data;
	struct xfs_inodegc	*gc;
	int			cpu;

	if (!xfs_is_inodegc_enabled(mp))
		return 0;

	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			return XFS_INODEGC_SHRINKER_COUNT;
	}

	return 0;
}

static unsigned long
xfs_inodegc_shrinker_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = shrink->private_data;
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			no_items = true;

	if (!xfs_is_inodegc_enabled(mp))
		return SHRINK_STOP;

	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);

	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			unsigned int	h = READ_ONCE(gc->shrinker_hits);

			WRITE_ONCE(gc->shrinker_hits, h + 1);
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
			no_items = false;
		}
	}

	/*
	 * If there are no inodes to inactivate, we don't want the shrinker
	 * to think there's deferred work to call us back about.
	 */
	if (no_items)
		return LONG_MAX;

	return SHRINK_STOP;
}

/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
int
xfs_inodegc_register_shrinker(
	struct xfs_mount	*mp)
{
	mp->m_inodegc_shrinker = shrinker_alloc(SHRINKER_NONSLAB,
						"xfs-inodegc:%s",
						mp->m_super->s_id);
	if (!mp->m_inodegc_shrinker)
		return -ENOMEM;

	mp->m_inodegc_shrinker->count_objects = xfs_inodegc_shrinker_count;
	mp->m_inodegc_shrinker->scan_objects = xfs_inodegc_shrinker_scan;
	mp->m_inodegc_shrinker->seeks = 0;
	mp->m_inodegc_shrinker->batch = XFS_INODEGC_SHRINKER_BATCH;
	mp->m_inodegc_shrinker->private_data = mp;

	shrinker_register(mp->m_inodegc_shrinker);

	return 0;
}
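/*
 * Teardown sketch (illustrative; the actual unmount path lives elsewhere):
 * a shrinker obtained from shrinker_alloc() and published with
 * shrinker_register() is released with
 *
 *	shrinker_free(mp->m_inodegc_shrinker);
 *
 * which unregisters it and frees the structure.
 */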