// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"

#include <linux/iversion.h>

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount *mp,
	xfs_ino_t ino)
{
	struct xfs_inode *ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, 0);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode *ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	if (ip->i_afp) {
		xfs_idestroy_fork(ip->i_afp);
		kmem_cache_free(xfs_ifork_zone, ip->i_afp);
	}
	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_zone, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode *ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode *ip)
{
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
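	 *
	 * (For reference, the lookup side in xfs_iget_cache_hit() re-checks
	 * ip->i_ino under this same lock and treats a zero or mismatched
	 * inode number as a recycled inode, returning -EAGAIN to the caller.)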
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount *mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

static void
xfs_perag_set_reclaim_tag(
	struct xfs_perag *pag)
{
	struct xfs_mount *mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag *pag)
{
	struct xfs_mount *mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}


/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
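 *
 * The tag is set in the per-AG inode radix tree and then propagated up into
 * the per-mount perag tree by xfs_perag_set_reclaim_tag(), which is what
 * allows xfs_reclaim_work_queue() to cheaply test whether any AG has
 * reclaimable inodes at all.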
 */
void
xfs_inode_set_reclaim_tag(
	struct xfs_inode *ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
xfs_inode_clear_reclaim_tag(
	struct xfs_perag *pag,
	xfs_ino_t ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(pag->pag_mount, ino),
			XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}

static void
xfs_inew_wait(
	struct xfs_inode *ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wq_entry);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount *mp,
	struct inode *inode)
{
	int error;
	uint32_t nlink = inode->i_nlink;
	uint32_t generation = inode->i_generation;
	uint64_t version = inode_peek_iversion(inode);
	umode_t mode = inode->i_mode;
	dev_t dev = inode->i_rdev;
	kuid_t uid = inode->i_uid;
	kgid_t gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	return error;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode *ip,
	int flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_d.di_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag *pag,
	struct xfs_inode *ip,
	xfs_ino_t ino,
	int flags,
	int lock_flags) __releases(RCU)
{
	struct inode *inode = VFS_I(ip);
	struct xfs_mount *mp = ip->i_mount;
	int error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}


	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	wait_on_inode to wait for these flags to be cleared
	 *	instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		if (flags & XFS_IGET_INCORE) {
			error = -EAGAIN;
			goto out_error;
		}

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
		error = xfs_reinit_inode(mp, inode);
		if (error) {
			bool wake;
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble. Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);
			wake = !!__xfs_iflags_test(ip, XFS_INEW);
			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			if (wake)
				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;
		ip->i_sick = 0;
		ip->i_checked = 0;

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}


static int
xfs_iget_cache_miss(
	struct xfs_mount *mp,
	struct xfs_perag *pag,
	xfs_trans_t *tp,
	xfs_ino_t ino,
	struct xfs_inode **ipp,
	int flags,
	int lock_flags)
{
	struct xfs_inode *ip;
	int error;
	xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
	int iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the di_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
	    (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		VFS_I(ip)->i_generation = prandom_u32();
	} else {
		struct xfs_dinode *dip;
		struct xfs_buf *bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip, dip);
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system. It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one. This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired. This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode. See the comment
 *               for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t *mp,
	xfs_trans_t *tp,
	xfs_ino_t ino,
	uint flags,
	uint lock_flags,
	xfs_inode_t **ipp)
{
	xfs_inode_t *ip;
	int error;
	xfs_perag_t *pag;
	xfs_agino_t agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here. It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
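	 *
	 * As an illustrative sketch (not from the original source), a typical
	 * ILOCK-only caller looks something like:
	 *
	 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
	 *	if (error)
	 *		return error;
	 *	... work with the inode ...
	 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	 *	xfs_irele(ip);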
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
				flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now. If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system. If the inode is
 * in cache and isn't in purgatory, return 1 if the inode is allocated
 * and 0 if it is not. For all other cases (not in cache, being torn
 * down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer. This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that. If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned. The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount *mp,
	struct xfs_trans *tp,
	xfs_ino_t ino,
	bool *inuse)
{
	struct xfs_inode *ip;
	int error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	xfs_irele(ip);
	return 0;
}

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

/*
 * Decide if the given @ip is eligible to be a part of the inode walk, and
 * grab it if so. Returns true if it's ready to go or false if we should just
 * ignore it.
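 *
 * Inodes that pass this check have had a VFS reference taken via igrab(),
 * so the walk loop in xfs_inode_walk_ag() must drop that reference with
 * xfs_irele() once the @execute callback has run.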
 */
STATIC bool
xfs_inode_walk_ag_grab(
	struct xfs_inode *ip,
	int flags)
{
	struct inode *inode = VFS_I(ip);
	bool newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway. If it has been reallocated and still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}

/*
 * For a given per-AG structure @pag, grab, @execute, and rele all incore
 * inodes with the given radix tree @tag.
 */
STATIC int
xfs_inode_walk_ag(
	struct xfs_perag *pag,
	int iter_flags,
	int (*execute)(struct xfs_inode *ip, void *args),
	void *args,
	int tag)
{
	struct xfs_mount *mp = pag->pag_mount;
	uint32_t first_index;
	int last_error = 0;
	int skipped;
	bool done;
	int nr_found;

restart:
	done = false;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int error = 0;
		int i;

		rcu_read_lock();

		if (tag == XFS_ICI_NO_TAG)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) &&
			    xfs_iflags_test(batch[i], XFS_INEW))
				xfs_inew_wait(batch[i]);
			error = execute(batch[i], args);
			xfs_irele(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/* Fetch the next (possibly tagged) per-AG structure. */
static inline struct xfs_perag *
xfs_inode_walk_get_perag(
	struct xfs_mount *mp,
	xfs_agnumber_t agno,
	int tag)
{
	if (tag == XFS_ICI_NO_TAG)
		return xfs_perag_get(mp, agno);
	return xfs_perag_get_tag(mp, agno, tag);
}

/*
 * Call the @execute function on all incore inodes matching the radix tree
 * @tag.
 */
int
xfs_inode_walk(
	struct xfs_mount *mp,
	int iter_flags,
	int (*execute)(struct xfs_inode *ip, void *args),
	void *args,
	int tag)
{
	struct xfs_perag *pag;
	int error = 0;
	int last_error = 0;
	xfs_agnumber_t ag;

	ag = 0;
	while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				&mp->m_eofblocks_work,
				msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);

	if (!sb_start_write_trylock(mp->m_super))
		return;
	xfs_icache_free_eofblocks(mp, NULL);
	sb_end_write(mp->m_super);

	xfs_queue_eofblocks(mp);
}

/*
 * Background scanning to trim preallocated CoW space. This is queued
 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
 */
void
xfs_queue_cowblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				&mp->m_cowblocks_work,
				msecs_to_jiffies(xfs_cowb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_cowblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_cowblocks_work);

	if (!sb_start_write_trylock(mp->m_super))
		return;
	xfs_icache_free_cowblocks(mp, NULL);
	sb_end_write(mp->m_super);

	xfs_queue_cowblocks(mp);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
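 *
 * A non-zero return simply means "skip this inode": the batched walk in
 * xfs_reclaim_inodes_ag() NULLs out the batch slot and moves on to the next
 * candidate.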
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode *ip,
	int flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode. Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check the
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state		iflush ret	required action
 *	---------------		----------	---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies. Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting. For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode *ip,
	struct xfs_perag *pag,
	int sync_mode)
{
	struct xfs_buf *bp = NULL;
	xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
	int error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		/* xfs_iflush_abort() drops the flush lock */
		xfs_iflush_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto reclaim;
	}

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here. As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it. Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again. Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

reclaim:
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	__xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during filesystem unmount reclaim walk would leak all the
 * unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount *mp,
	int flags,
	int *nr_to_scan)
{
	struct xfs_perag *pag;
	int error = 0;
	int last_error = 0;
	xfs_agnumber_t ag;
	int trylock = flags & SYNC_TRYLOCK;
	int skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long first_index = 0;
		int done = 0;
		int nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG.
				 * It was a race that led us to see this
				 * inode, so another lookup from the same index
				 * will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != -EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return last_error;
}

int
xfs_reclaim_inodes(
	xfs_mount_t *mp,
	int mode)
{
	int nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we throttle the speed of reclaim by doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount *mp,
	int nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount *mp)
{
	struct xfs_perag *pag;
	xfs_agnumber_t ag = 0;
	int reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC bool
xfs_inode_match_id(
	struct xfs_inode *ip,
	struct xfs_eofblocks *eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return false;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return false;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_d.di_projid != eofb->eof_prid)
		return false;

	return true;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match.
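 * For example, the low-quota scan set up by __xfs_inode_free_quota_eofblocks()
 * can set both XFS_EOF_FLAGS_UID and XFS_EOF_FLAGS_GID; with the union filter
 * an inode owned by either of the two ids is processed.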
 * This is for global/internal scans only.
 */
STATIC bool
xfs_inode_match_id_union(
	struct xfs_inode *ip,
	struct xfs_eofblocks *eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return true;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return true;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_d.di_projid == eofb->eof_prid)
		return true;

	return false;
}

/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @eofb? The inode is eligible if @eofb is null or
 * if the predicate functions match.
 */
static bool
xfs_inode_matches_eofb(
	struct xfs_inode *ip,
	struct xfs_eofblocks *eofb)
{
	bool match;

	if (!eofb)
		return true;

	if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
		match = xfs_inode_match_id_union(ip, eofb);
	else
		match = xfs_inode_match_id(ip, eofb);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
	    XFS_ISIZE(ip) < eofb->eof_min_file_size)
		return false;

	return true;
}

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode *ip,
	void *args)
{
	struct xfs_eofblocks *eofb = args;
	bool wait;
	int ret;

	wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_inode_matches_eofb(ip, eofb))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}

	ret = xfs_free_eofblocks(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}

int
xfs_icache_free_eofblocks(
	struct xfs_mount *mp,
	struct xfs_eofblocks *eofb)
{
	return xfs_inode_walk(mp, 0, xfs_inode_free_eofblocks, eofb,
			XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
static int
__xfs_inode_free_quota_eofblocks(
	struct xfs_inode *ip,
	int (*execute)(struct xfs_mount *mp,
		       struct xfs_eofblocks *eofb))
{
	int scan = 0;
	struct xfs_eofblocks eofb = {0};
	struct xfs_dquot *dq;

	/*
	 * Run a sync scan to increase effectiveness and use the union filter to
	 * cover all applicable quotas in a single scan.
	 */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		execute(ip->i_mount, &eofb);

	return scan;
}

int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
}

static inline unsigned long
xfs_iflag_for_tag(
	int tag)
{
	switch (tag) {
	case XFS_ICI_EOFBLOCKS_TAG:
		return XFS_IEOFBLOCKS;
	case XFS_ICI_COWBLOCKS_TAG:
		return XFS_ICOWBLOCKS;
	default:
		ASSERT(0);
		return 0;
	}
}

static void
__xfs_inode_set_blocks_tag(
	xfs_inode_t *ip,
	void (*execute)(struct xfs_mount *mp),
	void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
		       int error, unsigned long caller_ip),
	int tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & xfs_iflag_for_tag(tag))
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				tag);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		execute(ip->i_mount);

		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t *ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
			trace_xfs_perag_set_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}

static void
__xfs_inode_clear_blocks_tag(
	xfs_inode_t *ip,
	void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
			 int error, unsigned long caller_ip),
	int tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				tag);
		spin_unlock(&ip->i_mount->m_perag_lock);
		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t *ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return __xfs_inode_clear_blocks_tag(ip,
			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Set ourselves up to free CoW blocks from this file. If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode *ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork. Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long. If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode *ip,
	void *args)
{
	struct xfs_eofblocks *eofb = args;
	int ret = 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (!xfs_inode_matches_eofb(ip, eofb))
		return 0;

	/* Free the CoW blocks */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}

int
xfs_icache_free_cowblocks(
	struct xfs_mount *mp,
	struct xfs_eofblocks *eofb)
{
	return xfs_inode_walk(mp, 0, xfs_inode_free_cowblocks, eofb,
			XFS_ICI_COWBLOCKS_TAG);
}

int
xfs_inode_free_quota_cowblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t *ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
			trace_xfs_perag_set_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t *ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return __xfs_inode_clear_blocks_tag(ip,
			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
}

/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_stop_block_reaping(
	struct xfs_mount *mp)
{
	cancel_delayed_work_sync(&mp->m_eofblocks_work);
	cancel_delayed_work_sync(&mp->m_cowblocks_work);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_start_block_reaping(
	struct xfs_mount *mp)
{
	xfs_queue_eofblocks(mp);
	xfs_queue_cowblocks(mp);
}
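
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * to synchronously trim post-EOF preallocations belonging to a single user
 * could drive the walks above with a filter along these lines:
 *
 *	struct xfs_eofblocks	eofb = { 0 };
 *
 *	eofb.eof_flags = XFS_EOF_FLAGS_SYNC | XFS_EOF_FLAGS_UID;
 *	eofb.eof_uid = uid;	(a kuid_t)
 *	error = xfs_icache_free_eofblocks(mp, &eofb);
 */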