Lines Matching +full:wait +full:- +full:free +full:- +full:us

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
94 ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL); in xfs_inode_alloc()
96 if (inode_init_always(mp->m_super, VFS_I(ip))) { in xfs_inode_alloc()
102 VFS_I(ip)->i_mode = 0; in xfs_inode_alloc()
103 mapping_set_folio_min_order(VFS_I(ip)->i_mapping, in xfs_inode_alloc()
104 M_IGEO(mp)->min_folio_order); in xfs_inode_alloc()
107 ASSERT(atomic_read(&ip->i_pincount) == 0); in xfs_inode_alloc()
108 ASSERT(ip->i_ino == 0); in xfs_inode_alloc()
111 ip->i_ino = ino; in xfs_inode_alloc()
112 ip->i_mount = mp; in xfs_inode_alloc()
113 memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); in xfs_inode_alloc()
114 ip->i_cowfp = NULL; in xfs_inode_alloc()
115 memset(&ip->i_af, 0, sizeof(ip->i_af)); in xfs_inode_alloc()
116 ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS; in xfs_inode_alloc()
117 memset(&ip->i_df, 0, sizeof(ip->i_df)); in xfs_inode_alloc()
118 ip->i_flags = 0; in xfs_inode_alloc()
119 ip->i_delayed_blks = 0; in xfs_inode_alloc()
120 ip->i_diflags2 = mp->m_ino_geo.new_diflags2; in xfs_inode_alloc()
121 ip->i_nblocks = 0; in xfs_inode_alloc()
122 ip->i_forkoff = 0; in xfs_inode_alloc()
123 ip->i_sick = 0; in xfs_inode_alloc()
124 ip->i_checked = 0; in xfs_inode_alloc()
125 INIT_WORK(&ip->i_ioend_work, xfs_end_io); in xfs_inode_alloc()
126 INIT_LIST_HEAD(&ip->i_ioend_list); in xfs_inode_alloc()
127 spin_lock_init(&ip->i_ioend_lock); in xfs_inode_alloc()
128 ip->i_next_unlinked = NULLAGINO; in xfs_inode_alloc()
129 ip->i_prev_unlinked = 0; in xfs_inode_alloc()
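The xfs_inode_alloc() lines above reset every per-inode field by hand because alloc_inode_sb() can hand back a previously used object from the inode slab cache, so nothing may be assumed zeroed. A minimal user-space analogue of that pattern, using a hypothetical free-list cache (not XFS code), might look like this:

/* Sketch: reuse-safe allocation from an object cache (hypothetical, not XFS code). */
#include <stdlib.h>

struct obj {
	unsigned long	id;
	unsigned long	nblocks;
	unsigned int	flags;
	struct obj	*next_free;	/* free-list linkage */
};

struct obj *free_list;

struct obj *obj_alloc(unsigned long id)
{
	struct obj *o = free_list;

	if (o)
		free_list = o->next_free;	/* recycled: contents are stale */
	else
		o = malloc(sizeof(*o));
	if (!o)
		return NULL;

	/* Like xfs_inode_alloc(), reinitialise every field explicitly. */
	o->id = id;
	o->nblocks = 0;
	o->flags = 0;
	o->next_free = NULL;
	return o;
}

void obj_free(struct obj *o)
{
	o->next_free = free_list;
	free_list = o;
}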
141 switch (VFS_I(ip)->i_mode & S_IFMT) { in xfs_inode_free_callback()
145 xfs_idestroy_fork(&ip->i_df); in xfs_inode_free_callback()
151 if (ip->i_cowfp) { in xfs_inode_free_callback()
152 xfs_idestroy_fork(ip->i_cowfp); in xfs_inode_free_callback()
153 kmem_cache_free(xfs_ifork_cache, ip->i_cowfp); in xfs_inode_free_callback()
155 if (ip->i_itemp) { in xfs_inode_free_callback()
157 &ip->i_itemp->ili_item.li_flags)); in xfs_inode_free_callback()
159 ip->i_itemp = NULL; in xfs_inode_free_callback()
170 ASSERT(atomic_read(&ip->i_pincount) == 0); in __xfs_inode_free()
171 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list)); in __xfs_inode_free()
172 XFS_STATS_DEC(ip->i_mount, vn_active); in __xfs_inode_free()
174 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); in __xfs_inode_free()
186 * free state. The ip->i_flags_lock provides the barrier against lookup in xfs_inode_free()
189 spin_lock(&ip->i_flags_lock); in xfs_inode_free()
190 ip->i_flags = XFS_IRECLAIM; in xfs_inode_free()
191 ip->i_ino = 0; in xfs_inode_free()
192 spin_unlock(&ip->i_flags_lock); in xfs_inode_free()
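xfs_inode_free() sets XFS_IRECLAIM and zeroes i_ino under ip->i_flags_lock so that racing lookups, which re-check both under the same lock, reliably see the inode as dead before __xfs_inode_free() hands it to call_rcu(). A hedged user-space sketch of that "invalidate under the flags lock" idea follows; the names are hypothetical and a pthread mutex stands in for the spinlock:

/* Sketch: invalidate an entry under its flags lock so concurrent lookups bail. */
#include <pthread.h>
#include <stdbool.h>

#define FLAG_RECLAIM	0x1

struct entry {
	pthread_mutex_t	flags_lock;
	unsigned int	flags;
	unsigned long	ino;	/* 0 means "not a valid entry" */
};

/* Lookup side: only accept the entry if it still matches and isn't dying. */
bool entry_grab(struct entry *e, unsigned long ino)
{
	bool ok;

	pthread_mutex_lock(&e->flags_lock);
	ok = (e->ino == ino) && !(e->flags & FLAG_RECLAIM);
	pthread_mutex_unlock(&e->flags_lock);
	return ok;
}

/* Free side: mark the entry dead before its memory can ever be reused. */
void entry_free(struct entry *e)
{
	pthread_mutex_lock(&e->flags_lock);
	e->flags = FLAG_RECLAIM;
	e->ino = 0;
	pthread_mutex_unlock(&e->flags_lock);
	/* ...only now hand the entry to the deferred-free machinery... */
}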
207 if (xa_marked(&mp->m_perags, XFS_PERAG_RECLAIM_MARK)) { in xfs_reclaim_work_queue()
208 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, in xfs_reclaim_work_queue()
222 struct xfs_mount *mp = pag->pag_mount; in xfs_blockgc_queue()
228 if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) in xfs_blockgc_queue()
229 queue_delayed_work(pag->pag_mount->m_blockgc_wq, in xfs_blockgc_queue()
230 &pag->pag_blockgc_work, in xfs_blockgc_queue()
242 struct xfs_mount *mp = pag->pag_mount; in xfs_perag_set_inode_tag()
245 lockdep_assert_held(&pag->pag_ici_lock); in xfs_perag_set_inode_tag()
247 was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag); in xfs_perag_set_inode_tag()
248 radix_tree_tag_set(&pag->pag_ici_root, agino, tag); in xfs_perag_set_inode_tag()
251 pag->pag_ici_reclaimable++; in xfs_perag_set_inode_tag()
257 xa_set_mark(&mp->m_perags, pag->pag_agno, ici_tag_to_mark(tag)); in xfs_perag_set_inode_tag()
279 struct xfs_mount *mp = pag->pag_mount; in xfs_perag_clear_inode_tag()
281 lockdep_assert_held(&pag->pag_ici_lock); in xfs_perag_clear_inode_tag()
288 radix_tree_tag_clear(&pag->pag_ici_root, agino, tag); in xfs_perag_clear_inode_tag()
293 pag->pag_ici_reclaimable--; in xfs_perag_clear_inode_tag()
295 if (radix_tree_tagged(&pag->pag_ici_root, tag)) in xfs_perag_clear_inode_tag()
299 xa_clear_mark(&mp->m_perags, pag->pag_agno, ici_tag_to_mark(tag)); in xfs_perag_clear_inode_tag()
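xfs_perag_set_inode_tag() and xfs_perag_clear_inode_tag() keep two levels of state in sync: a per-inode tag in the per-AG radix tree, and a per-AG mark in the mount-wide m_perags xarray that is set only when the AG gains its first tagged inode and cleared only when it loses the last one. A simplified user-space model of that propagation, with arrays standing in for the radix tree and xarray and a counter standing in for radix_tree_tagged():

/* Sketch: per-group tag plus a summary mark tracking "any member tagged". */
#include <stdbool.h>

#define GROUPS		4
#define PER_GROUP	64

struct group {
	bool		member_tagged[PER_GROUP];
	unsigned int	tagged_count;
};

struct mount_summary {
	struct group	groups[GROUPS];
	bool		group_marked[GROUPS];	/* like the m_perags xarray mark */
};

void set_tag(struct mount_summary *m, int g, int member)
{
	struct group *grp = &m->groups[g];

	if (grp->member_tagged[member])
		return;
	grp->member_tagged[member] = true;
	/* First tagged member: propagate the mark up to the mount level. */
	if (grp->tagged_count++ == 0)
		m->group_marked[g] = true;
}

void clear_tag(struct mount_summary *m, int g, int member)
{
	struct group *grp = &m->groups[g];

	if (!grp->member_tagged[member])
		return;
	grp->member_tagged[member] = false;
	/* Last tagged member gone: clear the mount-level mark. */
	if (--grp->tagged_count == 0)
		m->group_marked[g] = false;
}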
316 index = pag->pag_agno + 1; in xfs_perag_grab_next_tag()
321 pag = xa_find(&mp->m_perags, &index, ULONG_MAX, ici_tag_to_mark(tag)); in xfs_perag_grab_next_tag()
324 if (!atomic_inc_not_zero(&pag->pag_active_ref)) in xfs_perag_grab_next_tag()
332 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
334 * information about the on-disk values in the VFS inode and so we can't just
345 uint32_t nlink = inode->i_nlink; in xfs_reinit_inode()
346 uint32_t generation = inode->i_generation; in xfs_reinit_inode()
348 umode_t mode = inode->i_mode; in xfs_reinit_inode()
349 dev_t dev = inode->i_rdev; in xfs_reinit_inode()
350 kuid_t uid = inode->i_uid; in xfs_reinit_inode()
351 kgid_t gid = inode->i_gid; in xfs_reinit_inode()
352 unsigned long state = inode->i_state; in xfs_reinit_inode()
354 error = inode_init_always(mp->m_super, inode); in xfs_reinit_inode()
357 inode->i_generation = generation; in xfs_reinit_inode()
359 inode->i_mode = mode; in xfs_reinit_inode()
360 inode->i_rdev = dev; in xfs_reinit_inode()
361 inode->i_uid = uid; in xfs_reinit_inode()
362 inode->i_gid = gid; in xfs_reinit_inode()
363 inode->i_state = state; in xfs_reinit_inode()
364 mapping_set_folio_min_order(inode->i_mapping, in xfs_reinit_inode()
365 M_IGEO(mp)->min_folio_order); in xfs_reinit_inode()
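xfs_reinit_inode() has to save the VFS fields that inode_init_always() would clobber (nlink, generation, mode, dev, uid, gid, i_state) and put them back afterwards, because a recycled XFS inode still describes the same on-disk object. A tiny illustrative sketch of that save/reinit/restore shape, with a hypothetical reinit_defaults() helper standing in for the VFS call:

/* Sketch: preserve selected fields across a reinitialiser that zeroes everything. */
#include <string.h>

struct vnode {
	unsigned int	nlink;
	unsigned int	generation;
	unsigned int	mode;
	unsigned long	state;
	int		scratch[16];	/* fields the reinitialiser may reset */
};

int reinit_defaults(struct vnode *v)
{
	memset(v, 0, sizeof(*v));	/* stands in for inode_init_always() */
	return 0;
}

int vnode_reinit(struct vnode *v)
{
	unsigned int nlink = v->nlink;
	unsigned int generation = v->generation;
	unsigned int mode = v->mode;
	unsigned long state = v->state;
	int error = reinit_defaults(v);

	if (error)
		return error;
	v->nlink = nlink;		/* set_nlink() in the real code */
	v->generation = generation;
	v->mode = mode;
	v->state = state;
	return 0;
}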
376 struct xfs_inode *ip) __releases(&ip->i_flags_lock) in xfs_iget_recycle()
378 struct xfs_mount *mp = ip->i_mount; in xfs_iget_recycle()
385 return -EAGAIN; in xfs_iget_recycle()
389 * the actual reclaim workers from stomping over us while we recycle in xfs_iget_recycle()
393 ip->i_flags |= XFS_IRECLAIM; in xfs_iget_recycle()
395 spin_unlock(&ip->i_flags_lock); in xfs_iget_recycle()
398 ASSERT(!rwsem_is_locked(&inode->i_rwsem)); in xfs_iget_recycle()
403 * Re-initializing the inode failed, and we are in deep in xfs_iget_recycle()
404 * trouble. Try to re-add it to the reclaim list. in xfs_iget_recycle()
407 spin_lock(&ip->i_flags_lock); in xfs_iget_recycle()
408 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); in xfs_iget_recycle()
409 ASSERT(ip->i_flags & XFS_IRECLAIMABLE); in xfs_iget_recycle()
410 spin_unlock(&ip->i_flags_lock); in xfs_iget_recycle()
417 spin_lock(&pag->pag_ici_lock); in xfs_iget_recycle()
418 spin_lock(&ip->i_flags_lock); in xfs_iget_recycle()
421 * Clear the per-lifetime state in the inode as we are now effectively in xfs_iget_recycle()
425 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; in xfs_iget_recycle()
426 ip->i_flags |= XFS_INEW; in xfs_iget_recycle()
427 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_iget_recycle()
429 inode->i_state = I_NEW; in xfs_iget_recycle()
430 spin_unlock(&ip->i_flags_lock); in xfs_iget_recycle()
431 spin_unlock(&pag->pag_ici_lock); in xfs_iget_recycle()
438 * actually a free, empty inode. If we are not allocating an inode,
439 * then check we didn't find a free inode.
442 * 0 if the inode free state matches the lookup context
443 * -ENOENT if the inode is free and we are not allocating
444 * -EFSCORRUPTED if there is any state mismatch at all
452 /* should be a free inode */ in xfs_iget_check_free_state()
453 if (VFS_I(ip)->i_mode != 0) { in xfs_iget_check_free_state()
454 xfs_warn(ip->i_mount, in xfs_iget_check_free_state()
455 "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)", in xfs_iget_check_free_state()
456 ip->i_ino, VFS_I(ip)->i_mode); in xfs_iget_check_free_state()
457 xfs_agno_mark_sick(ip->i_mount, in xfs_iget_check_free_state()
458 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), in xfs_iget_check_free_state()
460 return -EFSCORRUPTED; in xfs_iget_check_free_state()
463 if (ip->i_nblocks != 0) { in xfs_iget_check_free_state()
464 xfs_warn(ip->i_mount, in xfs_iget_check_free_state()
465 "Corruption detected! Free inode 0x%llx has blocks allocated!", in xfs_iget_check_free_state()
466 ip->i_ino); in xfs_iget_check_free_state()
467 xfs_agno_mark_sick(ip->i_mount, in xfs_iget_check_free_state()
468 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), in xfs_iget_check_free_state()
470 return -EFSCORRUPTED; in xfs_iget_check_free_state()
476 if (VFS_I(ip)->i_mode == 0) in xfs_iget_check_free_state()
477 return -ENOENT; in xfs_iget_check_free_state()
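xfs_iget_check_free_state() encodes a simple contract: when the caller is allocating a new inode, the cached inode must look free (mode and block count both zero), otherwise the filesystem is corrupt; when the caller is doing a plain lookup, finding a free inode means "no such inode". A compact user-space restatement of that decision; EFSCORRUPTED does not exist outside the kernel, so a placeholder is defined here:

/* Sketch of the free-state contract in xfs_iget_check_free_state(). */
#include <errno.h>

#ifndef EFSCORRUPTED
#define EFSCORRUPTED	EUCLEAN		/* placeholder for the kernel value */
#endif

struct cached_inode {
	unsigned int	mode;		/* 0 means "free" on disk */
	unsigned long	nblocks;
};

int check_free_state(const struct cached_inode *ip, int for_alloc)
{
	if (for_alloc) {
		/* Allocating: the inode we found had better be free. */
		if (ip->mode != 0)
			return -EFSCORRUPTED;
		if (ip->nblocks != 0)
			return -EFSCORRUPTED;
		return 0;
	}
	/* Plain lookup: a free inode is simply not there. */
	if (ip->mode == 0)
		return -ENOENT;
	return 0;
}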
491 for_each_cpu(cpu, &mp->m_inodegc_cpumask) { in xfs_inodegc_queue_all()
492 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_queue_all()
493 if (!llist_empty(&gc->list)) { in xfs_inodegc_queue_all()
494 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0); in xfs_inodegc_queue_all()
502 /* Wait for all queued work and collect errors */
510 flush_workqueue(mp->m_inodegc_wq); in xfs_inodegc_wait_all()
511 for_each_cpu(cpu, &mp->m_inodegc_cpumask) { in xfs_inodegc_wait_all()
514 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_wait_all()
515 if (gc->error && !error) in xfs_inodegc_wait_all()
516 error = gc->error; in xfs_inodegc_wait_all()
517 gc->error = 0; in xfs_inodegc_wait_all()
535 struct xfs_mount *mp = ip->i_mount; in xfs_iget_cache_hit()
539 * check for re-use of an inode within an RCU grace period due to the in xfs_iget_cache_hit()
545 spin_lock(&ip->i_flags_lock); in xfs_iget_cache_hit()
546 if (ip->i_ino != ino) in xfs_iget_cache_hit()
552 * reclaimable state, wait for the initialisation to complete in xfs_iget_cache_hit()
555 * If we're racing with the inactivation worker we also want to wait. in xfs_iget_cache_hit()
557 * previously marked the inode as free on disk but hasn't finished in xfs_iget_cache_hit()
561 * worker is running already, so we might as well wait. in xfs_iget_cache_hit()
564 * wait_on_inode to wait for these flags to be cleared in xfs_iget_cache_hit()
567 if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING)) in xfs_iget_cache_hit()
570 if (ip->i_flags & XFS_NEED_INACTIVE) { in xfs_iget_cache_hit()
571 /* Unlinked inodes cannot be re-grabbed. */ in xfs_iget_cache_hit()
572 if (VFS_I(ip)->i_nlink == 0) { in xfs_iget_cache_hit()
573 error = -ENOENT; in xfs_iget_cache_hit()
580 * Check the inode free state is valid. This also detects lookup in xfs_iget_cache_hit()
589 (ip->i_flags & XFS_IRECLAIMABLE)) in xfs_iget_cache_hit()
593 if (ip->i_flags & XFS_IRECLAIMABLE) { in xfs_iget_cache_hit()
596 if (error == -EAGAIN) in xfs_iget_cache_hit()
606 spin_unlock(&ip->i_flags_lock); in xfs_iget_cache_hit()
623 error = -EAGAIN; in xfs_iget_cache_hit()
625 spin_unlock(&ip->i_flags_lock); in xfs_iget_cache_hit()
630 spin_unlock(&ip->i_flags_lock); in xfs_iget_cache_hit()
633 * Do not wait for the workers, because the caller could hold an AGI in xfs_iget_cache_hit()
638 return -EAGAIN; in xfs_iget_cache_hit()
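The xfs_iget_cache_hit() fragments show how a lookup that finds an in-cache inode reacts to its transient flags under i_flags_lock: INEW, IRECLAIM or INACTIVATING mean someone else is transitioning it, so back off and retry; NEED_INACTIVE on an unlinked inode means it is going away, so the lookup fails; IRECLAIMABLE means the inode can be recycled. A small sketch of that decision, with illustrative flag values rather than the kernel's:

/* Sketch: classify a cache hit by its transient flags. */
#define F_NEW		0x01
#define F_RECLAIM	0x02
#define F_INACTIVATING	0x04
#define F_NEED_INACTIVE	0x08
#define F_RECLAIMABLE	0x10

enum hit_action {
	HIT_USE,	/* take a reference and use it */
	HIT_RECYCLE,	/* reinitialise a reclaimable inode */
	HIT_RETRY,	/* transient state: drop locks, try again (-EAGAIN) */
	HIT_GONE,	/* unlinked and being inactivated (-ENOENT) */
};

enum hit_action classify_hit(unsigned int flags, unsigned int nlink)
{
	if (flags & (F_NEW | F_RECLAIM | F_INACTIVATING))
		return HIT_RETRY;
	if (flags & F_NEED_INACTIVE)
		return nlink == 0 ? HIT_GONE : HIT_RETRY;
	if (flags & F_RECLAIMABLE)
		return HIT_RECYCLE;
	return HIT_USE;
}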
657 return -ENOMEM; in xfs_iget_cache_miss()
659 error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags); in xfs_iget_cache_miss()
669 * the i_flushiter field being initialised from the current on-disk in xfs_iget_cache_miss()
675 VFS_I(ip)->i_generation = get_random_u32(); in xfs_iget_cache_miss()
679 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp); in xfs_iget_cache_miss()
684 xfs_buf_offset(bp, ip->i_imap.im_boffset)); in xfs_iget_cache_miss()
698 * Check the inode free state is valid. This also detects lookup in xfs_iget_cache_miss()
711 error = -EAGAIN; in xfs_iget_cache_miss()
716 * Because the inode hasn't been added to the radix-tree yet it can't in xfs_iget_cache_miss()
717 * be found by another thread, so we can do the non-sleeping lock here. in xfs_iget_cache_miss()
729 * The ip->i_flags_lock that protects the XFS_INEW flag forms the in xfs_iget_cache_miss()
735 ip->i_udquot = NULL; in xfs_iget_cache_miss()
736 ip->i_gdquot = NULL; in xfs_iget_cache_miss()
737 ip->i_pdquot = NULL; in xfs_iget_cache_miss()
741 spin_lock(&pag->pag_ici_lock); in xfs_iget_cache_miss()
742 error = radix_tree_insert(&pag->pag_ici_root, agino, ip); in xfs_iget_cache_miss()
744 WARN_ON(error != -EEXIST); in xfs_iget_cache_miss()
746 error = -EAGAIN; in xfs_iget_cache_miss()
749 spin_unlock(&pag->pag_ici_lock); in xfs_iget_cache_miss()
756 spin_unlock(&pag->pag_ici_lock); in xfs_iget_cache_miss()
796 return -EINVAL; in xfs_iget()
807 ip = radix_tree_lookup(&pag->pag_ici_root, agino); in xfs_iget()
816 error = -ENODATA; in xfs_iget()
831 * If we have a real type for an on-disk inode, we can setup the inode in xfs_iget()
835 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0) in xfs_iget()
841 error == -EAGAIN) { in xfs_iget()
873 spin_lock(&ip->i_flags_lock); in xfs_reclaim_igrab()
877 spin_unlock(&ip->i_flags_lock); in xfs_reclaim_igrab()
882 if (ip->i_sick && in xfs_reclaim_igrab()
883 (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) { in xfs_reclaim_igrab()
884 spin_unlock(&ip->i_flags_lock); in xfs_reclaim_igrab()
889 spin_unlock(&ip->i_flags_lock); in xfs_reclaim_igrab()
894 * Inode reclaim is non-blocking, so the default action if progress cannot be
897 * blocking anymore and hence we can wait for the inode to be able to reclaim
900 * We do no IO here - if callers require inodes to be cleaned they must push the
902 * done in the background in a non-blocking manner, and enables memory reclaim
910 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ in xfs_reclaim_inode()
921 * then the in-memory log tail movement caused by the abort can be in xfs_reclaim_inode()
924 if (xlog_is_shutdown(ip->i_mount->m_log)) { in xfs_reclaim_inode()
940 * to be reclaimed with an invalid inode number when in the free state. in xfs_reclaim_inode()
943 * detect races with us here. By doing this, we guarantee that once in xfs_reclaim_inode()
948 spin_lock(&ip->i_flags_lock); in xfs_reclaim_inode()
949 ip->i_flags = XFS_IRECLAIM; in xfs_reclaim_inode()
950 ip->i_ino = 0; in xfs_reclaim_inode()
951 ip->i_sick = 0; in xfs_reclaim_inode()
952 ip->i_checked = 0; in xfs_reclaim_inode()
953 spin_unlock(&ip->i_flags_lock); in xfs_reclaim_inode()
955 ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL); in xfs_reclaim_inode()
958 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); in xfs_reclaim_inode()
960 * Remove the inode from the per-AG radix tree. in xfs_reclaim_inode()
966 spin_lock(&pag->pag_ici_lock); in xfs_reclaim_inode()
967 if (!radix_tree_delete(&pag->pag_ici_root, in xfs_reclaim_inode()
968 XFS_INO_TO_AGINO(ip->i_mount, ino))) in xfs_reclaim_inode()
971 spin_unlock(&pag->pag_ici_lock); in xfs_reclaim_inode()
978 * We make that OK here by ensuring that we wait until the inode is in xfs_reclaim_inode()
979 * unlocked after the lookup before we go ahead and free it. in xfs_reclaim_inode()
982 ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot); in xfs_reclaim_inode()
1017 while (xa_marked(&mp->m_perags, XFS_PERAG_RECLAIM_MARK)) { in xfs_reclaim_inodes()
1018 xfs_ail_push_all_sync(mp->m_ail); in xfs_reclaim_inodes()
1026 * push the AIL here. We also want to proactively free up memory if we can to
1045 xfs_ail_push_all(mp->m_ail); in xfs_reclaim_inodes_nr()
1059 XA_STATE (xas, &mp->m_perags, 0); in xfs_reclaim_inodes_count()
1066 reclaimable += pag->pag_ici_reclaimable; in xfs_reclaim_inodes_count()
1078 if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) && in xfs_icwalk_match_id()
1079 !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid)) in xfs_icwalk_match_id()
1082 if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) && in xfs_icwalk_match_id()
1083 !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid)) in xfs_icwalk_match_id()
1086 if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) && in xfs_icwalk_match_id()
1087 ip->i_projid != icw->icw_prid) in xfs_icwalk_match_id()
1094 * A union-based inode filtering algorithm. Process the inode if any of the
1102 if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) && in xfs_icwalk_match_id_union()
1103 uid_eq(VFS_I(ip)->i_uid, icw->icw_uid)) in xfs_icwalk_match_id_union()
1106 if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) && in xfs_icwalk_match_id_union()
1107 gid_eq(VFS_I(ip)->i_gid, icw->icw_gid)) in xfs_icwalk_match_id_union()
1110 if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) && in xfs_icwalk_match_id_union()
1111 ip->i_projid == icw->icw_prid) in xfs_icwalk_match_id_union()
1132 if (icw->icw_flags & XFS_ICWALK_FLAG_UNION) in xfs_icwalk_match()
1140 if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) && in xfs_icwalk_match()
1141 XFS_ISIZE(ip) < icw->icw_min_file_size) in xfs_icwalk_match()
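xfs_icwalk_match() applies the uid/gid/prid filters in one of two modes: the default requires every requested criterion to match (intersection), while XFS_ICWALK_FLAG_UNION, used by the quota-driven blockgc scan further down, accepts an inode if any one matches; a minimum-file-size test is then applied on top. A portable sketch of the same filtering logic, with simplified field and flag names:

/* Sketch: intersection vs union matching as in xfs_icwalk_match(). */
#include <stdbool.h>
#include <stdint.h>

#define W_UID		0x01
#define W_GID		0x02
#define W_PRID		0x04
#define W_UNION		0x08
#define W_MINSIZE	0x10

struct walk_filter {
	unsigned int	flags;
	uint32_t	uid, gid, prid;
	uint64_t	min_size;
};

struct walk_inode {
	uint32_t	uid, gid, prid;
	uint64_t	size;
};

static bool match_id(const struct walk_inode *ip, const struct walk_filter *f)
{
	/* Intersection: every requested criterion must hold. */
	if ((f->flags & W_UID) && ip->uid != f->uid)
		return false;
	if ((f->flags & W_GID) && ip->gid != f->gid)
		return false;
	if ((f->flags & W_PRID) && ip->prid != f->prid)
		return false;
	return true;
}

static bool match_id_union(const struct walk_inode *ip, const struct walk_filter *f)
{
	/* Union: any one matching criterion is enough. */
	if ((f->flags & W_UID) && ip->uid == f->uid)
		return true;
	if ((f->flags & W_GID) && ip->gid == f->gid)
		return true;
	if ((f->flags & W_PRID) && ip->prid == f->prid)
		return true;
	return false;
}

bool walk_match(const struct walk_inode *ip, const struct walk_filter *f)
{
	bool match = (f->flags & W_UNION) ? match_id_union(ip, f)
					  : match_id(ip, f);

	if (!match)
		return false;
	if ((f->flags & W_MINSIZE) && ip->size < f->min_size)
		return false;
	return true;
}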
1170 bool wait; in xfs_inode_free_eofblocks() local
1172 wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC); in xfs_inode_free_eofblocks()
1178 * If the mapping is dirty the operation can block and wait for some in xfs_inode_free_eofblocks()
1181 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) in xfs_inode_free_eofblocks()
1188 * If the caller is waiting, return -EAGAIN to keep the background in xfs_inode_free_eofblocks()
1192 if (wait) in xfs_inode_free_eofblocks()
1193 return -EAGAIN; in xfs_inode_free_eofblocks()
1212 struct xfs_mount *mp = ip->i_mount; in xfs_blockgc_set_iflag()
1221 if (ip->i_flags & iflag) in xfs_blockgc_set_iflag()
1223 spin_lock(&ip->i_flags_lock); in xfs_blockgc_set_iflag()
1224 ip->i_flags |= iflag; in xfs_blockgc_set_iflag()
1225 spin_unlock(&ip->i_flags_lock); in xfs_blockgc_set_iflag()
1227 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_blockgc_set_iflag()
1228 spin_lock(&pag->pag_ici_lock); in xfs_blockgc_set_iflag()
1230 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_blockgc_set_iflag()
1233 spin_unlock(&pag->pag_ici_lock); in xfs_blockgc_set_iflag()
1250 struct xfs_mount *mp = ip->i_mount; in xfs_blockgc_clear_iflag()
1256 spin_lock(&ip->i_flags_lock); in xfs_blockgc_clear_iflag()
1257 ip->i_flags &= ~iflag; in xfs_blockgc_clear_iflag()
1258 clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0; in xfs_blockgc_clear_iflag()
1259 spin_unlock(&ip->i_flags_lock); in xfs_blockgc_clear_iflag()
1264 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_blockgc_clear_iflag()
1265 spin_lock(&pag->pag_ici_lock); in xfs_blockgc_clear_iflag()
1267 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_blockgc_clear_iflag()
1270 spin_unlock(&pag->pag_ici_lock); in xfs_blockgc_clear_iflag()
1283 * Prepare to free COW fork blocks from an inode.
1292 sync = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC); in xfs_prep_free_cowblocks()
1309 * in-flight I/O under any circumstances, because outstanding writeback in xfs_prep_free_cowblocks()
1314 * currently opened for write from background (i.e. non-sync) scans. For in xfs_prep_free_cowblocks()
1316 * never free COW fork blocks out from under pending I/O. in xfs_prep_free_cowblocks()
1341 bool wait; in xfs_inode_free_cowblocks() local
1344 wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC); in xfs_inode_free_cowblocks()
1356 * If the caller is waiting, return -EAGAIN to keep the background in xfs_inode_free_cowblocks()
1361 if (wait) in xfs_inode_free_cowblocks()
1362 return -EAGAIN; in xfs_inode_free_cowblocks()
1368 if (wait) in xfs_inode_free_cowblocks()
1369 return -EAGAIN; in xfs_inode_free_cowblocks()
1399 /* Disable post-EOF and CoW block auto-reclamation. */
1411 cancel_delayed_work_sync(&pag->pag_blockgc_work); in xfs_blockgc_stop()
1415 /* Enable post-EOF and CoW block auto-reclamation. */
1450 spin_lock(&ip->i_flags_lock); in xfs_blockgc_igrab()
1451 if (!ip->i_ino) in xfs_blockgc_igrab()
1454 if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS) in xfs_blockgc_igrab()
1456 spin_unlock(&ip->i_flags_lock); in xfs_blockgc_igrab()
1459 if (xfs_is_shutdown(ip->i_mount)) in xfs_blockgc_igrab()
1470 spin_unlock(&ip->i_flags_lock); in xfs_blockgc_igrab()
1502 struct xfs_mount *mp = pag->pag_mount; in xfs_blockgc_worker()
1510 pag->pag_agno, error); in xfs_blockgc_worker()
1515 * Try to free space in the filesystem by purging inactive inodes, eofblocks
1535 * Reclaim all the free space that we can by scheduling the background blockgc
1551 mod_delayed_work(pag->pag_mount->m_blockgc_wq, in xfs_blockgc_flush_all()
1552 &pag->pag_blockgc_work, 0); in xfs_blockgc_flush_all()
1555 flush_delayed_work(&pag->pag_blockgc_work); in xfs_blockgc_flush_all()
1563 * each quota under low free space conditions (less than 1% free space) in the
1585 * Run a scan to free blocks using the union filter to cover all in xfs_blockgc_free_dquots()
1591 icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id); in xfs_blockgc_free_dquots()
1597 icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id); in xfs_blockgc_free_dquots()
1603 icw.icw_prid = pdqp->q_id; in xfs_blockgc_free_dquots()
1620 return xfs_blockgc_free_dquots(ip->i_mount, in xfs_blockgc_free_quota()
1659 * made by the icwalk igrab function. Return -EAGAIN to skip an inode.
1682 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1691 struct xfs_mount *mp = pag->pag_mount; in xfs_icwalk_ag()
1702 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor); in xfs_icwalk_ag()
1713 nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root, in xfs_icwalk_ag()
1741 * us to see this inode, so another lookup from the in xfs_icwalk_ag()
1744 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) in xfs_icwalk_ag()
1746 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); in xfs_icwalk_ag()
1747 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) in xfs_icwalk_ag()
1759 if (error == -EAGAIN) { in xfs_icwalk_ag()
1763 if (error && last_error != -EFSCORRUPTED) in xfs_icwalk_ag()
1768 if (error == -EFSCORRUPTED) in xfs_icwalk_ag()
1773 if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) { in xfs_icwalk_ag()
1774 icw->icw_scan_limit -= XFS_LOOKUP_BATCH; in xfs_icwalk_ag()
1775 if (icw->icw_scan_limit <= 0) in xfs_icwalk_ag()
1783 WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index); in xfs_icwalk_ag()
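The xfs_icwalk_ag() fragments show the batching pattern: resume from a saved cursor, pull up to XFS_LOOKUP_BATCH tagged inodes per radix-tree gang lookup, advance the cursor to the last inode number seen plus one, stop if that addition wrapped (first_index < agino), and optionally decrement a scan limit. A simplified, self-contained user-space sketch of the cursor and wrap handling; the lookup_batch() provider over a fixed array is hypothetical and only stands in for the radix tree:

/* Sketch: batched walk with a resumable cursor and wraparound detection. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define LOOKUP_BATCH	32

static const uint32_t tagged[] = { 3, 7, 100, 4000000000u, UINT32_MAX };
#define NR_TAGGED	(sizeof(tagged) / sizeof(tagged[0]))

/* Return up to LOOKUP_BATCH tagged indices that are >= first_index. */
static size_t lookup_batch(uint32_t first_index, uint32_t items[LOOKUP_BATCH])
{
	size_t i, nr = 0;

	for (i = 0; i < NR_TAGGED && nr < LOOKUP_BATCH; i++)
		if (tagged[i] >= first_index)
			items[nr++] = tagged[i];
	return nr;
}

/* Pass scan_limit <= 0 for an unbounded walk. */
void walk_all(void (*process)(uint32_t index), long scan_limit)
{
	uint32_t first_index = 0;
	uint32_t items[LOOKUP_BATCH];
	int done = 0;

	while (!done) {
		size_t nr = lookup_batch(first_index, items);
		size_t i;

		if (nr == 0)
			break;

		for (i = 0; i < nr; i++) {
			uint32_t idx = items[i];

			/* Resume after the last index we saw... */
			first_index = idx + 1;
			/* ...unless that addition wrapped: then we are done. */
			if (first_index < idx)
				done = 1;
			process(idx);
		}

		if (scan_limit > 0) {
			scan_limit -= LOOKUP_BATCH;
			if (scan_limit <= 0)
				break;
		}
	}
}

static void show(uint32_t index)
{
	printf("visit %u\n", index);
}

int main(void)
{
	walk_all(show, 0);
	return 0;
}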
1808 if (error == -EFSCORRUPTED) { in xfs_icwalk()
1832 xfs_warn(ip->i_mount, in xfs_check_delalloc()
1834 ip->i_ino, in xfs_check_delalloc()
1849 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_set_reclaimable()
1852 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) { in xfs_inodegc_set_reclaimable()
1858 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_inodegc_set_reclaimable()
1859 spin_lock(&pag->pag_ici_lock); in xfs_inodegc_set_reclaimable()
1860 spin_lock(&ip->i_flags_lock); in xfs_inodegc_set_reclaimable()
1863 ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING); in xfs_inodegc_set_reclaimable()
1864 ip->i_flags |= XFS_IRECLAIMABLE; in xfs_inodegc_set_reclaimable()
1865 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_inodegc_set_reclaimable()
1868 spin_unlock(&ip->i_flags_lock); in xfs_inodegc_set_reclaimable()
1869 spin_unlock(&pag->pag_ici_lock); in xfs_inodegc_set_reclaimable()
1874 * Free all speculative preallocations and possibly even the inode itself.
1897 struct llist_node *node = llist_del_all(&gc->list); in xfs_inodegc_worker()
1899 struct xfs_mount *mp = gc->mp; in xfs_inodegc_worker()
1908 cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask); in xfs_inodegc_worker()
1911 WRITE_ONCE(gc->items, 0); in xfs_inodegc_worker()
1919 * task-wide nofs context for the following operations. in xfs_inodegc_worker()
1924 trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits)); in xfs_inodegc_worker()
1926 WRITE_ONCE(gc->shrinker_hits, 0); in xfs_inodegc_worker()
1932 if (error && !gc->error) in xfs_inodegc_worker()
1933 gc->error = error; in xfs_inodegc_worker()
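xfs_inodegc_worker() drains its per-cpu queue with llist_del_all(), clears this CPU's bit in m_inodegc_cpumask, resets the item count, and then processes the detached list, remembering only the first error. The lock-free "push from many producers, detach all from one worker" idea can be sketched in user space with C11 atomics; the node and queue types are hypothetical and an atomic exchange stands in for llist_del_all():

/* Sketch: multi-producer push, single-consumer detach-all (llist-style). */
#include <stdatomic.h>
#include <stddef.h>

struct gc_node {
	struct gc_node	*next;
	int		payload;
};

struct gc_queue {
	_Atomic(struct gc_node *)	head;
	_Atomic unsigned int		items;
	int				error;	/* first error seen, worker-only */
};

/* Producer side: lock-free push, like llist_add() in xfs_inodegc_queue(). */
void gc_push(struct gc_queue *q, struct gc_node *n)
{
	struct gc_node *old = atomic_load(&q->head);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&q->head, &old, n));
	atomic_fetch_add(&q->items, 1);
}

/* Worker side: detach the whole list at once, then walk it privately. */
void gc_drain(struct gc_queue *q, int (*inactivate)(struct gc_node *))
{
	struct gc_node *node = atomic_exchange(&q->head, NULL);

	atomic_store(&q->items, 0);
	while (node) {
		struct gc_node *next = node->next;
		int error = inactivate(node);

		if (error && !q->error)
			q->error = error;	/* keep only the first error */
		node = next;
	}
}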
1940 * Expedite all pending inodegc work to run immediately. This does not wait for
1955 * wait for the work to finish.
1968 * workers and wait for them to stop. Caller must hold sb->s_umount to
1983 * threads that sample the inodegc state just prior to us clearing it. in xfs_inodegc_stop()
1992 flush_workqueue(mp->m_inodegc_wq); in xfs_inodegc_stop()
2001 * inactivation work if there is any. Caller must hold sb->s_umount to
2020 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_want_queue_rt_file()
2025 if (__percpu_counter_compare(&mp->m_frextents, in xfs_inodegc_want_queue_rt_file()
2026 mp->m_low_rtexts[XFS_LOWSP_5_PCNT], in xfs_inodegc_want_queue_rt_file()
2039 * - We've accumulated more than one inode cluster buffer's worth of inodes.
2040 * - There is less than 5% free space left.
2041 * - Any of the quotas for this inode are near an enforcement limit.
2048 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_want_queue_work()
2050 if (items > mp->m_ino_geo.inodes_per_cluster) in xfs_inodegc_want_queue_work()
2053 if (__percpu_counter_compare(&mp->m_fdblocks, in xfs_inodegc_want_queue_work()
2054 mp->m_low_space[XFS_LOWSP_5_PCNT], in xfs_inodegc_want_queue_work()
2080 * Make the frontend wait for inactivations when:
2082 * - Memory shrinkers queued the inactivation worker and it hasn't finished.
2083 * - The queue depth exceeds the maximum allowable percpu backlog.
2096 if (current->flags & PF_MEMALLOC_NOFS) in xfs_inodegc_want_flush_work()
2117 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_queue()
2125 spin_lock(&ip->i_flags_lock); in xfs_inodegc_queue()
2126 ip->i_flags |= XFS_NEED_INACTIVE; in xfs_inodegc_queue()
2127 spin_unlock(&ip->i_flags_lock); in xfs_inodegc_queue()
2130 gc = this_cpu_ptr(mp->m_inodegc); in xfs_inodegc_queue()
2131 llist_add(&ip->i_gclist, &gc->list); in xfs_inodegc_queue()
2132 items = READ_ONCE(gc->items); in xfs_inodegc_queue()
2133 WRITE_ONCE(gc->items, items + 1); in xfs_inodegc_queue()
2134 shrinker_hits = READ_ONCE(gc->shrinker_hits); in xfs_inodegc_queue()
2142 if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask)) in xfs_inodegc_queue()
2143 cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask); in xfs_inodegc_queue()
2158 mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work, in xfs_inodegc_queue()
2164 flush_delayed_work(&gc->work); in xfs_inodegc_queue()
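xfs_inodegc_queue() adds the inode to the per-cpu llist, bumps the item count, and then makes two decisions based on the thresholds described in the comments above (more than a cluster's worth of queued inodes, less than 5% free space, quotas near an enforcement limit): whether to kick the worker immediately rather than after the usual delay, and, per xfs_inodegc_want_flush_work(), whether the caller should synchronously flush the work. A condensed sketch of those two decisions, with illustrative threshold names:

/* Sketch: decide how urgently queued inactivation work should run. */
#include <stdbool.h>

struct gc_state {
	unsigned int	items;			/* queued on this CPU */
	unsigned int	shrinker_hits;		/* memory-pressure pokes */
	unsigned int	inodes_per_cluster;	/* batching threshold */
	unsigned int	max_backlog;		/* per-cpu queue depth cap */
	bool		low_free_space;		/* under roughly 5% free */
	bool		quota_near_limit;	/* any quota near enforcement */
	bool		caller_in_nofs;		/* PF_MEMALLOC_NOFS-style context */
};

/* Run the worker now (delay 0) rather than after the usual batching delay? */
bool want_queue_work_now(const struct gc_state *s)
{
	if (s->items > s->inodes_per_cluster)
		return true;
	if (s->low_free_space)
		return true;
	if (s->quota_near_limit)
		return true;
	return false;
}

/* Should the caller wait for the worker before returning? */
bool want_flush_work(const struct gc_state *s)
{
	if (s->caller_in_nofs)
		return false;	/* cannot recurse into filesystem work here */
	if (s->shrinker_hits > 0)
		return true;
	if (s->items > s->max_backlog)
		return true;
	return false;
}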
2173 * still may be under IO and hence we have wait for IO completion to occur
2182 struct xfs_mount *mp = ip->i_mount; in xfs_inode_mark_reclaimable()
2205 * there's memory pressure. Inactivation does not itself free any memory but
2221 struct xfs_mount *mp = shrink->private_data; in xfs_inodegc_shrinker_count()
2228 for_each_cpu(cpu, &mp->m_inodegc_cpumask) { in xfs_inodegc_shrinker_count()
2229 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_shrinker_count()
2230 if (!llist_empty(&gc->list)) in xfs_inodegc_shrinker_count()
2242 struct xfs_mount *mp = shrink->private_data; in xfs_inodegc_shrinker_scan()
2252 for_each_cpu(cpu, &mp->m_inodegc_cpumask) { in xfs_inodegc_shrinker_scan()
2253 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_shrinker_scan()
2254 if (!llist_empty(&gc->list)) { in xfs_inodegc_shrinker_scan()
2255 unsigned int h = READ_ONCE(gc->shrinker_hits); in xfs_inodegc_shrinker_scan()
2257 WRITE_ONCE(gc->shrinker_hits, h + 1); in xfs_inodegc_shrinker_scan()
2258 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0); in xfs_inodegc_shrinker_scan()
2265 * to think there's deferred work to call us back about. in xfs_inodegc_shrinker_scan()
2278 mp->m_inodegc_shrinker = shrinker_alloc(SHRINKER_NONSLAB, in xfs_inodegc_register_shrinker()
2279 "xfs-inodegc:%s", in xfs_inodegc_register_shrinker()
2280 mp->m_super->s_id); in xfs_inodegc_register_shrinker()
2281 if (!mp->m_inodegc_shrinker) in xfs_inodegc_register_shrinker()
2282 return -ENOMEM; in xfs_inodegc_register_shrinker()
2284 mp->m_inodegc_shrinker->count_objects = xfs_inodegc_shrinker_count; in xfs_inodegc_register_shrinker()
2285 mp->m_inodegc_shrinker->scan_objects = xfs_inodegc_shrinker_scan; in xfs_inodegc_register_shrinker()
2286 mp->m_inodegc_shrinker->seeks = 0; in xfs_inodegc_register_shrinker()
2287 mp->m_inodegc_shrinker->batch = XFS_INODEGC_SHRINKER_BATCH; in xfs_inodegc_register_shrinker()
2288 mp->m_inodegc_shrinker->private_data = mp; in xfs_inodegc_register_shrinker()
2290 shrinker_register(mp->m_inodegc_shrinker); in xfs_inodegc_register_shrinker()
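The final fragments register a memory shrinker whose count callback reports work whenever any per-cpu inodegc list is non-empty and whose scan callback bumps shrinker_hits and kicks the workers. Pulling the listed registration lines together, the setup has roughly the following kernel-side shape; this only mirrors the calls shown above, is a sketch rather than the authoritative function, and is not buildable outside a kernel tree:

/* Kernel-side sketch assembled from the fragments above (not standalone). */
static int xfs_inodegc_register_shrinker(struct xfs_mount *mp)
{
	mp->m_inodegc_shrinker = shrinker_alloc(SHRINKER_NONSLAB,
						"xfs-inodegc:%s",
						mp->m_super->s_id);
	if (!mp->m_inodegc_shrinker)
		return -ENOMEM;

	mp->m_inodegc_shrinker->count_objects = xfs_inodegc_shrinker_count;
	mp->m_inodegc_shrinker->scan_objects = xfs_inodegc_shrinker_scan;
	mp->m_inodegc_shrinker->seeks = 0;	/* nothing to re-read from disk */
	mp->m_inodegc_shrinker->batch = XFS_INODEGC_SHRINKER_BATCH;
	mp->m_inodegc_shrinker->private_data = mp;

	shrinker_register(mp->m_inodegc_shrinker);
	return 0;
}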