/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
				struct xfs_perag *pag, struct xfs_inode *ip);

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * If this allocation did not occur inside a transaction, we could
	 * use KM_MAYFAIL and return NULL here on ENOMEM. Structure the
	 * code to handle that case anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}

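/*
 * Editorial sketch (not kernel code): if the allocation above ever moves
 * out of transaction context, the comment in xfs_inode_alloc() suggests
 * the fallible form would simply be:
 *
 *	ip = kmem_zone_alloc(xfs_inode_zone, KM_MAYFAIL);
 *	if (!ip)
 *		return NULL;
 *
 * so the caller would see ENOMEM instead of blocking. With KM_SLEEP the
 * allocator retries until it succeeds, so the !ip check above is
 * effectively defensive only.
 */
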
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}

STATIC void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

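/*
 * Editorial note: the ip->i_ino = 0 store above pairs with the
 * lookup-side check in xfs_iget_cache_hit() below, which re-validates
 * the inode number under the same ip->i_flags_lock:
 *
 *	spin_lock(&ip->i_flags_lock);
 *	if (ip->i_ino != ino)
 *		goto out_error;		(treated as EAGAIN and retried)
 *
 * An RCU walker can therefore never mistake a freed-and-recycled slot
 * for the inode it was looking up.
 */
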
/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

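/*
 * Editorial sketch: the preload/insert pattern above is the standard way
 * to add an entry to a radix tree under a spinlock. In generic form
 * (hypothetical code, not part of this file):
 *
 *	if (radix_tree_preload(GFP_NOFS))
 *		return error, or back off and retry;
 *	spin_lock(&lock);
 *	error = radix_tree_insert(&root, index, item);
 *	spin_unlock(&lock);
 *	radix_tree_preload_end();
 *
 * The preload allocates tree nodes up front so the insert itself never
 * needs to sleep; radix_tree_preload_end() must run on all paths because
 * a successful preload returns with preemption disabled.
 */
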
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

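/*
 * Editorial sketch of a typical caller (hypothetical, for illustration
 * only). Note that this file uses positive internal error codes, so the
 * return value is tested directly:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... inspect ip under the shared ILOCK ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */
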
/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

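/*
 * Editorial note: the stack cost referred to above is the on-stack batch
 * array in the walkers below, i.e. 32 pointers, which is 256 bytes on a
 * 64-bit machine (128 bytes on 32-bit) per walk frame.
 */
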
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags, args);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

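/*
 * Editorial sketch: an "execute" callback for the walker above follows
 * the shape of xfs_inode_free_eofblocks() further down. A minimal
 * (hypothetical) callback would be:
 *
 *	STATIC int
 *	xfs_example_walk_cb(
 *		struct xfs_inode	*ip,
 *		struct xfs_perag	*pag,
 *		int			flags,
 *		void			*args)
 *	{
 *		return 0;	0 = done, EAGAIN = skip/retry, else error
 *	}
 *
 * Returning EAGAIN makes the walker count the inode as skipped and
 * restart the whole AG scan after a short delay.
 */
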
/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'background_prealloc_discard_period' tunable (5m by default).
 */
STATIC void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}

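/*
 * Editorial note: the worker above re-arms itself via
 * xfs_queue_eofblocks(), so the scan keeps running every xfs_eofb_secs
 * seconds (300s with the default 5 minute period) for as long as any
 * perag carries XFS_ICI_EOFBLOCKS_TAG; once the last tag is cleared the
 * delayed work is simply not requeued and the scanner goes idle.
 */
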
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

int
xfs_inode_ag_iterator_tag(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount        *mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

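/*
 * Editorial note on the arithmetic above: xfs_syncd_centisecs defaults to
 * 3000 (the 30s periodic sync interval), so 3000 / 6 * 10 = 5000ms, i.e.
 * the 5s reclaim period described in the comment.
 */
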
/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

static void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_reclaim_work_queue(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

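/*
 * Editorial note: xfs_inode_set_reclaim_tag() shows the lock ordering
 * used throughout this file - pag->pag_ici_lock is always taken before
 * ip->i_flags_lock - and the reclaim tag is maintained at two levels:
 * the per-inode tag in pag->pag_ici_root and, when an AG gains its first
 * reclaimable inode, the per-AG tag in mp->m_perag_tree that the
 * work-queueing code above tests with radix_tree_tagged().
 */
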
STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

STATIC void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

	xfs_iflock(ip);
reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during the filesystem unmount reclaim walk will leak all
 * the unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

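/*
 * Editorial note: with nr_to_scan = INT_MAX, xfs_reclaim_inodes() is an
 * unbounded "reclaim everything" pass. A caller that must empty the
 * cache (unmount-style teardown, for instance) would plausibly invoke it
 * in blocking mode:
 *
 *	xfs_reclaim_inodes(mp, SYNC_WAIT);
 *
 * whereas the background worker above uses SYNC_TRYLOCK to stay
 * non-blocking.
 */
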
/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC int
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if (eofb->eof_flags & XFS_EOF_FLAGS_UID &&
	    ip->i_d.di_uid != eofb->eof_uid)
		return 0;

	if (eofb->eof_flags & XFS_EOF_FLAGS_GID &&
	    ip->i_d.di_gid != eofb->eof_gid)
		return 0;

	if (eofb->eof_flags & XFS_EOF_FLAGS_PRID &&
	    xfs_get_projid(ip) != eofb->eof_prid)
		return 0;

	return 1;
}

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags,
	void			*args)
{
	int ret;
	struct xfs_eofblocks *eofb = args;

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (!xfs_inode_match_id(ip, eofb))
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	ret = xfs_free_eofblocks(ip->i_mount, ip, true);

	/* don't revisit the inode if we're not waiting */
	if (ret == EAGAIN && !(flags & SYNC_WAIT))
		ret = 0;

	return ret;
}

int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	int flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
					 eofb, XFS_ICI_EOFBLOCKS_TAG);
}

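/*
 * Editorial sketch: a filtered, synchronous EOF-blocks trim for a single
 * uid would be built roughly like this (hypothetical caller; field names
 * follow the struct xfs_eofblocks usage in the functions above):
 *
 *	struct xfs_eofblocks	eofb = { 0 };
 *
 *	eofb.eof_flags = XFS_EOF_FLAGS_UID | XFS_EOF_FLAGS_SYNC;
 *	eofb.eof_uid = uid;
 *	error = xfs_icache_free_eofblocks(mp, &eofb);
 *
 * Passing NULL instead of &eofb, as the background worker does, trims
 * every tagged inode with no id or size filtering.
 */
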
void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_set_eofblocks_tag(ip);

	tagged = radix_tree_tagged(&pag->pag_ici_root,
				   XFS_ICI_EOFBLOCKS_TAG);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_EOFBLOCKS_TAG);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		xfs_queue_eofblocks(ip->i_mount);

		trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
					      -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_clear_eofblocks_tag(ip);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			     XFS_ICI_EOFBLOCKS_TAG);
	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
					       -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}