xref: /linux/fs/xfs/xfs_icache.c (revision 1ccea77e2a2687cae171b7987eb44730ec8c6d5f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_sb.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_error.h"
15 #include "xfs_trans.h"
16 #include "xfs_trans_priv.h"
17 #include "xfs_inode_item.h"
18 #include "xfs_quota.h"
19 #include "xfs_trace.h"
20 #include "xfs_icache.h"
21 #include "xfs_bmap_util.h"
22 #include "xfs_dquot_item.h"
23 #include "xfs_dquot.h"
24 #include "xfs_reflink.h"
25 
26 #include <linux/kthread.h>
27 #include <linux/freezer.h>
28 #include <linux/iversion.h>
29 
30 /*
31  * Allocate and initialise an xfs_inode.
32  */
33 struct xfs_inode *
34 xfs_inode_alloc(
35 	struct xfs_mount	*mp,
36 	xfs_ino_t		ino)
37 {
38 	struct xfs_inode	*ip;
39 
40 	/*
41 	 * if this didn't occur in transactions, we could use
42 	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
43 	 * code up to do this anyway.
44 	 */
45 	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
46 	if (!ip)
47 		return NULL;
48 	if (inode_init_always(mp->m_super, VFS_I(ip))) {
49 		kmem_zone_free(xfs_inode_zone, ip);
50 		return NULL;
51 	}
52 
53 	/* VFS doesn't initialise i_mode! */
54 	VFS_I(ip)->i_mode = 0;
55 
56 	XFS_STATS_INC(mp, vn_active);
57 	ASSERT(atomic_read(&ip->i_pincount) == 0);
58 	ASSERT(!xfs_isiflocked(ip));
59 	ASSERT(ip->i_ino == 0);
60 
61 	/* initialise the xfs inode */
62 	ip->i_ino = ino;
63 	ip->i_mount = mp;
64 	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
65 	ip->i_afp = NULL;
66 	ip->i_cowfp = NULL;
67 	ip->i_cnextents = 0;
68 	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
69 	memset(&ip->i_df, 0, sizeof(ip->i_df));
70 	ip->i_flags = 0;
71 	ip->i_delayed_blks = 0;
72 	memset(&ip->i_d, 0, sizeof(ip->i_d));
73 	ip->i_sick = 0;
74 	ip->i_checked = 0;
75 	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
76 	INIT_LIST_HEAD(&ip->i_ioend_list);
77 	spin_lock_init(&ip->i_ioend_lock);
78 
79 	return ip;
80 }
81 
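/*
 * RCU callback that performs the final teardown of an inode: release the
 * data fork (for regular files, directories and symlinks), the attr and
 * CoW forks if present, and the inode log item, then return the structure
 * to the inode zone.
 */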
82 STATIC void
83 xfs_inode_free_callback(
84 	struct rcu_head		*head)
85 {
86 	struct inode		*inode = container_of(head, struct inode, i_rcu);
87 	struct xfs_inode	*ip = XFS_I(inode);
88 
89 	switch (VFS_I(ip)->i_mode & S_IFMT) {
90 	case S_IFREG:
91 	case S_IFDIR:
92 	case S_IFLNK:
93 		xfs_idestroy_fork(ip, XFS_DATA_FORK);
94 		break;
95 	}
96 
97 	if (ip->i_afp)
98 		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
99 	if (ip->i_cowfp)
100 		xfs_idestroy_fork(ip, XFS_COW_FORK);
101 
102 	if (ip->i_itemp) {
103 		ASSERT(!test_bit(XFS_LI_IN_AIL,
104 				 &ip->i_itemp->ili_item.li_flags));
105 		xfs_inode_item_destroy(ip);
106 		ip->i_itemp = NULL;
107 	}
108 
109 	kmem_zone_free(xfs_inode_zone, ip);
110 }
111 
112 static void
113 __xfs_inode_free(
114 	struct xfs_inode	*ip)
115 {
116 	/* asserts to verify all state is correct here */
117 	ASSERT(atomic_read(&ip->i_pincount) == 0);
118 	XFS_STATS_DEC(ip->i_mount, vn_active);
119 
120 	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
121 }
122 
123 void
124 xfs_inode_free(
125 	struct xfs_inode	*ip)
126 {
127 	ASSERT(!xfs_isiflocked(ip));
128 
129 	/*
130 	 * Because we use RCU freeing we need to ensure the inode always
131 	 * appears to be reclaimed with an invalid inode number when in the
132 	 * free state. The ip->i_flags_lock provides the barrier against lookup
133 	 * races.
134 	 */
135 	spin_lock(&ip->i_flags_lock);
136 	ip->i_flags = XFS_IRECLAIM;
137 	ip->i_ino = 0;
138 	spin_unlock(&ip->i_flags_lock);
139 
140 	__xfs_inode_free(ip);
141 }
142 
143 /*
144  * Queue a new inode reclaim pass if there are reclaimable inodes and there
145  * isn't a reclaim pass already in progress. By default it runs every 5s based
146  * on the xfs periodic sync default of 30s. Perhaps this should have its own
147  * tunable, but that can be done if this method proves to be ineffective or too
148  * aggressive.
149  */
150 static void
151 xfs_reclaim_work_queue(
152 	struct xfs_mount        *mp)
153 {
154 
155 	rcu_read_lock();
156 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
157 		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
158 			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
159 	}
160 	rcu_read_unlock();
161 }
162 
163 /*
164  * This is a fast pass over the inode cache to try to get reclaim moving on as
165  * many inodes as possible in a short period of time. It kicks itself every few
166  * seconds, as well as being kicked by the inode cache shrinker when memory
167  * goes low. It scans as quickly as possible avoiding locked inodes or those
168  * already being flushed, and once done schedules a future pass.
169  */
170 void
171 xfs_reclaim_worker(
172 	struct work_struct *work)
173 {
174 	struct xfs_mount *mp = container_of(to_delayed_work(work),
175 					struct xfs_mount, m_reclaim_work);
176 
177 	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
178 	xfs_reclaim_work_queue(mp);
179 }
180 
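/*
 * Account another reclaimable inode in this AG. On the transition from
 * zero, propagate the reclaim tag up into the per-mount perag radix tree
 * and schedule the background reclaim worker.
 */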
181 static void
182 xfs_perag_set_reclaim_tag(
183 	struct xfs_perag	*pag)
184 {
185 	struct xfs_mount	*mp = pag->pag_mount;
186 
187 	lockdep_assert_held(&pag->pag_ici_lock);
188 	if (pag->pag_ici_reclaimable++)
189 		return;
190 
191 	/* propagate the reclaim tag up into the perag radix tree */
192 	spin_lock(&mp->m_perag_lock);
193 	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
194 			   XFS_ICI_RECLAIM_TAG);
195 	spin_unlock(&mp->m_perag_lock);
196 
197 	/* schedule periodic background inode reclaim */
198 	xfs_reclaim_work_queue(mp);
199 
200 	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
201 }
202 
203 static void
204 xfs_perag_clear_reclaim_tag(
205 	struct xfs_perag	*pag)
206 {
207 	struct xfs_mount	*mp = pag->pag_mount;
208 
209 	lockdep_assert_held(&pag->pag_ici_lock);
210 	if (--pag->pag_ici_reclaimable)
211 		return;
212 
213 	/* clear the reclaim tag from the perag radix tree */
214 	spin_lock(&mp->m_perag_lock);
215 	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
216 			     XFS_ICI_RECLAIM_TAG);
217 	spin_unlock(&mp->m_perag_lock);
218 	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
219 }
220 
221 
222 /*
223  * We set the inode flag atomically with the radix tree tag.
224  * Once we get tag lookups on the radix tree, this inode flag
225  * can go away.
226  */
227 void
228 xfs_inode_set_reclaim_tag(
229 	struct xfs_inode	*ip)
230 {
231 	struct xfs_mount	*mp = ip->i_mount;
232 	struct xfs_perag	*pag;
233 
234 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
235 	spin_lock(&pag->pag_ici_lock);
236 	spin_lock(&ip->i_flags_lock);
237 
238 	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
239 			   XFS_ICI_RECLAIM_TAG);
240 	xfs_perag_set_reclaim_tag(pag);
241 	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
242 
243 	spin_unlock(&ip->i_flags_lock);
244 	spin_unlock(&pag->pag_ici_lock);
245 	xfs_perag_put(pag);
246 }
247 
248 STATIC void
249 xfs_inode_clear_reclaim_tag(
250 	struct xfs_perag	*pag,
251 	xfs_ino_t		ino)
252 {
253 	radix_tree_tag_clear(&pag->pag_ici_root,
254 			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
255 			     XFS_ICI_RECLAIM_TAG);
256 	xfs_perag_clear_reclaim_tag(pag);
257 }
258 
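/*
 * Wait for a concurrently initialising inode to become usable, i.e. for
 * whoever set XFS_INEW to clear it and wake us.
 */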
259 static void
260 xfs_inew_wait(
261 	struct xfs_inode	*ip)
262 {
263 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
264 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
265 
266 	do {
267 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
268 		if (!xfs_iflags_test(ip, XFS_INEW))
269 			break;
270 		schedule();
271 	} while (true);
272 	finish_wait(wq, &wait.wq_entry);
273 }
274 
275 /*
276  * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
277  * part of the structure. This is made more complex by the fact we store
278  * information about the on-disk values in the VFS inode and so we can't just
279  * overwrite the values unconditionally. Hence we save the parameters we
280  * need to retain across reinitialisation, and rewrite them into the VFS inode
281  * after reinitialisation even if it fails.
282  */
283 static int
284 xfs_reinit_inode(
285 	struct xfs_mount	*mp,
286 	struct inode		*inode)
287 {
288 	int		error;
289 	uint32_t	nlink = inode->i_nlink;
290 	uint32_t	generation = inode->i_generation;
291 	uint64_t	version = inode_peek_iversion(inode);
292 	umode_t		mode = inode->i_mode;
293 	dev_t		dev = inode->i_rdev;
294 
295 	error = inode_init_always(mp->m_super, inode);
296 
297 	set_nlink(inode, nlink);
298 	inode->i_generation = generation;
299 	inode_set_iversion_queried(inode, version);
300 	inode->i_mode = mode;
301 	inode->i_rdev = dev;
302 	return error;
303 }
304 
305 /*
306  * If we are allocating a new inode, then check what was returned is
307  * actually a free, empty inode. If we are not allocating an inode,
308  * then check we didn't find a free inode.
309  *
310  * Returns:
311  *	0		if the inode free state matches the lookup context
312  *	-ENOENT		if the inode is free and we are not allocating
313  *	-EFSCORRUPTED	if there is any state mismatch at all
314  */
315 static int
316 xfs_iget_check_free_state(
317 	struct xfs_inode	*ip,
318 	int			flags)
319 {
320 	if (flags & XFS_IGET_CREATE) {
321 		/* should be a free inode */
322 		if (VFS_I(ip)->i_mode != 0) {
323 			xfs_warn(ip->i_mount,
324 "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
325 				ip->i_ino, VFS_I(ip)->i_mode);
326 			return -EFSCORRUPTED;
327 		}
328 
329 		if (ip->i_d.di_nblocks != 0) {
330 			xfs_warn(ip->i_mount,
331 "Corruption detected! Free inode 0x%llx has blocks allocated!",
332 				ip->i_ino);
333 			return -EFSCORRUPTED;
334 		}
335 		return 0;
336 	}
337 
338 	/* should be an allocated inode */
339 	if (VFS_I(ip)->i_mode == 0)
340 		return -ENOENT;
341 
342 	return 0;
343 }
344 
345 /*
346  * Check the validity of the inode we just found in the cache
347  */
348 static int
349 xfs_iget_cache_hit(
350 	struct xfs_perag	*pag,
351 	struct xfs_inode	*ip,
352 	xfs_ino_t		ino,
353 	int			flags,
354 	int			lock_flags) __releases(RCU)
355 {
356 	struct inode		*inode = VFS_I(ip);
357 	struct xfs_mount	*mp = ip->i_mount;
358 	int			error;
359 
360 	/*
361 	 * check for re-use of an inode within an RCU grace period due to the
362 	 * radix tree nodes not being updated yet. We monitor for this by
363 	 * setting the inode number to zero before freeing the inode structure.
364 	 * If the inode has been reallocated and set up, then the inode number
365 	 * will not match, so check for that, too.
366 	 */
367 	spin_lock(&ip->i_flags_lock);
368 	if (ip->i_ino != ino) {
369 		trace_xfs_iget_skip(ip);
370 		XFS_STATS_INC(mp, xs_ig_frecycle);
371 		error = -EAGAIN;
372 		goto out_error;
373 	}
374 
375 
376 	/*
377 	 * If we are racing with another cache hit that is currently
378 	 * instantiating this inode or currently recycling it out of
379 	 * reclaimable state, wait for the initialisation to complete
380 	 * before continuing.
381 	 *
382 	 * XXX(hch): eventually we should do something equivalent to
383 	 *	     wait_on_inode to wait for these flags to be cleared
384 	 *	     instead of polling for it.
385 	 */
386 	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
387 		trace_xfs_iget_skip(ip);
388 		XFS_STATS_INC(mp, xs_ig_frecycle);
389 		error = -EAGAIN;
390 		goto out_error;
391 	}
392 
393 	/*
394 	 * Check the inode free state is valid. This also detects lookup
395 	 * racing with unlinks.
396 	 */
397 	error = xfs_iget_check_free_state(ip, flags);
398 	if (error)
399 		goto out_error;
400 
401 	/*
402 	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
403 	 * Need to carefully get it back into usable state.
404 	 */
405 	if (ip->i_flags & XFS_IRECLAIMABLE) {
406 		trace_xfs_iget_reclaim(ip);
407 
408 		if (flags & XFS_IGET_INCORE) {
409 			error = -EAGAIN;
410 			goto out_error;
411 		}
412 
413 		/*
414 		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
415 		 * from stomping over us while we recycle the inode.  We can't
416 		 * clear the radix tree reclaimable tag yet as it requires
417 		 * pag_ici_lock to be held exclusive.
418 		 */
419 		ip->i_flags |= XFS_IRECLAIM;
420 
421 		spin_unlock(&ip->i_flags_lock);
422 		rcu_read_unlock();
423 
424 		error = xfs_reinit_inode(mp, inode);
425 		if (error) {
426 			bool wake;
427 			/*
428 			 * Re-initializing the inode failed, and we are in deep
429 			 * trouble.  Try to re-add it to the reclaim list.
430 			 */
431 			rcu_read_lock();
432 			spin_lock(&ip->i_flags_lock);
433 			wake = !!__xfs_iflags_test(ip, XFS_INEW);
434 			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
435 			if (wake)
436 				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
437 			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
438 			trace_xfs_iget_reclaim_fail(ip);
439 			goto out_error;
440 		}
441 
442 		spin_lock(&pag->pag_ici_lock);
443 		spin_lock(&ip->i_flags_lock);
444 
445 		/*
446 		 * Clear the per-lifetime state in the inode as we are now
447 		 * effectively a new inode and need to return to the initial
448 		 * state before reuse occurs.
449 		 */
450 		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
451 		ip->i_flags |= XFS_INEW;
452 		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
453 		inode->i_state = I_NEW;
454 		ip->i_sick = 0;
455 		ip->i_checked = 0;
456 
457 		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
458 		init_rwsem(&inode->i_rwsem);
459 
460 		spin_unlock(&ip->i_flags_lock);
461 		spin_unlock(&pag->pag_ici_lock);
462 	} else {
463 		/* If the VFS inode is being torn down, pause and try again. */
464 		if (!igrab(inode)) {
465 			trace_xfs_iget_skip(ip);
466 			error = -EAGAIN;
467 			goto out_error;
468 		}
469 
470 		/* We've got a live one. */
471 		spin_unlock(&ip->i_flags_lock);
472 		rcu_read_unlock();
473 		trace_xfs_iget_hit(ip);
474 	}
475 
476 	if (lock_flags != 0)
477 		xfs_ilock(ip, lock_flags);
478 
479 	if (!(flags & XFS_IGET_INCORE))
480 		xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
481 	XFS_STATS_INC(mp, xs_ig_found);
482 
483 	return 0;
484 
485 out_error:
486 	spin_unlock(&ip->i_flags_lock);
487 	rcu_read_unlock();
488 	return error;
489 }
490 
491 
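/*
 * The inode was not found in the cache: allocate a new in-core inode,
 * read it from disk, and insert it into the per-AG radix tree with
 * XFS_INEW set so that concurrent RCU lookups see it as still under
 * construction.
 */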
492 static int
493 xfs_iget_cache_miss(
494 	struct xfs_mount	*mp,
495 	struct xfs_perag	*pag,
496 	xfs_trans_t		*tp,
497 	xfs_ino_t		ino,
498 	struct xfs_inode	**ipp,
499 	int			flags,
500 	int			lock_flags)
501 {
502 	struct xfs_inode	*ip;
503 	int			error;
504 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
505 	int			iflags;
506 
507 	ip = xfs_inode_alloc(mp, ino);
508 	if (!ip)
509 		return -ENOMEM;
510 
511 	error = xfs_iread(mp, tp, ip, flags);
512 	if (error)
513 		goto out_destroy;
514 
515 	if (!xfs_inode_verify_forks(ip)) {
516 		error = -EFSCORRUPTED;
517 		goto out_destroy;
518 	}
519 
520 	trace_xfs_iget_miss(ip);
521 
522 
523 	/*
524 	 * Check the inode free state is valid. This also detects lookup
525 	 * racing with unlinks.
526 	 */
527 	error = xfs_iget_check_free_state(ip, flags);
528 	if (error)
529 		goto out_destroy;
530 
531 	/*
532 	 * Preload the radix tree so we can insert safely under the
533 	 * write spinlock. Note that we cannot sleep inside the preload
534 	 * region. Since we can be called from transaction context, don't
535 	 * recurse into the file system.
536 	 */
537 	if (radix_tree_preload(GFP_NOFS)) {
538 		error = -EAGAIN;
539 		goto out_destroy;
540 	}
541 
542 	/*
543 	 * Because the inode hasn't been added to the radix-tree yet it can't
544 	 * be found by another thread, so we can do the non-sleeping lock here.
545 	 */
546 	if (lock_flags) {
547 		if (!xfs_ilock_nowait(ip, lock_flags))
548 			BUG();
549 	}
550 
551 	/*
552 	 * These values must be set before inserting the inode into the radix
553 	 * tree as the moment it is inserted a concurrent lookup (allowed by the
554 	 * RCU locking mechanism) can find it and that lookup must see that this
555 	 * is an inode currently under construction (i.e. that XFS_INEW is set).
556 	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
557 	 * memory barrier that ensures this detection works correctly at lookup
558 	 * time.
559 	 */
560 	iflags = XFS_INEW;
561 	if (flags & XFS_IGET_DONTCACHE)
562 		iflags |= XFS_IDONTCACHE;
563 	ip->i_udquot = NULL;
564 	ip->i_gdquot = NULL;
565 	ip->i_pdquot = NULL;
566 	xfs_iflags_set(ip, iflags);
567 
568 	/* insert the new inode */
569 	spin_lock(&pag->pag_ici_lock);
570 	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
571 	if (unlikely(error)) {
572 		WARN_ON(error != -EEXIST);
573 		XFS_STATS_INC(mp, xs_ig_dup);
574 		error = -EAGAIN;
575 		goto out_preload_end;
576 	}
577 	spin_unlock(&pag->pag_ici_lock);
578 	radix_tree_preload_end();
579 
580 	*ipp = ip;
581 	return 0;
582 
583 out_preload_end:
584 	spin_unlock(&pag->pag_ici_lock);
585 	radix_tree_preload_end();
586 	if (lock_flags)
587 		xfs_iunlock(ip, lock_flags);
588 out_destroy:
589 	__destroy_inode(VFS_I(ip));
590 	xfs_inode_free(ip);
591 	return error;
592 }
593 
594 /*
595  * Look up an inode by number in the given file system.
596  * The inode is looked up in the cache held in each AG.
597  * If the inode is found in the cache, initialise the vfs inode
598  * if necessary.
599  *
600  * If it is not in core, read it in from the file system's device,
601  * add it to the cache and initialise the vfs inode.
602  *
603  * The inode is locked according to the value of the lock_flags parameter.
604  * This flag parameter indicates how and if the inode's IO lock and inode lock
605  * should be taken.
606  *
607  * mp -- the mount point structure for the current file system.  It points
608  *       to the inode hash table.
609  * tp -- a pointer to the current transaction if there is one.  This is
610  *       simply passed through to the xfs_iread() call.
611  * ino -- the number of the inode desired.  This is the unique identifier
612  *        within the file system for the inode being requested.
613  * lock_flags -- flags indicating how to lock the inode.  See the comment
614  *		 for xfs_ilock() for a list of valid values.
615  */
616 int
617 xfs_iget(
618 	xfs_mount_t	*mp,
619 	xfs_trans_t	*tp,
620 	xfs_ino_t	ino,
621 	uint		flags,
622 	uint		lock_flags,
623 	xfs_inode_t	**ipp)
624 {
625 	xfs_inode_t	*ip;
626 	int		error;
627 	xfs_perag_t	*pag;
628 	xfs_agino_t	agino;
629 
630 	/*
631 	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
632 	 * doesn't get freed while it's being referenced during a
633 	 * radix tree traversal here.  It assumes this function
634 	 * acquires only the ILOCK (and therefore it has no need to
635 	 * involve the IOLOCK in this synchronization).
636 	 */
637 	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
638 
639 	/* reject inode numbers outside existing AGs */
640 	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
641 		return -EINVAL;
642 
643 	XFS_STATS_INC(mp, xs_ig_attempts);
644 
645 	/* get the perag structure and ensure that it's inode capable */
646 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
647 	agino = XFS_INO_TO_AGINO(mp, ino);
648 
649 again:
650 	error = 0;
651 	rcu_read_lock();
652 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
653 
654 	if (ip) {
655 		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
656 		if (error)
657 			goto out_error_or_again;
658 	} else {
659 		rcu_read_unlock();
660 		if (flags & XFS_IGET_INCORE) {
661 			error = -ENODATA;
662 			goto out_error_or_again;
663 		}
664 		XFS_STATS_INC(mp, xs_ig_missed);
665 
666 		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
667 							flags, lock_flags);
668 		if (error)
669 			goto out_error_or_again;
670 	}
671 	xfs_perag_put(pag);
672 
673 	*ipp = ip;
674 
675 	/*
676 	 * If we have a real type for an on-disk inode, we can setup the inode
677 	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
678 	 */
679 	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
680 		xfs_setup_existing_inode(ip);
681 	return 0;
682 
683 out_error_or_again:
684 	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
685 		delay(1);
686 		goto again;
687 	}
688 	xfs_perag_put(pag);
689 	return error;
690 }
691 
692 /*
693  * "Is this a cached inode that's also allocated?"
694  *
695  * Look up an inode by number in the given file system.  If the inode is
696  * in cache and isn't in purgatory, return 1 if the inode is allocated
697  * and 0 if it is not.  For all other cases (not in cache, being torn
698  * down, etc.), return a negative error code.
699  *
700  * The caller has to prevent inode allocation and freeing activity,
701  * presumably by locking the AGI buffer.   This is to ensure that an
702  * inode cannot transition from allocated to freed until the caller is
703  * ready to allow that.  If the inode is in an intermediate state (new,
704  * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
705  * inode is not in the cache, -ENOENT will be returned.  The caller must
706  * deal with these scenarios appropriately.
707  *
708  * This is a specialized use case for the online scrubber; if you're
709  * reading this, you probably want xfs_iget.
710  */
711 int
712 xfs_icache_inode_is_allocated(
713 	struct xfs_mount	*mp,
714 	struct xfs_trans	*tp,
715 	xfs_ino_t		ino,
716 	bool			*inuse)
717 {
718 	struct xfs_inode	*ip;
719 	int			error;
720 
721 	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
722 	if (error)
723 		return error;
724 
725 	*inuse = !!(VFS_I(ip)->i_mode);
726 	xfs_irele(ip);
727 	return 0;
728 }
729 
730 /*
731  * The inode lookup is done in batches to keep the amount of lock traffic and
732  * radix tree lookups to a minimum. The batch size is a trade-off between
733  * lookup reduction and stack usage. This is in the reclaim path, so we can't
734  * be too greedy.
735  */
736 #define XFS_LOOKUP_BATCH	32
737 
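/*
 * Try to take a reference to an inode found during an AG walk. Returns
 * 0 if the inode was grabbed, or a negative error if it is stale, new,
 * reclaimable or already being torn down.
 */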
738 STATIC int
739 xfs_inode_ag_walk_grab(
740 	struct xfs_inode	*ip,
741 	int			flags)
742 {
743 	struct inode		*inode = VFS_I(ip);
744 	bool			newinos = !!(flags & XFS_AGITER_INEW_WAIT);
745 
746 	ASSERT(rcu_read_lock_held());
747 
748 	/*
749 	 * check for stale RCU freed inode
750 	 *
751 	 * If the inode has been reallocated, it doesn't matter if it's not in
752 	 * the AG we are walking - we are walking for writeback, so if it
753 	 * passes all the "valid inode" checks and is dirty, then we'll write
754 	 * it back anyway.  If it has been reallocated and is still being
755 	 * initialised, the XFS_INEW check below will catch it.
756 	 */
757 	spin_lock(&ip->i_flags_lock);
758 	if (!ip->i_ino)
759 		goto out_unlock_noent;
760 
761 	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
762 	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
763 	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
764 		goto out_unlock_noent;
765 	spin_unlock(&ip->i_flags_lock);
766 
767 	/* nothing to sync during shutdown */
768 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
769 		return -EFSCORRUPTED;
770 
771 	/* If we can't grab the inode, it must be on its way to reclaim. */
772 	if (!igrab(inode))
773 		return -ENOENT;
774 
775 	/* inode is valid */
776 	return 0;
777 
778 out_unlock_noent:
779 	spin_unlock(&ip->i_flags_lock);
780 	return -ENOENT;
781 }
782 
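/*
 * Walk the in-core inodes of one AG in batches of XFS_LOOKUP_BATCH and
 * call the execute callback on each inode we can grab. A tag of -1 walks
 * all inodes; otherwise only inodes carrying that radix tree tag are
 * visited.
 */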
783 STATIC int
784 xfs_inode_ag_walk(
785 	struct xfs_mount	*mp,
786 	struct xfs_perag	*pag,
787 	int			(*execute)(struct xfs_inode *ip, int flags,
788 					   void *args),
789 	int			flags,
790 	void			*args,
791 	int			tag,
792 	int			iter_flags)
793 {
794 	uint32_t		first_index;
795 	int			last_error = 0;
796 	int			skipped;
797 	int			done;
798 	int			nr_found;
799 
800 restart:
801 	done = 0;
802 	skipped = 0;
803 	first_index = 0;
804 	nr_found = 0;
805 	do {
806 		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
807 		int		error = 0;
808 		int		i;
809 
810 		rcu_read_lock();
811 
812 		if (tag == -1)
813 			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
814 					(void **)batch, first_index,
815 					XFS_LOOKUP_BATCH);
816 		else
817 			nr_found = radix_tree_gang_lookup_tag(
818 					&pag->pag_ici_root,
819 					(void **) batch, first_index,
820 					XFS_LOOKUP_BATCH, tag);
821 
822 		if (!nr_found) {
823 			rcu_read_unlock();
824 			break;
825 		}
826 
827 		/*
828 		 * Grab the inodes before we drop the lock. If we found
829 		 * nothing, nr_found == 0 and the loop will be skipped.
830 		 */
831 		for (i = 0; i < nr_found; i++) {
832 			struct xfs_inode *ip = batch[i];
833 
834 			if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
835 				batch[i] = NULL;
836 
837 			/*
838 			 * Update the index for the next lookup. Catch
839 			 * overflows into the next AG range which can occur if
840 			 * we have inodes in the last block of the AG and we
841 			 * are currently pointing to the last inode.
842 			 *
843 			 * Because we may see inodes that are from the wrong AG
844 			 * due to RCU freeing and reallocation, only update the
845 			 * index if it lies in this AG. It was a race that led
846 			 * us to see this inode, so another lookup from the
847 			 * same index will not find it again.
848 			 */
849 			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
850 				continue;
851 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
852 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
853 				done = 1;
854 		}
855 
856 		/* unlock now we've grabbed the inodes. */
857 		rcu_read_unlock();
858 
859 		for (i = 0; i < nr_found; i++) {
860 			if (!batch[i])
861 				continue;
862 			if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
863 			    xfs_iflags_test(batch[i], XFS_INEW))
864 				xfs_inew_wait(batch[i]);
865 			error = execute(batch[i], flags, args);
866 			xfs_irele(batch[i]);
867 			if (error == -EAGAIN) {
868 				skipped++;
869 				continue;
870 			}
871 			if (error && last_error != -EFSCORRUPTED)
872 				last_error = error;
873 		}
874 
875 		/* bail out if the filesystem is corrupted.  */
876 		if (error == -EFSCORRUPTED)
877 			break;
878 
879 		cond_resched();
880 
881 	} while (nr_found && !done);
882 
883 	if (skipped) {
884 		delay(1);
885 		goto restart;
886 	}
887 	return last_error;
888 }
889 
890 /*
891  * Background scanning to trim post-EOF preallocated space. This is queued
892  * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
893  */
894 void
895 xfs_queue_eofblocks(
896 	struct xfs_mount *mp)
897 {
898 	rcu_read_lock();
899 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
900 		queue_delayed_work(mp->m_eofblocks_workqueue,
901 				   &mp->m_eofblocks_work,
902 				   msecs_to_jiffies(xfs_eofb_secs * 1000));
903 	rcu_read_unlock();
904 }
905 
906 void
907 xfs_eofblocks_worker(
908 	struct work_struct *work)
909 {
910 	struct xfs_mount *mp = container_of(to_delayed_work(work),
911 				struct xfs_mount, m_eofblocks_work);
912 	xfs_icache_free_eofblocks(mp, NULL);
913 	xfs_queue_eofblocks(mp);
914 }
915 
916 /*
917  * Background scanning to trim preallocated CoW space. This is queued
918  * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
919  * (We'll just piggyback on the post-EOF prealloc space workqueue.)
920  */
921 void
922 xfs_queue_cowblocks(
923 	struct xfs_mount *mp)
924 {
925 	rcu_read_lock();
926 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
927 		queue_delayed_work(mp->m_eofblocks_workqueue,
928 				   &mp->m_cowblocks_work,
929 				   msecs_to_jiffies(xfs_cowb_secs * 1000));
930 	rcu_read_unlock();
931 }
932 
933 void
934 xfs_cowblocks_worker(
935 	struct work_struct *work)
936 {
937 	struct xfs_mount *mp = container_of(to_delayed_work(work),
938 				struct xfs_mount, m_cowblocks_work);
939 	xfs_icache_free_cowblocks(mp, NULL);
940 	xfs_queue_cowblocks(mp);
941 }
942 
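/*
 * Apply the execute callback to every grabbable in-core inode in the
 * filesystem, iterating AG by AG with an untagged walk.
 */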
943 int
944 xfs_inode_ag_iterator_flags(
945 	struct xfs_mount	*mp,
946 	int			(*execute)(struct xfs_inode *ip, int flags,
947 					   void *args),
948 	int			flags,
949 	void			*args,
950 	int			iter_flags)
951 {
952 	struct xfs_perag	*pag;
953 	int			error = 0;
954 	int			last_error = 0;
955 	xfs_agnumber_t		ag;
956 
957 	ag = 0;
958 	while ((pag = xfs_perag_get(mp, ag))) {
959 		ag = pag->pag_agno + 1;
960 		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
961 					  iter_flags);
962 		xfs_perag_put(pag);
963 		if (error) {
964 			last_error = error;
965 			if (error == -EFSCORRUPTED)
966 				break;
967 		}
968 	}
969 	return last_error;
970 }
971 
972 int
973 xfs_inode_ag_iterator(
974 	struct xfs_mount	*mp,
975 	int			(*execute)(struct xfs_inode *ip, int flags,
976 					   void *args),
977 	int			flags,
978 	void			*args)
979 {
980 	return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
981 }
982 
983 int
984 xfs_inode_ag_iterator_tag(
985 	struct xfs_mount	*mp,
986 	int			(*execute)(struct xfs_inode *ip, int flags,
987 					   void *args),
988 	int			flags,
989 	void			*args,
990 	int			tag)
991 {
992 	struct xfs_perag	*pag;
993 	int			error = 0;
994 	int			last_error = 0;
995 	xfs_agnumber_t		ag;
996 
997 	ag = 0;
998 	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
999 		ag = pag->pag_agno + 1;
1000 		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
1001 					  0);
1002 		xfs_perag_put(pag);
1003 		if (error) {
1004 			last_error = error;
1005 			if (error == -EFSCORRUPTED)
1006 				break;
1007 		}
1008 	}
1009 	return last_error;
1010 }
1011 
1012 /*
1013  * Grab the inode for reclaim exclusively.
1014  * Return 0 if we grabbed it, non-zero otherwise.
1015  */
1016 STATIC int
1017 xfs_reclaim_inode_grab(
1018 	struct xfs_inode	*ip,
1019 	int			flags)
1020 {
1021 	ASSERT(rcu_read_lock_held());
1022 
1023 	/* quick check for stale RCU freed inode */
1024 	if (!ip->i_ino)
1025 		return 1;
1026 
1027 	/*
1028 	 * If we are asked for non-blocking operation, do unlocked checks to
1029 	 * see if the inode already is being flushed or in reclaim to avoid
1030 	 * lock traffic.
1031 	 */
1032 	if ((flags & SYNC_TRYLOCK) &&
1033 	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
1034 		return 1;
1035 
1036 	/*
1037 	 * The radix tree lock here protects a thread in xfs_iget from racing
1038 	 * with us starting reclaim on the inode.  Once we have the
1039 	 * XFS_IRECLAIM flag set it will not touch us.
1040 	 *
1041 	 * Due to RCU lookup, we may find inodes that have been freed and only
1042 	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
1043 	 * aren't candidates for reclaim at all, so we must check the
1044 	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
1045 	 */
1046 	spin_lock(&ip->i_flags_lock);
1047 	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
1048 	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
1049 		/* not a reclaim candidate. */
1050 		spin_unlock(&ip->i_flags_lock);
1051 		return 1;
1052 	}
1053 	__xfs_iflags_set(ip, XFS_IRECLAIM);
1054 	spin_unlock(&ip->i_flags_lock);
1055 	return 0;
1056 }
1057 
1058 /*
1059  * Inodes in different states need to be treated differently. The following
1060  * table lists the inode states and the reclaim actions necessary:
1061  *
1062  *	inode state	     iflush ret		required action
1063  *      ---------------      ----------         ---------------
1064  *	bad			-		reclaim
1065  *	shutdown		EIO		unpin and reclaim
1066  *	clean, unpinned		0		reclaim
1067  *	stale, unpinned		0		reclaim
1068  *	clean, pinned(*)	0		requeue
1069  *	stale, pinned		EAGAIN		requeue
1070  *	dirty, async		-		requeue
1071  *	dirty, sync		0		reclaim
1072  *
1073  * (*) dgc: I don't think the clean, pinned state is possible but it gets
1074  * handled anyway given the order of checks implemented.
1075  *
1076  * Also, because we get the flush lock first, we know that any inode that has
1077  * been flushed delwri has had the flush completed by the time we check that
1078  * the inode is clean.
1079  *
1080  * Note that because the inode is flushed delayed write by AIL pushing, the
1081  * flush lock may already be held here and waiting on it can result in very
1082  * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
1083  * the caller should push the AIL first before trying to reclaim inodes to
1084  * minimise the amount of time spent waiting.  For background reclaim, we only
1085  * bother to reclaim clean inodes anyway.
1086  *
1087  * Hence the order of actions after gaining the locks should be:
1088  *	bad		=> reclaim
1089  *	shutdown	=> unpin and reclaim
1090  *	pinned, async	=> requeue
1091  *	pinned, sync	=> unpin
1092  *	stale		=> reclaim
1093  *	clean		=> reclaim
1094  *	dirty, async	=> requeue
1095  *	dirty, sync	=> flush, wait and reclaim
1096  */
1097 STATIC int
1098 xfs_reclaim_inode(
1099 	struct xfs_inode	*ip,
1100 	struct xfs_perag	*pag,
1101 	int			sync_mode)
1102 {
1103 	struct xfs_buf		*bp = NULL;
1104 	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
1105 	int			error;
1106 
1107 restart:
1108 	error = 0;
1109 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1110 	if (!xfs_iflock_nowait(ip)) {
1111 		if (!(sync_mode & SYNC_WAIT))
1112 			goto out;
1113 		xfs_iflock(ip);
1114 	}
1115 
1116 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1117 		xfs_iunpin_wait(ip);
1118 		/* xfs_iflush_abort() drops the flush lock */
1119 		xfs_iflush_abort(ip, false);
1120 		goto reclaim;
1121 	}
1122 	if (xfs_ipincount(ip)) {
1123 		if (!(sync_mode & SYNC_WAIT))
1124 			goto out_ifunlock;
1125 		xfs_iunpin_wait(ip);
1126 	}
1127 	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
1128 		xfs_ifunlock(ip);
1129 		goto reclaim;
1130 	}
1131 
1132 	/*
1133 	 * Never flush out dirty data during non-blocking reclaim, as it would
1134 	 * just contend with AIL pushing trying to do the same job.
1135 	 */
1136 	if (!(sync_mode & SYNC_WAIT))
1137 		goto out_ifunlock;
1138 
1139 	/*
1140 	 * Now we have an inode that needs flushing.
1141 	 *
1142 	 * Note that xfs_iflush will never block on the inode buffer lock, as
1143 	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
1144 	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
1145 	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
1146 	 * result in an ABBA deadlock with xfs_ifree_cluster().
1147 	 *
1148 	 * As xfs_ifree_cluster() must gather all inodes that are active in the
1149 	 * cache to mark them stale, if we hit this case we don't actually want
1150 	 * to do IO here - we want the inode marked stale so we can simply
1151 	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
1152 	 * inode, back off and try again.  Hopefully the next pass through will
1153 	 * see the stale flag set on the inode.
1154 	 */
1155 	error = xfs_iflush(ip, &bp);
1156 	if (error == -EAGAIN) {
1157 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1158 		/* backoff longer than in xfs_ifree_cluster */
1159 		delay(2);
1160 		goto restart;
1161 	}
1162 
1163 	if (!error) {
1164 		error = xfs_bwrite(bp);
1165 		xfs_buf_relse(bp);
1166 	}
1167 
1168 reclaim:
1169 	ASSERT(!xfs_isiflocked(ip));
1170 
1171 	/*
1172 	 * Because we use RCU freeing we need to ensure the inode always appears
1173 	 * to be reclaimed with an invalid inode number when in the free state.
1174 	 * We do this as early as possible under the ILOCK so that
1175 	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
1176 	 * detect races with us here. By doing this, we guarantee that once
1177 	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
1178 	 * it will see either a valid inode that will serialise correctly, or it
1179 	 * will see an invalid inode that it can skip.
1180 	 */
1181 	spin_lock(&ip->i_flags_lock);
1182 	ip->i_flags = XFS_IRECLAIM;
1183 	ip->i_ino = 0;
1184 	spin_unlock(&ip->i_flags_lock);
1185 
1186 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1187 
1188 	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1189 	/*
1190 	 * Remove the inode from the per-AG radix tree.
1191 	 *
1192 	 * Because radix_tree_delete won't complain even if the item was never
1193 	 * added to the tree, assert that it's been there before to catch
1194 	 * problems with the inode lifetime early on.
1195 	 */
1196 	spin_lock(&pag->pag_ici_lock);
1197 	if (!radix_tree_delete(&pag->pag_ici_root,
1198 				XFS_INO_TO_AGINO(ip->i_mount, ino)))
1199 		ASSERT(0);
1200 	xfs_perag_clear_reclaim_tag(pag);
1201 	spin_unlock(&pag->pag_ici_lock);
1202 
1203 	/*
1204 	 * Here we do an (almost) spurious inode lock in order to coordinate
1205 	 * with inode cache radix tree lookups.  This is because the lookup
1206 	 * can reference the inodes in the cache without taking references.
1207 	 *
1208 	 * We make that OK here by ensuring that we wait until the inode is
1209 	 * unlocked after the lookup before we go ahead and free it.
1210 	 */
1211 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1212 	xfs_qm_dqdetach(ip);
1213 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1214 
1215 	__xfs_inode_free(ip);
1216 	return error;
1217 
1218 out_ifunlock:
1219 	xfs_ifunlock(ip);
1220 out:
1221 	xfs_iflags_clear(ip, XFS_IRECLAIM);
1222 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1223 	/*
1224 	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
1225 	 * a short while. However, this just burns CPU time scanning the tree
1226 	 * waiting for IO to complete and the reclaim work never goes back to
1227 	 * the idle state. Instead, return 0 to let the next scheduled
1228 	 * background reclaim attempt to reclaim the inode again.
1229 	 */
1230 	return 0;
1231 }
1232 
1233 /*
1234  * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1235  * corrupted, we still want to try to reclaim all the inodes. If we don't,
1236  * then a shutdown during the filesystem unmount reclaim walk will leak all the
1237  * unreclaimed inodes.
1238  */
1239 STATIC int
1240 xfs_reclaim_inodes_ag(
1241 	struct xfs_mount	*mp,
1242 	int			flags,
1243 	int			*nr_to_scan)
1244 {
1245 	struct xfs_perag	*pag;
1246 	int			error = 0;
1247 	int			last_error = 0;
1248 	xfs_agnumber_t		ag;
1249 	int			trylock = flags & SYNC_TRYLOCK;
1250 	int			skipped;
1251 
1252 restart:
1253 	ag = 0;
1254 	skipped = 0;
1255 	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1256 		unsigned long	first_index = 0;
1257 		int		done = 0;
1258 		int		nr_found = 0;
1259 
1260 		ag = pag->pag_agno + 1;
1261 
1262 		if (trylock) {
1263 			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
1264 				skipped++;
1265 				xfs_perag_put(pag);
1266 				continue;
1267 			}
1268 			first_index = pag->pag_ici_reclaim_cursor;
1269 		} else
1270 			mutex_lock(&pag->pag_ici_reclaim_lock);
1271 
1272 		do {
1273 			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1274 			int	i;
1275 
1276 			rcu_read_lock();
1277 			nr_found = radix_tree_gang_lookup_tag(
1278 					&pag->pag_ici_root,
1279 					(void **)batch, first_index,
1280 					XFS_LOOKUP_BATCH,
1281 					XFS_ICI_RECLAIM_TAG);
1282 			if (!nr_found) {
1283 				done = 1;
1284 				rcu_read_unlock();
1285 				break;
1286 			}
1287 
1288 			/*
1289 			 * Grab the inodes before we drop the lock. If we found
1290 			 * nothing, nr_found == 0 and the loop will be skipped.
1291 			 */
1292 			for (i = 0; i < nr_found; i++) {
1293 				struct xfs_inode *ip = batch[i];
1294 
1295 				if (done || xfs_reclaim_inode_grab(ip, flags))
1296 					batch[i] = NULL;
1297 
1298 				/*
1299 				 * Update the index for the next lookup. Catch
1300 				 * overflows into the next AG range which can
1301 				 * occur if we have inodes in the last block of
1302 				 * the AG and we are currently pointing to the
1303 				 * last inode.
1304 				 *
1305 				 * Because we may see inodes that are from the
1306 				 * wrong AG due to RCU freeing and
1307 				 * reallocation, only update the index if it
1308 				 * lies in this AG. It was a race that led us
1309 				 * to see this inode, so another lookup from
1310 				 * the same index will not find it again.
1311 				 */
1312 				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1313 								pag->pag_agno)
1314 					continue;
1315 				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1316 				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1317 					done = 1;
1318 			}
1319 
1320 			/* unlock now we've grabbed the inodes. */
1321 			rcu_read_unlock();
1322 
1323 			for (i = 0; i < nr_found; i++) {
1324 				if (!batch[i])
1325 					continue;
1326 				error = xfs_reclaim_inode(batch[i], pag, flags);
1327 				if (error && last_error != -EFSCORRUPTED)
1328 					last_error = error;
1329 			}
1330 
1331 			*nr_to_scan -= XFS_LOOKUP_BATCH;
1332 
1333 			cond_resched();
1334 
1335 		} while (nr_found && !done && *nr_to_scan > 0);
1336 
1337 		if (trylock && !done)
1338 			pag->pag_ici_reclaim_cursor = first_index;
1339 		else
1340 			pag->pag_ici_reclaim_cursor = 0;
1341 		mutex_unlock(&pag->pag_ici_reclaim_lock);
1342 		xfs_perag_put(pag);
1343 	}
1344 
1345 	/*
1346 	 * If we skipped any AG, and we still have scan count remaining, do
1347 	 * another pass, this time using blocking reclaim semantics (i.e.
1348 	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
1349 	 * ensures that when we get more reclaimers than AGs we block rather
1350 	 * than spin trying to execute reclaim.
1351 	 */
1352 	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
1353 		trylock = 0;
1354 		goto restart;
1355 	}
1356 	return last_error;
1357 }
1358 
1359 int
1360 xfs_reclaim_inodes(
1361 	xfs_mount_t	*mp,
1362 	int		mode)
1363 {
1364 	int		nr_to_scan = INT_MAX;
1365 
1366 	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
1367 }
1368 
1369 /*
1370  * Scan a certain number of inodes for reclaim.
1371  *
1372  * When called we make sure that there is a background (fast) inode reclaim in
1373  * progress, while we throttle the speed of reclaim by doing synchronous
1374  * reclaim of inodes. That means if we come across dirty inodes, we wait for
1375  * them to be cleaned, which we hope will not be very long due to the
1376  * background walker having already kicked the IO off on those dirty inodes.
1377  */
1378 long
1379 xfs_reclaim_inodes_nr(
1380 	struct xfs_mount	*mp,
1381 	int			nr_to_scan)
1382 {
1383 	/* kick background reclaimer and push the AIL */
1384 	xfs_reclaim_work_queue(mp);
1385 	xfs_ail_push_all(mp->m_ail);
1386 
1387 	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
1388 }
1389 
1390 /*
1391  * Return the number of reclaimable inodes in the filesystem for
1392  * the shrinker to determine how much to reclaim.
1393  */
1394 int
1395 xfs_reclaim_inodes_count(
1396 	struct xfs_mount	*mp)
1397 {
1398 	struct xfs_perag	*pag;
1399 	xfs_agnumber_t		ag = 0;
1400 	int			reclaimable = 0;
1401 
1402 	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1403 		ag = pag->pag_agno + 1;
1404 		reclaimable += pag->pag_ici_reclaimable;
1405 		xfs_perag_put(pag);
1406 	}
1407 	return reclaimable;
1408 }
1409 
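/*
 * Intersection-based inode filtering: the inode matches only if every uid,
 * gid or project id criterion set in the eofblocks control structure
 * matches.
 */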
1410 STATIC int
1411 xfs_inode_match_id(
1412 	struct xfs_inode	*ip,
1413 	struct xfs_eofblocks	*eofb)
1414 {
1415 	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1416 	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1417 		return 0;
1418 
1419 	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1420 	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1421 		return 0;
1422 
1423 	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1424 	    xfs_get_projid(ip) != eofb->eof_prid)
1425 		return 0;
1426 
1427 	return 1;
1428 }
1429 
1430 /*
1431  * A union-based inode filtering algorithm. Process the inode if any of the
1432  * criteria match. This is for global/internal scans only.
1433  */
1434 STATIC int
1435 xfs_inode_match_id_union(
1436 	struct xfs_inode	*ip,
1437 	struct xfs_eofblocks	*eofb)
1438 {
1439 	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1440 	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1441 		return 1;
1442 
1443 	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1444 	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1445 		return 1;
1446 
1447 	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1448 	    xfs_get_projid(ip) == eofb->eof_prid)
1449 		return 1;
1450 
1451 	return 0;
1452 }
1453 
1454 STATIC int
1455 xfs_inode_free_eofblocks(
1456 	struct xfs_inode	*ip,
1457 	int			flags,
1458 	void			*args)
1459 {
1460 	int ret = 0;
1461 	struct xfs_eofblocks *eofb = args;
1462 	int match;
1463 
1464 	if (!xfs_can_free_eofblocks(ip, false)) {
1465 		/* inode could be preallocated or append-only */
1466 		trace_xfs_inode_free_eofblocks_invalid(ip);
1467 		xfs_inode_clear_eofblocks_tag(ip);
1468 		return 0;
1469 	}
1470 
1471 	/*
1472 	 * If the mapping is dirty the operation can block and wait for some
1473 	 * time. Unless we are waiting, skip it.
1474 	 */
1475 	if (!(flags & SYNC_WAIT) &&
1476 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1477 		return 0;
1478 
1479 	if (eofb) {
1480 		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1481 			match = xfs_inode_match_id_union(ip, eofb);
1482 		else
1483 			match = xfs_inode_match_id(ip, eofb);
1484 		if (!match)
1485 			return 0;
1486 
1487 		/* skip the inode if the file size is too small */
1488 		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1489 		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
1490 			return 0;
1491 	}
1492 
1493 	/*
1494 	 * If the caller is waiting, return -EAGAIN to keep the background
1495 	 * scanner moving and revisit the inode in a subsequent pass.
1496 	 */
1497 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1498 		if (flags & SYNC_WAIT)
1499 			ret = -EAGAIN;
1500 		return ret;
1501 	}
1502 	ret = xfs_free_eofblocks(ip);
1503 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1504 
1505 	return ret;
1506 }
1507 
1508 static int
1509 __xfs_icache_free_eofblocks(
1510 	struct xfs_mount	*mp,
1511 	struct xfs_eofblocks	*eofb,
1512 	int			(*execute)(struct xfs_inode *ip, int flags,
1513 					   void *args),
1514 	int			tag)
1515 {
1516 	int flags = SYNC_TRYLOCK;
1517 
1518 	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
1519 		flags = SYNC_WAIT;
1520 
1521 	return xfs_inode_ag_iterator_tag(mp, execute, flags,
1522 					 eofb, tag);
1523 }
1524 
1525 int
1526 xfs_icache_free_eofblocks(
1527 	struct xfs_mount	*mp,
1528 	struct xfs_eofblocks	*eofb)
1529 {
1530 	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
1531 			XFS_ICI_EOFBLOCKS_TAG);
1532 }
1533 
1534 /*
1535  * Run eofblocks scans on the quotas applicable to the inode. For inodes with
1536  * multiple quotas, we don't know exactly which quota caused an allocation
1537  * failure. We make a best effort by including each quota under low free space
1538  * conditions (less than 1% free space) in the scan.
1539  */
1540 static int
1541 __xfs_inode_free_quota_eofblocks(
1542 	struct xfs_inode	*ip,
1543 	int			(*execute)(struct xfs_mount *mp,
1544 					   struct xfs_eofblocks	*eofb))
1545 {
1546 	int scan = 0;
1547 	struct xfs_eofblocks eofb = {0};
1548 	struct xfs_dquot *dq;
1549 
1550 	/*
1551 	 * Run a sync scan to increase effectiveness and use the union filter to
1552 	 * cover all applicable quotas in a single scan.
1553 	 */
1554 	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
1555 
1556 	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1557 		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
1558 		if (dq && xfs_dquot_lowsp(dq)) {
1559 			eofb.eof_uid = VFS_I(ip)->i_uid;
1560 			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1561 			scan = 1;
1562 		}
1563 	}
1564 
1565 	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1566 		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
1567 		if (dq && xfs_dquot_lowsp(dq)) {
1568 			eofb.eof_gid = VFS_I(ip)->i_gid;
1569 			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1570 			scan = 1;
1571 		}
1572 	}
1573 
1574 	if (scan)
1575 		execute(ip->i_mount, &eofb);
1576 
1577 	return scan;
1578 }
1579 
1580 int
1581 xfs_inode_free_quota_eofblocks(
1582 	struct xfs_inode *ip)
1583 {
1584 	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
1585 }
1586 
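/* Convert a per-AG radix tree tag into the corresponding in-core inode flag. */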
1587 static inline unsigned long
1588 xfs_iflag_for_tag(
1589 	int		tag)
1590 {
1591 	switch (tag) {
1592 	case XFS_ICI_EOFBLOCKS_TAG:
1593 		return XFS_IEOFBLOCKS;
1594 	case XFS_ICI_COWBLOCKS_TAG:
1595 		return XFS_ICOWBLOCKS;
1596 	default:
1597 		ASSERT(0);
1598 		return 0;
1599 	}
1600 }
1601 
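/*
 * Tag an inode as carrying speculative preallocations (post-EOF or CoW
 * blocks). The first tagged inode in an AG also propagates the tag into
 * the per-mount perag radix tree and kicks the background trimming worker.
 */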
1602 static void
1603 __xfs_inode_set_blocks_tag(
1604 	xfs_inode_t	*ip,
1605 	void		(*execute)(struct xfs_mount *mp),
1606 	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1607 				  int error, unsigned long caller_ip),
1608 	int		tag)
1609 {
1610 	struct xfs_mount *mp = ip->i_mount;
1611 	struct xfs_perag *pag;
1612 	int tagged;
1613 
1614 	/*
1615 	 * Don't bother locking the AG and looking up in the radix trees
1616 	 * if we already know that we have the tag set.
1617 	 */
1618 	if (ip->i_flags & xfs_iflag_for_tag(tag))
1619 		return;
1620 	spin_lock(&ip->i_flags_lock);
1621 	ip->i_flags |= xfs_iflag_for_tag(tag);
1622 	spin_unlock(&ip->i_flags_lock);
1623 
1624 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1625 	spin_lock(&pag->pag_ici_lock);
1626 
1627 	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
1628 	radix_tree_tag_set(&pag->pag_ici_root,
1629 			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1630 	if (!tagged) {
1631 		/* propagate the eofblocks tag up into the perag radix tree */
1632 		spin_lock(&ip->i_mount->m_perag_lock);
1633 		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1634 				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1635 				   tag);
1636 		spin_unlock(&ip->i_mount->m_perag_lock);
1637 
1638 		/* kick off background trimming */
1639 		execute(ip->i_mount);
1640 
1641 		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1642 	}
1643 
1644 	spin_unlock(&pag->pag_ici_lock);
1645 	xfs_perag_put(pag);
1646 }
1647 
1648 void
1649 xfs_inode_set_eofblocks_tag(
1650 	xfs_inode_t	*ip)
1651 {
1652 	trace_xfs_inode_set_eofblocks_tag(ip);
1653 	return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
1654 			trace_xfs_perag_set_eofblocks,
1655 			XFS_ICI_EOFBLOCKS_TAG);
1656 }
1657 
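/*
 * Clear the speculative preallocation tag from an inode. If this was the
 * last tagged inode in the AG, also clear the tag from the per-mount
 * perag radix tree.
 */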
1658 static void
1659 __xfs_inode_clear_blocks_tag(
1660 	xfs_inode_t	*ip,
1661 	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1662 				    int error, unsigned long caller_ip),
1663 	int		tag)
1664 {
1665 	struct xfs_mount *mp = ip->i_mount;
1666 	struct xfs_perag *pag;
1667 
1668 	spin_lock(&ip->i_flags_lock);
1669 	ip->i_flags &= ~xfs_iflag_for_tag(tag);
1670 	spin_unlock(&ip->i_flags_lock);
1671 
1672 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1673 	spin_lock(&pag->pag_ici_lock);
1674 
1675 	radix_tree_tag_clear(&pag->pag_ici_root,
1676 			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1677 	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
1678 		/* clear the eofblocks tag from the perag radix tree */
1679 		spin_lock(&ip->i_mount->m_perag_lock);
1680 		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1681 				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1682 				     tag);
1683 		spin_unlock(&ip->i_mount->m_perag_lock);
1684 		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1685 	}
1686 
1687 	spin_unlock(&pag->pag_ici_lock);
1688 	xfs_perag_put(pag);
1689 }
1690 
1691 void
1692 xfs_inode_clear_eofblocks_tag(
1693 	xfs_inode_t	*ip)
1694 {
1695 	trace_xfs_inode_clear_eofblocks_tag(ip);
1696 	return __xfs_inode_clear_blocks_tag(ip,
1697 			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
1698 }
1699 
1700 /*
1701  * Set ourselves up to free CoW blocks from this file.  If it's already clean
1702  * then we can bail out quickly, but otherwise we must back off if the file
1703  * is undergoing some kind of write.
1704  */
1705 static bool
1706 xfs_prep_free_cowblocks(
1707 	struct xfs_inode	*ip)
1708 {
1709 	/*
1710 	 * Just clear the tag if we have an empty cow fork or none at all. It's
1711 	 * possible the inode was fully unshared since it was originally tagged.
1712 	 */
1713 	if (!xfs_inode_has_cow_data(ip)) {
1714 		trace_xfs_inode_free_cowblocks_invalid(ip);
1715 		xfs_inode_clear_cowblocks_tag(ip);
1716 		return false;
1717 	}
1718 
1719 	/*
1720 	 * If the mapping is dirty or under writeback we cannot touch the
1721 	 * CoW fork.  Leave it alone if we're in the midst of a directio.
1722 	 */
1723 	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1724 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1725 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1726 	    atomic_read(&VFS_I(ip)->i_dio_count))
1727 		return false;
1728 
1729 	return true;
1730 }
1731 
1732 /*
1733  * Automatic CoW Reservation Freeing
1734  *
1735  * These functions automatically garbage collect leftover CoW reservations
1736  * that were made on behalf of a cowextsize hint when we start to run out
1737  * of quota or when the reservations sit around for too long.  If the file
1738  * has dirty pages or is undergoing writeback, its CoW reservations will
1739  * be retained.
1740  *
1741  * The actual garbage collection piggybacks off the same code that runs
1742  * the speculative EOF preallocation garbage collector.
1743  */
1744 STATIC int
1745 xfs_inode_free_cowblocks(
1746 	struct xfs_inode	*ip,
1747 	int			flags,
1748 	void			*args)
1749 {
1750 	struct xfs_eofblocks	*eofb = args;
1751 	int			match;
1752 	int			ret = 0;
1753 
1754 	if (!xfs_prep_free_cowblocks(ip))
1755 		return 0;
1756 
1757 	if (eofb) {
1758 		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1759 			match = xfs_inode_match_id_union(ip, eofb);
1760 		else
1761 			match = xfs_inode_match_id(ip, eofb);
1762 		if (!match)
1763 			return 0;
1764 
1765 		/* skip the inode if the file size is too small */
1766 		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1767 		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
1768 			return 0;
1769 	}
1770 
1771 	/* Free the CoW blocks */
1772 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
1773 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1774 
1775 	/*
1776 	 * Check again, nobody else should be able to dirty blocks or change
1777 	 * the reflink iflag now that we have the first two locks held.
1778 	 */
1779 	if (xfs_prep_free_cowblocks(ip))
1780 		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1781 
1782 	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
1783 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1784 
1785 	return ret;
1786 }
1787 
1788 int
1789 xfs_icache_free_cowblocks(
1790 	struct xfs_mount	*mp,
1791 	struct xfs_eofblocks	*eofb)
1792 {
1793 	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
1794 			XFS_ICI_COWBLOCKS_TAG);
1795 }
1796 
1797 int
1798 xfs_inode_free_quota_cowblocks(
1799 	struct xfs_inode *ip)
1800 {
1801 	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
1802 }
1803 
1804 void
1805 xfs_inode_set_cowblocks_tag(
1806 	xfs_inode_t	*ip)
1807 {
1808 	trace_xfs_inode_set_cowblocks_tag(ip);
1809 	return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
1810 			trace_xfs_perag_set_cowblocks,
1811 			XFS_ICI_COWBLOCKS_TAG);
1812 }
1813 
1814 void
1815 xfs_inode_clear_cowblocks_tag(
1816 	xfs_inode_t	*ip)
1817 {
1818 	trace_xfs_inode_clear_cowblocks_tag(ip);
1819 	return __xfs_inode_clear_blocks_tag(ip,
1820 			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
1821 }
1822 
1823 /* Disable post-EOF and CoW block auto-reclamation. */
1824 void
1825 xfs_stop_block_reaping(
1826 	struct xfs_mount	*mp)
1827 {
1828 	cancel_delayed_work_sync(&mp->m_eofblocks_work);
1829 	cancel_delayed_work_sync(&mp->m_cowblocks_work);
1830 }
1831 
1832 /* Enable post-EOF and CoW block auto-reclamation. */
1833 void
1834 xfs_start_block_reaping(
1835 	struct xfs_mount	*mp)
1836 {
1837 	xfs_queue_eofblocks(mp);
1838 	xfs_queue_cowblocks(mp);
1839 }
1840