xref: /linux/fs/xfs/xfs_icache.c (revision f3f5edc5e41e038cf66d124a4cbacf6ff0983513)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_trans.h"
15 #include "xfs_trans_priv.h"
16 #include "xfs_inode_item.h"
17 #include "xfs_quota.h"
18 #include "xfs_trace.h"
19 #include "xfs_icache.h"
20 #include "xfs_bmap_util.h"
21 #include "xfs_dquot_item.h"
22 #include "xfs_dquot.h"
23 #include "xfs_reflink.h"
24 #include "xfs_ialloc.h"
25 #include "xfs_ag.h"
26 #include "xfs_log_priv.h"
27 #include "xfs_health.h"
28 #include "xfs_da_format.h"
29 #include "xfs_dir2.h"
30 #include "xfs_metafile.h"
31 
32 #include <linux/iversion.h>
33 
34 /* Radix tree tags for incore inode tree. */
35 
36 /* inode is to be reclaimed */
37 #define XFS_ICI_RECLAIM_TAG	0
38 /* Inode has speculative preallocations (posteof or cow) to clean. */
39 #define XFS_ICI_BLOCKGC_TAG	1
40 
41 /*
42  * The goal for walking incore inodes.  These can correspond with incore inode
43  * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
44  */
45 enum xfs_icwalk_goal {
46 	/* Goals directly associated with tagged inodes. */
47 	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
48 	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
49 };
50 
51 static int xfs_icwalk(struct xfs_mount *mp,
52 		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
53 static int xfs_icwalk_ag(struct xfs_perag *pag,
54 		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
55 
56 /*
57  * Private inode cache walk flags for struct xfs_icwalk.  Must not
58  * coincide with XFS_ICWALK_FLAGS_VALID.
59  */
60 
61 /* Stop scanning after icw_scan_limit inodes. */
62 #define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)
63 
64 #define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
65 #define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */
66 
67 #define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
68 					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
69 					 XFS_ICWALK_FLAG_UNION)
70 
71 /* Marks for the perag xarray */
72 #define XFS_PERAG_RECLAIM_MARK	XA_MARK_0
73 #define XFS_PERAG_BLOCKGC_MARK	XA_MARK_1
74 
75 static inline xa_mark_t ici_tag_to_mark(unsigned int tag)
76 {
77 	if (tag == XFS_ICI_RECLAIM_TAG)
78 		return XFS_PERAG_RECLAIM_MARK;
79 	ASSERT(tag == XFS_ICI_BLOCKGC_TAG);
80 	return XFS_PERAG_BLOCKGC_MARK;
81 }
82 
83 /*
84  * Allocate and initialise an xfs_inode.
85  */
86 struct xfs_inode *
87 xfs_inode_alloc(
88 	struct xfs_mount	*mp,
89 	xfs_ino_t		ino)
90 {
91 	struct xfs_inode	*ip;
92 
93 	/*
94 	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
95 	 * and return NULL here on ENOMEM.
96 	 */
97 	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);
98 
99 	if (inode_init_always(mp->m_super, VFS_I(ip))) {
100 		kmem_cache_free(xfs_inode_cache, ip);
101 		return NULL;
102 	}
103 
104 	/* VFS doesn't initialise i_mode! */
105 	VFS_I(ip)->i_mode = 0;
106 	mapping_set_folio_min_order(VFS_I(ip)->i_mapping,
107 				    M_IGEO(mp)->min_folio_order);
108 
109 	XFS_STATS_INC(mp, vn_active);
110 	ASSERT(atomic_read(&ip->i_pincount) == 0);
111 	ASSERT(ip->i_ino == 0);
112 
113 	/* initialise the xfs inode */
114 	ip->i_ino = ino;
115 	ip->i_mount = mp;
116 	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
117 	ip->i_cowfp = NULL;
118 	memset(&ip->i_af, 0, sizeof(ip->i_af));
119 	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
120 	memset(&ip->i_df, 0, sizeof(ip->i_df));
121 	ip->i_flags = 0;
122 	ip->i_delayed_blks = 0;
123 	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
124 	ip->i_nblocks = 0;
125 	ip->i_forkoff = 0;
126 	ip->i_sick = 0;
127 	ip->i_checked = 0;
128 	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
129 	INIT_LIST_HEAD(&ip->i_ioend_list);
130 	spin_lock_init(&ip->i_ioend_lock);
131 	ip->i_next_unlinked = NULLAGINO;
132 	ip->i_prev_unlinked = 0;
133 
134 	return ip;
135 }
136 
137 STATIC void
138 xfs_inode_free_callback(
139 	struct rcu_head		*head)
140 {
141 	struct inode		*inode = container_of(head, struct inode, i_rcu);
142 	struct xfs_inode	*ip = XFS_I(inode);
143 
144 	switch (VFS_I(ip)->i_mode & S_IFMT) {
145 	case S_IFREG:
146 	case S_IFDIR:
147 	case S_IFLNK:
148 		xfs_idestroy_fork(&ip->i_df);
149 		break;
150 	}
151 
152 	xfs_ifork_zap_attr(ip);
153 
154 	if (ip->i_cowfp) {
155 		xfs_idestroy_fork(ip->i_cowfp);
156 		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
157 	}
158 	if (ip->i_itemp) {
159 		ASSERT(!test_bit(XFS_LI_IN_AIL,
160 				 &ip->i_itemp->ili_item.li_flags));
161 		xfs_inode_item_destroy(ip);
162 		ip->i_itemp = NULL;
163 	}
164 
165 	kmem_cache_free(xfs_inode_cache, ip);
166 }
167 
168 static void
169 __xfs_inode_free(
170 	struct xfs_inode	*ip)
171 {
172 	/* asserts to verify all state is correct here */
173 	ASSERT(atomic_read(&ip->i_pincount) == 0);
174 	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
175 	XFS_STATS_DEC(ip->i_mount, vn_active);
176 
177 	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
178 }
179 
180 void
181 xfs_inode_free(
182 	struct xfs_inode	*ip)
183 {
184 	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
185 
186 	/*
187 	 * Because we use RCU freeing we need to ensure the inode always
188 	 * appears to be reclaimed with an invalid inode number when in the
189 	 * free state. The ip->i_flags_lock provides the barrier against lookup
190 	 * races.
191 	 */
192 	spin_lock(&ip->i_flags_lock);
193 	ip->i_flags = XFS_IRECLAIM;
194 	ip->i_ino = 0;
195 	spin_unlock(&ip->i_flags_lock);
196 
197 	__xfs_inode_free(ip);
198 }
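/*
 * Illustrative sketch (editorial, not part of this file): the lookup side of
 * the RCU freeing protocol described above.  Any walker that finds an inode
 * via an RCU-protected radix tree lookup must take i_flags_lock and
 * revalidate i_ino before trusting the inode, exactly as
 * xfs_iget_cache_hit() does later in this file.  The helper name and @ino
 * argument here are hypothetical.
 */
static bool example_rcu_lookup_is_valid(struct xfs_inode *ip, xfs_ino_t ino)
{
	bool	valid;

	spin_lock(&ip->i_flags_lock);
	/* a freed or recycled inode has i_ino cleared or XFS_IRECLAIM set */
	valid = ip->i_ino == ino && !(ip->i_flags & XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return valid;
}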
199 
200 /*
201  * Queue background inode reclaim work if there are reclaimable inodes and there
202  * isn't reclaim work already scheduled or in progress.
203  */
204 static void
205 xfs_reclaim_work_queue(
206 	struct xfs_mount        *mp)
207 {
208 
209 	rcu_read_lock();
210 	if (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) {
211 		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
212 			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
213 	}
214 	rcu_read_unlock();
215 }
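/*
 * Worked example of the delay above (editorial; assumes the default tunable
 * value): xfs_syncd_centisecs defaults to 3000 centiseconds (30 seconds), so
 * the queue delay is 3000 / 6 * 10 = 5000 ms.  Background reclaim therefore
 * runs at one sixth of the sync period, i.e. every 5 seconds by default.
 */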
216 
217 /*
218  * Background scanning to trim preallocated space. This is queued based on the
219  * 'speculative_prealloc_lifetime' tunable (5m by default).
220  */
221 static inline void
222 xfs_blockgc_queue(
223 	struct xfs_perag	*pag)
224 {
225 	struct xfs_mount	*mp = pag_mount(pag);
226 
227 	if (!xfs_is_blockgc_enabled(mp))
228 		return;
229 
230 	rcu_read_lock();
231 	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
232 		queue_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work,
233 				   secs_to_jiffies(xfs_blockgc_secs));
234 	rcu_read_unlock();
235 }
236 
237 /* Set a tag on both the AG incore inode tree and the AG radix tree. */
238 static void
239 xfs_perag_set_inode_tag(
240 	struct xfs_perag	*pag,
241 	xfs_agino_t		agino,
242 	unsigned int		tag)
243 {
244 	bool			was_tagged;
245 
246 	lockdep_assert_held(&pag->pag_ici_lock);
247 
248 	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
249 	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);
250 
251 	if (tag == XFS_ICI_RECLAIM_TAG)
252 		pag->pag_ici_reclaimable++;
253 
254 	if (was_tagged)
255 		return;
256 
257 	/* propagate the tag up into the pag xarray tree */
258 	xfs_group_set_mark(pag_group(pag), ici_tag_to_mark(tag));
259 
260 	/* start background work */
261 	switch (tag) {
262 	case XFS_ICI_RECLAIM_TAG:
263 		xfs_reclaim_work_queue(pag_mount(pag));
264 		break;
265 	case XFS_ICI_BLOCKGC_TAG:
266 		xfs_blockgc_queue(pag);
267 		break;
268 	}
269 
270 	trace_xfs_perag_set_inode_tag(pag, _RET_IP_);
271 }
272 
273 /* Clear a tag on both the AG incore inode tree and the AG radix tree. */
274 static void
275 xfs_perag_clear_inode_tag(
276 	struct xfs_perag	*pag,
277 	xfs_agino_t		agino,
278 	unsigned int		tag)
279 {
280 	lockdep_assert_held(&pag->pag_ici_lock);
281 
282 	/*
283 	 * Reclaim can signal (with a null agino) that it cleared its own tag
284 	 * by removing the inode from the radix tree.
285 	 */
286 	if (agino != NULLAGINO)
287 		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
288 	else
289 		ASSERT(tag == XFS_ICI_RECLAIM_TAG);
290 
291 	if (tag == XFS_ICI_RECLAIM_TAG)
292 		pag->pag_ici_reclaimable--;
293 
294 	if (radix_tree_tagged(&pag->pag_ici_root, tag))
295 		return;
296 
297 	/* clear the tag from the pag xarray */
298 	xfs_group_clear_mark(pag_group(pag), ici_tag_to_mark(tag));
299 	trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
300 }
301 
302 /*
303  * Find the next AG after @pag, or the first AG if @pag is NULL.
304  */
305 static struct xfs_perag *
306 xfs_perag_grab_next_tag(
307 	struct xfs_mount	*mp,
308 	struct xfs_perag	*pag,
309 	int			tag)
310 {
311 	return to_perag(xfs_group_grab_next_mark(mp,
312 			pag ? pag_group(pag) : NULL,
313 			ici_tag_to_mark(tag), XG_TYPE_AG));
314 }
315 
316 /*
317  * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
318  * part of the structure. This is made more complex by the fact we store
319  * information about the on-disk values in the VFS inode and so we can't just
320  * overwrite the values unconditionally. Hence we save the parameters we
321  * need to retain across reinitialisation, and rewrite them into the VFS inode
322  * after reinitialisation even if it fails.
323  */
324 static int
325 xfs_reinit_inode(
326 	struct xfs_mount	*mp,
327 	struct inode		*inode)
328 {
329 	int			error;
330 	uint32_t		nlink = inode->i_nlink;
331 	uint32_t		generation = inode->i_generation;
332 	uint64_t		version = inode_peek_iversion(inode);
333 	umode_t			mode = inode->i_mode;
334 	dev_t			dev = inode->i_rdev;
335 	kuid_t			uid = inode->i_uid;
336 	kgid_t			gid = inode->i_gid;
337 	unsigned long		state = inode->i_state;
338 
339 	error = inode_init_always(mp->m_super, inode);
340 
341 	set_nlink(inode, nlink);
342 	inode->i_generation = generation;
343 	inode_set_iversion_queried(inode, version);
344 	inode->i_mode = mode;
345 	inode->i_rdev = dev;
346 	inode->i_uid = uid;
347 	inode->i_gid = gid;
348 	inode->i_state = state;
349 	mapping_set_folio_min_order(inode->i_mapping,
350 				    M_IGEO(mp)->min_folio_order);
351 	return error;
352 }
353 
354 /*
355  * Carefully nudge an inode whose VFS state has been torn down back into a
356  * usable state.  Drops the i_flags_lock and the rcu read lock.
357  */
358 static int
359 xfs_iget_recycle(
360 	struct xfs_perag	*pag,
361 	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
362 {
363 	struct xfs_mount	*mp = ip->i_mount;
364 	struct inode		*inode = VFS_I(ip);
365 	int			error;
366 
367 	trace_xfs_iget_recycle(ip);
368 
369 	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
370 		return -EAGAIN;
371 
372 	/*
373 	 * We need to make it look like the inode is being reclaimed to prevent
374 	 * the actual reclaim workers from stomping over us while we recycle
375 	 * the inode.  We can't clear the radix tree tag yet as it requires
376 	 * pag_ici_lock to be held exclusive.
377 	 */
378 	ip->i_flags |= XFS_IRECLAIM;
379 
380 	spin_unlock(&ip->i_flags_lock);
381 	rcu_read_unlock();
382 
383 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
384 	error = xfs_reinit_inode(mp, inode);
385 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
386 	if (error) {
387 		/*
388 		 * Re-initializing the inode failed, and we are in deep
389 		 * trouble.  Try to re-add it to the reclaim list.
390 		 */
391 		rcu_read_lock();
392 		spin_lock(&ip->i_flags_lock);
393 		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
394 		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
395 		spin_unlock(&ip->i_flags_lock);
396 		rcu_read_unlock();
397 
398 		trace_xfs_iget_recycle_fail(ip);
399 		return error;
400 	}
401 
402 	spin_lock(&pag->pag_ici_lock);
403 	spin_lock(&ip->i_flags_lock);
404 
405 	/*
406 	 * Clear the per-lifetime state in the inode as we are now effectively
407 	 * a new inode and need to return to the initial state before reuse
408 	 * occurs.
409 	 */
410 	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
411 	ip->i_flags |= XFS_INEW;
412 	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
413 			XFS_ICI_RECLAIM_TAG);
414 	inode->i_state = I_NEW;
415 	spin_unlock(&ip->i_flags_lock);
416 	spin_unlock(&pag->pag_ici_lock);
417 
418 	return 0;
419 }
420 
421 /*
422  * If we are allocating a new inode, then check what was returned is
423  * actually a free, empty inode. If we are not allocating an inode,
424  * then check we didn't find a free inode.
425  *
426  * Returns:
427  *	0		if the inode free state matches the lookup context
428  *	-ENOENT		if the inode is free and we are not allocating
429  *	-EFSCORRUPTED	if there is any state mismatch at all
430  */
431 static int
432 xfs_iget_check_free_state(
433 	struct xfs_inode	*ip,
434 	int			flags)
435 {
436 	if (flags & XFS_IGET_CREATE) {
437 		/* should be a free inode */
438 		if (VFS_I(ip)->i_mode != 0) {
439 			xfs_warn(ip->i_mount,
440 "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
441 				ip->i_ino, VFS_I(ip)->i_mode);
442 			xfs_agno_mark_sick(ip->i_mount,
443 					XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
444 					XFS_SICK_AG_INOBT);
445 			return -EFSCORRUPTED;
446 		}
447 
448 		if (ip->i_nblocks != 0) {
449 			xfs_warn(ip->i_mount,
450 "Corruption detected! Free inode 0x%llx has blocks allocated!",
451 				ip->i_ino);
452 			xfs_agno_mark_sick(ip->i_mount,
453 					XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
454 					XFS_SICK_AG_INOBT);
455 			return -EFSCORRUPTED;
456 		}
457 		return 0;
458 	}
459 
460 	/* should be an allocated inode */
461 	if (VFS_I(ip)->i_mode == 0)
462 		return -ENOENT;
463 
464 	return 0;
465 }
466 
467 /* Make all pending inactivation work start immediately. */
468 static bool
469 xfs_inodegc_queue_all(
470 	struct xfs_mount	*mp)
471 {
472 	struct xfs_inodegc	*gc;
473 	int			cpu;
474 	bool			ret = false;
475 
476 	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
477 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
478 		if (!llist_empty(&gc->list)) {
479 			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
480 			ret = true;
481 		}
482 	}
483 
484 	return ret;
485 }
486 
487 /* Wait for all queued work and collect errors */
488 static int
489 xfs_inodegc_wait_all(
490 	struct xfs_mount	*mp)
491 {
492 	int			cpu;
493 	int			error = 0;
494 
495 	flush_workqueue(mp->m_inodegc_wq);
496 	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
497 		struct xfs_inodegc	*gc;
498 
499 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
500 		if (gc->error && !error)
501 			error = gc->error;
502 		gc->error = 0;
503 	}
504 
505 	return error;
506 }
507 
508 /*
509  * Check the validity of the inode we just found in the cache
510  */
511 static int
512 xfs_iget_cache_hit(
513 	struct xfs_perag	*pag,
514 	struct xfs_inode	*ip,
515 	xfs_ino_t		ino,
516 	int			flags,
517 	int			lock_flags) __releases(RCU)
518 {
519 	struct inode		*inode = VFS_I(ip);
520 	struct xfs_mount	*mp = ip->i_mount;
521 	int			error;
522 
523 	/*
524 	 * check for re-use of an inode within an RCU grace period due to the
525 	 * radix tree nodes not being updated yet. We monitor for this by
526 	 * setting the inode number to zero before freeing the inode structure.
527 	 * If the inode has been reallocated and set up, then the inode number
528 	 * will not match, so check for that, too.
529 	 */
530 	spin_lock(&ip->i_flags_lock);
531 	if (ip->i_ino != ino)
532 		goto out_skip;
533 
534 	/*
535 	 * If we are racing with another cache hit that is currently
536 	 * instantiating this inode or currently recycling it out of
537 	 * reclaimable state, wait for the initialisation to complete
538 	 * before continuing.
539 	 *
540 	 * If we're racing with the inactivation worker we also want to wait.
541 	 * If we're creating a new file, it's possible that the worker
542 	 * previously marked the inode as free on disk but hasn't finished
543 	 * updating the incore state yet.  The AGI buffer will be dirty and
544 	 * locked to the icreate transaction, so a synchronous push of the
545 	 * inodegc workers would result in deadlock.  For a regular iget, the
546 	 * worker is running already, so we might as well wait.
547 	 *
548 	 * XXX(hch): eventually we should do something equivalent to
549 	 *	     wait_on_inode to wait for these flags to be cleared
550 	 *	     instead of polling for it.
551 	 */
552 	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
553 		goto out_skip;
554 
555 	if (ip->i_flags & XFS_NEED_INACTIVE) {
556 		/* Unlinked inodes cannot be re-grabbed. */
557 		if (VFS_I(ip)->i_nlink == 0) {
558 			error = -ENOENT;
559 			goto out_error;
560 		}
561 		goto out_inodegc_flush;
562 	}
563 
564 	/*
565 	 * Check the inode free state is valid. This also detects lookup
566 	 * racing with unlinks.
567 	 */
568 	error = xfs_iget_check_free_state(ip, flags);
569 	if (error)
570 		goto out_error;
571 
572 	/* Skip inodes that have no vfs state. */
573 	if ((flags & XFS_IGET_INCORE) &&
574 	    (ip->i_flags & XFS_IRECLAIMABLE))
575 		goto out_skip;
576 
577 	/* The inode fits the selection criteria; process it. */
578 	if (ip->i_flags & XFS_IRECLAIMABLE) {
579 		/* Drops i_flags_lock and RCU read lock. */
580 		error = xfs_iget_recycle(pag, ip);
581 		if (error == -EAGAIN)
582 			goto out_skip;
583 		if (error)
584 			return error;
585 	} else {
586 		/* If the VFS inode is being torn down, pause and try again. */
587 		if (!igrab(inode))
588 			goto out_skip;
589 
590 		/* We've got a live one. */
591 		spin_unlock(&ip->i_flags_lock);
592 		rcu_read_unlock();
593 		trace_xfs_iget_hit(ip);
594 	}
595 
596 	if (lock_flags != 0)
597 		xfs_ilock(ip, lock_flags);
598 
599 	if (!(flags & XFS_IGET_INCORE))
600 		xfs_iflags_clear(ip, XFS_ISTALE);
601 	XFS_STATS_INC(mp, xs_ig_found);
602 
603 	return 0;
604 
605 out_skip:
606 	trace_xfs_iget_skip(ip);
607 	XFS_STATS_INC(mp, xs_ig_frecycle);
608 	error = -EAGAIN;
609 out_error:
610 	spin_unlock(&ip->i_flags_lock);
611 	rcu_read_unlock();
612 	return error;
613 
614 out_inodegc_flush:
615 	spin_unlock(&ip->i_flags_lock);
616 	rcu_read_unlock();
617 	/*
618 	 * Do not wait for the workers, because the caller could hold an AGI
619 	 * buffer lock.  We're just going to sleep in a loop anyway.
620 	 */
621 	if (xfs_is_inodegc_enabled(mp))
622 		xfs_inodegc_queue_all(mp);
623 	return -EAGAIN;
624 }
625 
626 static int
627 xfs_iget_cache_miss(
628 	struct xfs_mount	*mp,
629 	struct xfs_perag	*pag,
630 	xfs_trans_t		*tp,
631 	xfs_ino_t		ino,
632 	struct xfs_inode	**ipp,
633 	int			flags,
634 	int			lock_flags)
635 {
636 	struct xfs_inode	*ip;
637 	int			error;
638 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
639 
640 	ip = xfs_inode_alloc(mp, ino);
641 	if (!ip)
642 		return -ENOMEM;
643 
644 	error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
645 	if (error)
646 		goto out_destroy;
647 
648 	/*
649 	 * For version 5 superblocks, if we are initialising a new inode and we
650 	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
651 	 * simply build the new inode core with a random generation number.
652 	 *
653 	 * For version 4 (and older) superblocks, log recovery is dependent on
654 	 * the i_flushiter field being initialised from the current on-disk
655 	 * value and hence we must also read the inode off disk even when
656 	 * initializing new inodes.
657 	 */
658 	if (xfs_has_v3inodes(mp) &&
659 	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
660 		VFS_I(ip)->i_generation = get_random_u32();
661 	} else {
662 		struct xfs_buf		*bp;
663 
664 		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
665 		if (error)
666 			goto out_destroy;
667 
668 		error = xfs_inode_from_disk(ip,
669 				xfs_buf_offset(bp, ip->i_imap.im_boffset));
670 		if (!error)
671 			xfs_buf_set_ref(bp, XFS_INO_REF);
672 		else
673 			xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
674 		xfs_trans_brelse(tp, bp);
675 
676 		if (error)
677 			goto out_destroy;
678 	}
679 
680 	trace_xfs_iget_miss(ip);
681 
682 	/*
683 	 * Check the inode free state is valid. This also detects lookup
684 	 * racing with unlinks.
685 	 */
686 	error = xfs_iget_check_free_state(ip, flags);
687 	if (error)
688 		goto out_destroy;
689 
690 	/*
691 	 * Preload the radix tree so we can insert safely under the
692 	 * write spinlock. Note that we cannot sleep inside the preload
693 	 * region.
694 	 */
695 	if (radix_tree_preload(GFP_KERNEL | __GFP_NOLOCKDEP)) {
696 		error = -EAGAIN;
697 		goto out_destroy;
698 	}
699 
700 	/*
701 	 * Because the inode hasn't been added to the radix-tree yet it can't
702 	 * be found by another thread, so we can do the non-sleeping lock here.
703 	 */
704 	if (lock_flags) {
705 		if (!xfs_ilock_nowait(ip, lock_flags))
706 			BUG();
707 	}
708 
709 	/*
710 	 * These values must be set before inserting the inode into the radix
711 	 * tree as the moment it is inserted a concurrent lookup (allowed by the
712 	 * RCU locking mechanism) can find it and that lookup must see that this
713 	 * is an inode currently under construction (i.e. that XFS_INEW is set).
714 	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
715 	 * memory barrier that ensures this detection works correctly at lookup
716 	 * time.
717 	 */
718 	if (flags & XFS_IGET_DONTCACHE)
719 		d_mark_dontcache(VFS_I(ip));
720 	ip->i_udquot = NULL;
721 	ip->i_gdquot = NULL;
722 	ip->i_pdquot = NULL;
723 	xfs_iflags_set(ip, XFS_INEW);
724 
725 	/* insert the new inode */
726 	spin_lock(&pag->pag_ici_lock);
727 	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
728 	if (unlikely(error)) {
729 		WARN_ON(error != -EEXIST);
730 		XFS_STATS_INC(mp, xs_ig_dup);
731 		error = -EAGAIN;
732 		goto out_preload_end;
733 	}
734 	spin_unlock(&pag->pag_ici_lock);
735 	radix_tree_preload_end();
736 
737 	*ipp = ip;
738 	return 0;
739 
740 out_preload_end:
741 	spin_unlock(&pag->pag_ici_lock);
742 	radix_tree_preload_end();
743 	if (lock_flags)
744 		xfs_iunlock(ip, lock_flags);
745 out_destroy:
746 	__destroy_inode(VFS_I(ip));
747 	xfs_inode_free(ip);
748 	return error;
749 }
750 
751 /*
752  * Look up an inode by number in the given file system.  The inode is looked up
753  * in the cache held in each AG.  If the inode is found in the cache, initialise
754  * the vfs inode if necessary.
755  *
756  * If it is not in core, read it in from the file system's device, add it to the
757  * cache and initialise the vfs inode.
758  *
759  * The inode is locked according to the value of the lock_flags parameter.
760  * Inode lookup is only done during metadata operations and not as part of the
761  * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
762  */
763 int
764 xfs_iget(
765 	struct xfs_mount	*mp,
766 	struct xfs_trans	*tp,
767 	xfs_ino_t		ino,
768 	uint			flags,
769 	uint			lock_flags,
770 	struct xfs_inode	**ipp)
771 {
772 	struct xfs_inode	*ip;
773 	struct xfs_perag	*pag;
774 	xfs_agino_t		agino;
775 	int			error;
776 
777 	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
778 
779 	/* reject inode numbers outside existing AGs */
780 	if (!xfs_verify_ino(mp, ino))
781 		return -EINVAL;
782 
783 	XFS_STATS_INC(mp, xs_ig_attempts);
784 
785 	/* get the perag structure and ensure that it's inode capable */
786 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
787 	agino = XFS_INO_TO_AGINO(mp, ino);
788 
789 again:
790 	error = 0;
791 	rcu_read_lock();
792 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
793 
794 	if (ip) {
795 		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
796 		if (error)
797 			goto out_error_or_again;
798 	} else {
799 		rcu_read_unlock();
800 		if (flags & XFS_IGET_INCORE) {
801 			error = -ENODATA;
802 			goto out_error_or_again;
803 		}
804 		XFS_STATS_INC(mp, xs_ig_missed);
805 
806 		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
807 							flags, lock_flags);
808 		if (error)
809 			goto out_error_or_again;
810 	}
811 	xfs_perag_put(pag);
812 
813 	*ipp = ip;
814 
815 	/*
816 	 * If we have a real type for an on-disk inode, we can setup the inode
817 	 * now.	 If it's a new inode being created, xfs_init_new_inode will
818 	 * handle it.
819 	 */
820 	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
821 		xfs_setup_existing_inode(ip);
822 	return 0;
823 
824 out_error_or_again:
825 	if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) &&
826 	    error == -EAGAIN) {
827 		delay(1);
828 		goto again;
829 	}
830 	xfs_perag_put(pag);
831 	return error;
832 }
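/*
 * Illustrative sketch of a typical xfs_iget() caller (editorial, not part of
 * this file; the helper name and its error policy are hypothetical).  The
 * caller passes a transaction or NULL, may only request ILOCK flags, and must
 * drop both the lock and the inode reference when finished.
 */
static int example_inspect_inode(struct xfs_mount *mp, xfs_ino_t ino)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;

	/* ... read ip->i_nblocks, VFS_I(ip)->i_mode, etc. under the ILOCK ... */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_irele(ip);
	return 0;
}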
833 
834 /*
835  * Get a metadata inode.
836  *
837  * The metafile type must match the file mode exactly, and for files in the
838  * metadata directory tree, it must match the inode's metatype exactly.
839  */
840 int
841 xfs_trans_metafile_iget(
842 	struct xfs_trans	*tp,
843 	xfs_ino_t		ino,
844 	enum xfs_metafile_type	metafile_type,
845 	struct xfs_inode	**ipp)
846 {
847 	struct xfs_mount	*mp = tp->t_mountp;
848 	struct xfs_inode	*ip;
849 	umode_t			mode;
850 	int			error;
851 
852 	error = xfs_iget(mp, tp, ino, 0, 0, &ip);
853 	if (error == -EFSCORRUPTED || error == -EINVAL)
854 		goto whine;
855 	if (error)
856 		return error;
857 
858 	if (VFS_I(ip)->i_nlink == 0)
859 		goto bad_rele;
860 
861 	if (metafile_type == XFS_METAFILE_DIR)
862 		mode = S_IFDIR;
863 	else
864 		mode = S_IFREG;
865 	if (inode_wrong_type(VFS_I(ip), mode))
866 		goto bad_rele;
867 	if (xfs_has_metadir(mp)) {
868 		if (!xfs_is_metadir_inode(ip))
869 			goto bad_rele;
870 		if (metafile_type != ip->i_metatype)
871 			goto bad_rele;
872 	}
873 
874 	*ipp = ip;
875 	return 0;
876 bad_rele:
877 	xfs_irele(ip);
878 whine:
879 	xfs_err(mp, "metadata inode 0x%llx type %u is corrupt", ino,
880 			metafile_type);
881 	xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
882 	return -EFSCORRUPTED;
883 }
884 
885 /* Grab a metadata file if the caller doesn't already have a transaction. */
886 int
887 xfs_metafile_iget(
888 	struct xfs_mount	*mp,
889 	xfs_ino_t		ino,
890 	enum xfs_metafile_type	metafile_type,
891 	struct xfs_inode	**ipp)
892 {
893 	struct xfs_trans	*tp;
894 	int			error;
895 
896 	tp = xfs_trans_alloc_empty(mp);
897 	error = xfs_trans_metafile_iget(tp, ino, metafile_type, ipp);
898 	xfs_trans_cancel(tp);
899 	return error;
900 }
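/*
 * Illustrative sketch (editorial): grabbing a metadata directory inode by
 * number when the caller holds no transaction.  XFS_METAFILE_DIR is a real
 * metafile type used above; the wrapper itself is hypothetical.
 */
static int example_open_metadir_inode(struct xfs_mount *mp, xfs_ino_t ino,
		struct xfs_inode **ipp)
{
	/* the metafile type must match the on-disk inode exactly */
	return xfs_metafile_iget(mp, ino, XFS_METAFILE_DIR, ipp);
}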
901 
902 /*
903  * Grab the inode for reclaim exclusively.
904  *
905  * We have found this inode via a lookup under RCU, so the inode may have
906  * already been freed, or it may be in the process of being recycled by
907  * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
908  * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
909  * will not be set. Hence we need to check for both these flag conditions to
910  * avoid inodes that are no longer reclaim candidates.
911  *
912  * Note: checking for other state flags here, under the i_flags_lock or not, is
913  * racy and should be avoided. Those races should be resolved only after we have
914  * ensured that we are able to reclaim this inode and the world can see that we
915  * are going to reclaim it.
916  *
917  * Return true if we grabbed it, false otherwise.
918  */
919 static bool
920 xfs_reclaim_igrab(
921 	struct xfs_inode	*ip,
922 	struct xfs_icwalk	*icw)
923 {
924 	ASSERT(rcu_read_lock_held());
925 
926 	spin_lock(&ip->i_flags_lock);
927 	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
928 	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
929 		/* not a reclaim candidate. */
930 		spin_unlock(&ip->i_flags_lock);
931 		return false;
932 	}
933 
934 	/* Don't reclaim a sick inode unless the caller asked for it. */
935 	if (ip->i_sick &&
936 	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
937 		spin_unlock(&ip->i_flags_lock);
938 		return false;
939 	}
940 
941 	__xfs_iflags_set(ip, XFS_IRECLAIM);
942 	spin_unlock(&ip->i_flags_lock);
943 	return true;
944 }
945 
946 /*
947  * Inode reclaim is non-blocking, so the default action if progress cannot be
948  * made is to "requeue" the inode for reclaim by unlocking it and clearing the
949  * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
950  * blocking anymore and hence we can wait for the inode to be able to reclaim
951  * it.
952  *
953  * We do no IO here - if callers require inodes to be cleaned they must push the
954  * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
955  * done in the background in a non-blocking manner, and enables memory reclaim
956  * to make progress without blocking.
957  */
958 static void
959 xfs_reclaim_inode(
960 	struct xfs_inode	*ip,
961 	struct xfs_perag	*pag)
962 {
963 	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
964 
965 	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
966 		goto out;
967 	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
968 		goto out_iunlock;
969 
970 	/*
971 	 * Check for log shutdown because aborting the inode can move the log
972 	 * tail and corrupt in memory state. This is fine if the log is shut
973 	 * down, but if the log is still active and only the mount is shut down
974 	 * then the in-memory log tail movement caused by the abort can be
975 	 * incorrectly propagated to disk.
976 	 */
977 	if (xlog_is_shutdown(ip->i_mount->m_log)) {
978 		xfs_iunpin_wait(ip);
979 		/*
980 		 * Avoid a ABBA deadlock on the inode cluster buffer vs
981 		 * concurrent xfs_ifree_cluster() trying to mark the inode
982 		 * stale. We don't need the inode locked to run the flush abort
983 		 * code, but the flush abort needs to lock the cluster buffer.
984 		 */
985 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
986 		xfs_iflush_shutdown_abort(ip);
987 		xfs_ilock(ip, XFS_ILOCK_EXCL);
988 		goto reclaim;
989 	}
990 	if (xfs_ipincount(ip))
991 		goto out_clear_flush;
992 	if (!xfs_inode_clean(ip))
993 		goto out_clear_flush;
994 
995 	xfs_iflags_clear(ip, XFS_IFLUSHING);
996 reclaim:
997 	trace_xfs_inode_reclaiming(ip);
998 
999 	/*
1000 	 * Because we use RCU freeing we need to ensure the inode always appears
1001 	 * to be reclaimed with an invalid inode number when in the free state.
1002 	 * We do this as early as possible under the ILOCK so that
1003 	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
1004 	 * detect races with us here. By doing this, we guarantee that once
1005 	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
1006 	 * it will see either a valid inode that will serialise correctly, or it
1007 	 * will see an invalid inode that it can skip.
1008 	 */
1009 	spin_lock(&ip->i_flags_lock);
1010 	ip->i_flags = XFS_IRECLAIM;
1011 	ip->i_ino = 0;
1012 	ip->i_sick = 0;
1013 	ip->i_checked = 0;
1014 	spin_unlock(&ip->i_flags_lock);
1015 
1016 	ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
1017 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1018 
1019 	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1020 	/*
1021 	 * Remove the inode from the per-AG radix tree.
1022 	 *
1023 	 * Because radix_tree_delete won't complain even if the item was never
1024 	 * added to the tree assert that it's been there before to catch
1025 	 * problems with the inode life time early on.
1026 	 */
1027 	spin_lock(&pag->pag_ici_lock);
1028 	if (!radix_tree_delete(&pag->pag_ici_root,
1029 				XFS_INO_TO_AGINO(ip->i_mount, ino)))
1030 		ASSERT(0);
1031 	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
1032 	spin_unlock(&pag->pag_ici_lock);
1033 
1034 	/*
1035 	 * Here we do an (almost) spurious inode lock in order to coordinate
1036 	 * with inode cache radix tree lookups.  This is because the lookup
1037 	 * can reference the inodes in the cache without taking references.
1038 	 *
1039 	 * We make that OK here by ensuring that we wait until the inode is
1040 	 * unlocked after the lookup before we go ahead and free it.
1041 	 */
1042 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1043 	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
1044 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1045 	ASSERT(xfs_inode_clean(ip));
1046 
1047 	__xfs_inode_free(ip);
1048 	return;
1049 
1050 out_clear_flush:
1051 	xfs_iflags_clear(ip, XFS_IFLUSHING);
1052 out_iunlock:
1053 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1054 out:
1055 	xfs_iflags_clear(ip, XFS_IRECLAIM);
1056 }
1057 
1058 /* Reclaim sick inodes if we're unmounting or the fs went down. */
1059 static inline bool
1060 xfs_want_reclaim_sick(
1061 	struct xfs_mount	*mp)
1062 {
1063 	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
1064 	       xfs_is_shutdown(mp);
1065 }
1066 
1067 void
1068 xfs_reclaim_inodes(
1069 	struct xfs_mount	*mp)
1070 {
1071 	struct xfs_icwalk	icw = {
1072 		.icw_flags	= 0,
1073 	};
1074 
1075 	if (xfs_want_reclaim_sick(mp))
1076 		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
1077 
1078 	while (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) {
1079 		xfs_ail_push_all_sync(mp->m_ail);
1080 		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
1081 	}
1082 }
1083 
1084 /*
1085  * The shrinker infrastructure determines how many inodes we should scan for
1086  * reclaim. We want as many clean inodes ready to reclaim as possible, so we
1087  * push the AIL here. We also want to proactively free up memory if we can to
1088  * minimise the amount of work memory reclaim has to do so we kick the
1089  * background reclaim if it isn't already scheduled.
1090  */
1091 long
1092 xfs_reclaim_inodes_nr(
1093 	struct xfs_mount	*mp,
1094 	unsigned long		nr_to_scan)
1095 {
1096 	struct xfs_icwalk	icw = {
1097 		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
1098 		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
1099 	};
1100 
1101 	if (xfs_want_reclaim_sick(mp))
1102 		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
1103 
1104 	/* kick background reclaimer and push the AIL */
1105 	xfs_reclaim_work_queue(mp);
1106 	xfs_ail_push_all(mp->m_ail);
1107 
1108 	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
1109 	return 0;
1110 }
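/*
 * Illustrative sketch of how the two functions above are expected to be wired
 * into the VFS (editorial; the real glue lives in xfs_super.c and these
 * function names are assumptions, not definitions from this file).  The
 * superblock shrinker asks for a count, then hands back a scan target.
 */
static long example_nr_cached_objects(struct super_block *sb,
		struct shrink_control *sc)
{
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long example_free_cached_objects(struct super_block *sb,
		struct shrink_control *sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}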
1111 
1112 /*
1113  * Return the number of reclaimable inodes in the filesystem for
1114  * the shrinker to determine how much to reclaim.
1115  */
1116 long
1117 xfs_reclaim_inodes_count(
1118 	struct xfs_mount	*mp)
1119 {
1120 	XA_STATE		(xas, &mp->m_groups[XG_TYPE_AG].xa, 0);
1121 	long			reclaimable = 0;
1122 	struct xfs_perag	*pag;
1123 
1124 	rcu_read_lock();
1125 	xas_for_each_marked(&xas, pag, ULONG_MAX, XFS_PERAG_RECLAIM_MARK) {
1126 		trace_xfs_reclaim_inodes_count(pag, _THIS_IP_);
1127 		reclaimable += pag->pag_ici_reclaimable;
1128 	}
1129 	rcu_read_unlock();
1130 
1131 	return reclaimable;
1132 }
1133 
1134 STATIC bool
1135 xfs_icwalk_match_id(
1136 	struct xfs_inode	*ip,
1137 	struct xfs_icwalk	*icw)
1138 {
1139 	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1140 	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1141 		return false;
1142 
1143 	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1144 	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1145 		return false;
1146 
1147 	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1148 	    ip->i_projid != icw->icw_prid)
1149 		return false;
1150 
1151 	return true;
1152 }
1153 
1154 /*
1155  * A union-based inode filtering algorithm. Process the inode if any of the
1156  * criteria match. This is for global/internal scans only.
1157  */
1158 STATIC bool
1159 xfs_icwalk_match_id_union(
1160 	struct xfs_inode	*ip,
1161 	struct xfs_icwalk	*icw)
1162 {
1163 	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1164 	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1165 		return true;
1166 
1167 	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1168 	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1169 		return true;
1170 
1171 	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1172 	    ip->i_projid == icw->icw_prid)
1173 		return true;
1174 
1175 	return false;
1176 }
1177 
1178 /*
1179  * Is this inode @ip eligible for eof/cow block reclamation, given some
1180  * filtering parameters @icw?  The inode is eligible if @icw is null or
1181  * if the predicate functions match.
1182  */
1183 static bool
1184 xfs_icwalk_match(
1185 	struct xfs_inode	*ip,
1186 	struct xfs_icwalk	*icw)
1187 {
1188 	bool			match;
1189 
1190 	if (!icw)
1191 		return true;
1192 
1193 	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
1194 		match = xfs_icwalk_match_id_union(ip, icw);
1195 	else
1196 		match = xfs_icwalk_match_id(ip, icw);
1197 	if (!match)
1198 		return false;
1199 
1200 	/* skip the inode if the file size is too small */
1201 	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
1202 	    XFS_ISIZE(ip) < icw->icw_min_file_size)
1203 		return false;
1204 
1205 	return true;
1206 }
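/*
 * Illustrative sketch of the two filter modes above (editorial; the helper is
 * hypothetical).  Without XFS_ICWALK_FLAG_UNION every supplied ID must match
 * (intersection semantics), while with it any single match selects the inode
 * (union semantics, as used by xfs_blockgc_free_dquots() below).
 */
static void example_build_id_filter(struct xfs_icwalk *icw, kuid_t uid,
		kgid_t gid, bool any_match)
{
	memset(icw, 0, sizeof(*icw));
	icw->icw_uid = uid;
	icw->icw_gid = gid;
	icw->icw_flags = XFS_ICWALK_FLAG_UID | XFS_ICWALK_FLAG_GID;
	if (any_match)
		icw->icw_flags |= XFS_ICWALK_FLAG_UNION;
}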
1207 
1208 /*
1209  * This is a fast pass over the inode cache to try to get reclaim moving on as
1210  * many inodes as possible in a short period of time. It kicks itself every few
1211  * seconds, as well as being kicked by the inode cache shrinker when memory
1212  * goes low.
1213  */
1214 void
1215 xfs_reclaim_worker(
1216 	struct work_struct *work)
1217 {
1218 	struct xfs_mount *mp = container_of(to_delayed_work(work),
1219 					struct xfs_mount, m_reclaim_work);
1220 
1221 	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
1222 	xfs_reclaim_work_queue(mp);
1223 }
1224 
1225 STATIC int
1226 xfs_inode_free_eofblocks(
1227 	struct xfs_inode	*ip,
1228 	struct xfs_icwalk	*icw,
1229 	unsigned int		*lockflags)
1230 {
1231 	bool			wait;
1232 
1233 	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1234 
1235 	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1236 		return 0;
1237 
1238 	/*
1239 	 * If the mapping is dirty the operation can block and wait for some
1240 	 * time. Unless we are waiting, skip it.
1241 	 */
1242 	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1243 		return 0;
1244 
1245 	if (!xfs_icwalk_match(ip, icw))
1246 		return 0;
1247 
1248 	/*
1249 	 * If the caller is waiting, return -EAGAIN to keep the background
1250 	 * scanner moving and revisit the inode in a subsequent pass.
1251 	 */
1252 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1253 		if (wait)
1254 			return -EAGAIN;
1255 		return 0;
1256 	}
1257 	*lockflags |= XFS_IOLOCK_EXCL;
1258 
1259 	if (xfs_can_free_eofblocks(ip))
1260 		return xfs_free_eofblocks(ip);
1261 
1262 	/* inode could be preallocated */
1263 	trace_xfs_inode_free_eofblocks_invalid(ip);
1264 	xfs_inode_clear_eofblocks_tag(ip);
1265 	return 0;
1266 }
1267 
1268 static void
1269 xfs_blockgc_set_iflag(
1270 	struct xfs_inode	*ip,
1271 	unsigned long		iflag)
1272 {
1273 	struct xfs_mount	*mp = ip->i_mount;
1274 	struct xfs_perag	*pag;
1275 
1276 	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1277 
1278 	/*
1279 	 * Don't bother locking the AG and looking up in the radix trees
1280 	 * if we already know that we have the tag set.
1281 	 */
1282 	if (ip->i_flags & iflag)
1283 		return;
1284 	spin_lock(&ip->i_flags_lock);
1285 	ip->i_flags |= iflag;
1286 	spin_unlock(&ip->i_flags_lock);
1287 
1288 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1289 	spin_lock(&pag->pag_ici_lock);
1290 
1291 	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1292 			XFS_ICI_BLOCKGC_TAG);
1293 
1294 	spin_unlock(&pag->pag_ici_lock);
1295 	xfs_perag_put(pag);
1296 }
1297 
1298 void
1299 xfs_inode_set_eofblocks_tag(
1300 	xfs_inode_t	*ip)
1301 {
1302 	trace_xfs_inode_set_eofblocks_tag(ip);
1303 	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
1304 }
1305 
1306 static void
1307 xfs_blockgc_clear_iflag(
1308 	struct xfs_inode	*ip,
1309 	unsigned long		iflag)
1310 {
1311 	struct xfs_mount	*mp = ip->i_mount;
1312 	struct xfs_perag	*pag;
1313 	bool			clear_tag;
1314 
1315 	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1316 
1317 	spin_lock(&ip->i_flags_lock);
1318 	ip->i_flags &= ~iflag;
1319 	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
1320 	spin_unlock(&ip->i_flags_lock);
1321 
1322 	if (!clear_tag)
1323 		return;
1324 
1325 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1326 	spin_lock(&pag->pag_ici_lock);
1327 
1328 	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1329 			XFS_ICI_BLOCKGC_TAG);
1330 
1331 	spin_unlock(&pag->pag_ici_lock);
1332 	xfs_perag_put(pag);
1333 }
1334 
1335 void
1336 xfs_inode_clear_eofblocks_tag(
1337 	xfs_inode_t	*ip)
1338 {
1339 	trace_xfs_inode_clear_eofblocks_tag(ip);
1340 	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
1341 }
1342 
1343 /*
1344  * Prepare to free COW fork blocks from an inode.
1345  */
1346 static bool
1347 xfs_prep_free_cowblocks(
1348 	struct xfs_inode	*ip,
1349 	struct xfs_icwalk	*icw)
1350 {
1351 	bool			sync;
1352 
1353 	sync = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1354 
1355 	/*
1356 	 * Just clear the tag if we have an empty cow fork or none at all. It's
1357 	 * possible the inode was fully unshared since it was originally tagged.
1358 	 */
1359 	if (!xfs_inode_has_cow_data(ip)) {
1360 		trace_xfs_inode_free_cowblocks_invalid(ip);
1361 		xfs_inode_clear_cowblocks_tag(ip);
1362 		return false;
1363 	}
1364 
1365 	/*
1366 	 * A cowblocks trim of an inode can have a significant effect on
1367 	 * fragmentation even when a reasonable COW extent size hint is set.
1368 	 * Therefore, we prefer to not process cowblocks unless they are clean
1369 	 * and idle. We can never process a cowblocks inode that is dirty or has
1370 	 * in-flight I/O under any circumstances, because outstanding writeback
1371 	 * or dio expects targeted COW fork blocks exist through write
1372 	 * or dio expects targeted COW fork blocks to exist through write
1373 	 *
1374 	 * Therefore, the heuristic used here is to never process inodes
1375 	 * currently opened for write from background (i.e. non-sync) scans. For
1376 	 * sync scans, use the pagecache/dio state of the inode to ensure we
1377 	 * never free COW fork blocks out from under pending I/O.
1378 	 */
1379 	if (!sync && inode_is_open_for_write(VFS_I(ip)))
1380 		return false;
1381 	return xfs_can_free_cowblocks(ip);
1382 }
1383 
1384 /*
1385  * Automatic CoW Reservation Freeing
1386  *
1387  * These functions automatically garbage collect leftover CoW reservations
1388  * that were made on behalf of a cowextsize hint when we start to run out
1389  * of quota or when the reservations sit around for too long.  If the file
1390  * has dirty pages or is undergoing writeback, its CoW reservations will
1391  * be retained.
1392  *
1393  * The actual garbage collection piggybacks off the same code that runs
1394  * the speculative EOF preallocation garbage collector.
1395  */
1396 STATIC int
1397 xfs_inode_free_cowblocks(
1398 	struct xfs_inode	*ip,
1399 	struct xfs_icwalk	*icw,
1400 	unsigned int		*lockflags)
1401 {
1402 	bool			wait;
1403 	int			ret = 0;
1404 
1405 	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1406 
1407 	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1408 		return 0;
1409 
1410 	if (!xfs_prep_free_cowblocks(ip, icw))
1411 		return 0;
1412 
1413 	if (!xfs_icwalk_match(ip, icw))
1414 		return 0;
1415 
1416 	/*
1417 	 * If the caller is waiting, return -EAGAIN to keep the background
1418 	 * scanner moving and revisit the inode in a subsequent pass.
1419 	 */
1420 	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
1421 	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1422 		if (wait)
1423 			return -EAGAIN;
1424 		return 0;
1425 	}
1426 	*lockflags |= XFS_IOLOCK_EXCL;
1427 
1428 	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1429 		if (wait)
1430 			return -EAGAIN;
1431 		return 0;
1432 	}
1433 	*lockflags |= XFS_MMAPLOCK_EXCL;
1434 
1435 	/*
1436 	 * Check again, nobody else should be able to dirty blocks or change
1437 	 * the reflink iflag now that we have the first two locks held.
1438 	 */
1439 	if (xfs_prep_free_cowblocks(ip, icw))
1440 		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1441 	return ret;
1442 }
1443 
1444 void
1445 xfs_inode_set_cowblocks_tag(
1446 	xfs_inode_t	*ip)
1447 {
1448 	trace_xfs_inode_set_cowblocks_tag(ip);
1449 	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
1450 }
1451 
1452 void
1453 xfs_inode_clear_cowblocks_tag(
1454 	xfs_inode_t	*ip)
1455 {
1456 	trace_xfs_inode_clear_cowblocks_tag(ip);
1457 	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
1458 }
1459 
1460 /* Disable post-EOF and CoW block auto-reclamation. */
1461 void
1462 xfs_blockgc_stop(
1463 	struct xfs_mount	*mp)
1464 {
1465 	struct xfs_perag	*pag = NULL;
1466 
1467 	if (!xfs_clear_blockgc_enabled(mp))
1468 		return;
1469 
1470 	while ((pag = xfs_perag_next(mp, pag)))
1471 		cancel_delayed_work_sync(&pag->pag_blockgc_work);
1472 	trace_xfs_blockgc_stop(mp, __return_address);
1473 }
1474 
1475 /* Enable post-EOF and CoW block auto-reclamation. */
1476 void
1477 xfs_blockgc_start(
1478 	struct xfs_mount	*mp)
1479 {
1480 	struct xfs_perag	*pag = NULL;
1481 
1482 	if (xfs_set_blockgc_enabled(mp))
1483 		return;
1484 
1485 	trace_xfs_blockgc_start(mp, __return_address);
1486 	while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
1487 		xfs_blockgc_queue(pag);
1488 }
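/*
 * Illustrative note (editorial): the stop/start pair above is meant to
 * bracket periods when no new blockgc work may run, such as a filesystem
 * freeze.  The hooks that do this live outside this file; the helper below
 * is a hypothetical sketch of that usage.
 */
static void example_toggle_blockgc_for_freeze(struct xfs_mount *mp,
		bool freezing)
{
	if (freezing)
		xfs_blockgc_stop(mp);	/* cancel and bar per-AG gc workers */
	else
		xfs_blockgc_start(mp);	/* requeue workers for tagged AGs */
}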
1489 
1490 /* Don't try to run block gc on an inode that's in any of these states. */
1491 #define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
1492 					 XFS_NEED_INACTIVE | \
1493 					 XFS_INACTIVATING | \
1494 					 XFS_IRECLAIMABLE | \
1495 					 XFS_IRECLAIM)
1496 /*
1497  * Decide if the given @ip is eligible for garbage collection of speculative
1498  * preallocations, and grab it if so.  Returns true if it's ready to go or
1499  * false if we should just ignore it.
1500  */
1501 static bool
1502 xfs_blockgc_igrab(
1503 	struct xfs_inode	*ip)
1504 {
1505 	struct inode		*inode = VFS_I(ip);
1506 
1507 	ASSERT(rcu_read_lock_held());
1508 
1509 	/* Check for stale RCU freed inode */
1510 	spin_lock(&ip->i_flags_lock);
1511 	if (!ip->i_ino)
1512 		goto out_unlock_noent;
1513 
1514 	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
1515 		goto out_unlock_noent;
1516 	spin_unlock(&ip->i_flags_lock);
1517 
1518 	/* nothing to sync during shutdown */
1519 	if (xfs_is_shutdown(ip->i_mount))
1520 		return false;
1521 
1522 	/* If we can't grab the inode, it must be on its way to reclaim. */
1523 	if (!igrab(inode))
1524 		return false;
1525 
1526 	/* inode is valid */
1527 	return true;
1528 
1529 out_unlock_noent:
1530 	spin_unlock(&ip->i_flags_lock);
1531 	return false;
1532 }
1533 
1534 /* Scan one incore inode for block preallocations that we can remove. */
1535 static int
1536 xfs_blockgc_scan_inode(
1537 	struct xfs_inode	*ip,
1538 	struct xfs_icwalk	*icw)
1539 {
1540 	unsigned int		lockflags = 0;
1541 	int			error;
1542 
1543 	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
1544 	if (error)
1545 		goto unlock;
1546 
1547 	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
1548 unlock:
1549 	if (lockflags)
1550 		xfs_iunlock(ip, lockflags);
1551 	xfs_irele(ip);
1552 	return error;
1553 }
1554 
1555 /* Background worker that trims preallocated space. */
1556 void
1557 xfs_blockgc_worker(
1558 	struct work_struct	*work)
1559 {
1560 	struct xfs_perag	*pag = container_of(to_delayed_work(work),
1561 					struct xfs_perag, pag_blockgc_work);
1562 	struct xfs_mount	*mp = pag_mount(pag);
1563 	int			error;
1564 
1565 	trace_xfs_blockgc_worker(mp, __return_address);
1566 
1567 	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
1568 	if (error)
1569 		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1570 				pag_agno(pag), error);
1571 	xfs_blockgc_queue(pag);
1572 }
1573 
1574 /*
1575  * Try to free space in the filesystem by purging inactive inodes, eofblocks
1576  * and cowblocks.
1577  */
1578 int
1579 xfs_blockgc_free_space(
1580 	struct xfs_mount	*mp,
1581 	struct xfs_icwalk	*icw)
1582 {
1583 	int			error;
1584 
1585 	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
1586 
1587 	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
1588 	if (error)
1589 		return error;
1590 
1591 	return xfs_inodegc_flush(mp);
1592 }
1593 
1594 /*
1595  * Reclaim all the free space that we can by scheduling the background blockgc
1596  * and inodegc workers immediately and waiting for them all to clear.
1597  */
1598 int
1599 xfs_blockgc_flush_all(
1600 	struct xfs_mount	*mp)
1601 {
1602 	struct xfs_perag	*pag = NULL;
1603 
1604 	trace_xfs_blockgc_flush_all(mp, __return_address);
1605 
1606 	/*
1607 	 * For each blockgc worker, move its queue time up to now.  If it wasn't
1608 	 * queued, it will not be requeued.  Then flush whatever is left.
1609 	 */
1610 	while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
1611 		mod_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work, 0);
1612 
1613 	while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
1614 		flush_delayed_work(&pag->pag_blockgc_work);
1615 
1616 	return xfs_inodegc_flush(mp);
1617 }
1618 
1619 /*
1620  * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
1621  * quota caused an allocation failure, so we make a best effort by including
1622  * each quota under low free space conditions (less than 1% free space) in the
1623  * scan.
1624  *
1625  * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
1626  * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
1627  * MMAPLOCK.
1628  */
1629 int
1630 xfs_blockgc_free_dquots(
1631 	struct xfs_mount	*mp,
1632 	struct xfs_dquot	*udqp,
1633 	struct xfs_dquot	*gdqp,
1634 	struct xfs_dquot	*pdqp,
1635 	unsigned int		iwalk_flags)
1636 {
1637 	struct xfs_icwalk	icw = {0};
1638 	bool			do_work = false;
1639 
1640 	if (!udqp && !gdqp && !pdqp)
1641 		return 0;
1642 
1643 	/*
1644 	 * Run a scan to free blocks using the union filter to cover all
1645 	 * applicable quotas in a single scan.
1646 	 */
1647 	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
1648 
1649 	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1650 		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1651 		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
1652 		do_work = true;
1653 	}
1654 
1655 	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1656 		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1657 		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
1658 		do_work = true;
1659 	}
1660 
1661 	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1662 		icw.icw_prid = pdqp->q_id;
1663 		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
1664 		do_work = true;
1665 	}
1666 
1667 	if (!do_work)
1668 		return 0;
1669 
1670 	return xfs_blockgc_free_space(mp, &icw);
1671 }
1672 
1673 /* Run cow/eofblocks scans on the quotas attached to the inode. */
1674 int
1675 xfs_blockgc_free_quota(
1676 	struct xfs_inode	*ip,
1677 	unsigned int		iwalk_flags)
1678 {
1679 	return xfs_blockgc_free_dquots(ip->i_mount,
1680 			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1681 			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
1682 			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
1683 }
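/*
 * Illustrative sketch of the intended caller pattern (editorial; the real
 * retry loop lives in the buffered write path and this helper, including its
 * one-retry policy, is hypothetical).  On a quota failure, trim speculative
 * preallocations charged to this inode's dquots and ask the caller to retry.
 */
static int example_retry_after_edquot(struct xfs_inode *ip, int error,
		bool *retried)
{
	if (error != -EDQUOT || *retried)
		return error;

	*retried = true;
	error = xfs_blockgc_free_quota(ip, 0);
	if (error)
		return error;
	return -EAGAIN;		/* caller should retry the failed operation */
}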
1684 
1685 /* XFS Inode Cache Walking Code */
1686 
1687 /*
1688  * The inode lookup is done in batches to keep the amount of lock traffic and
1689  * radix tree lookups to a minimum. The batch size is a trade off between
1690  * lookup reduction and stack usage. This is in the reclaim path, so we can't
1691  * be too greedy.
1692  */
1693 #define XFS_LOOKUP_BATCH	32
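/*
 * Worked example of the trade-off above (editorial; assumes 64-bit pointers):
 * a batch of 32 entries costs 32 * sizeof(struct xfs_inode *) = 256 bytes of
 * on-stack array in xfs_icwalk_ag(), while reducing the number of radix tree
 * gang lookups and RCU lock/unlock cycles per AG walk by up to a factor of 32.
 */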
1694 
1695 
1696 /*
1697  * Decide if we want to grab this inode in anticipation of doing work towards
1698  * the goal.
1699  */
1700 static inline bool
1701 xfs_icwalk_igrab(
1702 	enum xfs_icwalk_goal	goal,
1703 	struct xfs_inode	*ip,
1704 	struct xfs_icwalk	*icw)
1705 {
1706 	switch (goal) {
1707 	case XFS_ICWALK_BLOCKGC:
1708 		return xfs_blockgc_igrab(ip);
1709 	case XFS_ICWALK_RECLAIM:
1710 		return xfs_reclaim_igrab(ip, icw);
1711 	default:
1712 		return false;
1713 	}
1714 }
1715 
1716 /*
1717  * Process an inode.  Each processing function must handle any state changes
1718  * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
1719  */
1720 static inline int
1721 xfs_icwalk_process_inode(
1722 	enum xfs_icwalk_goal	goal,
1723 	struct xfs_inode	*ip,
1724 	struct xfs_perag	*pag,
1725 	struct xfs_icwalk	*icw)
1726 {
1727 	int			error = 0;
1728 
1729 	switch (goal) {
1730 	case XFS_ICWALK_BLOCKGC:
1731 		error = xfs_blockgc_scan_inode(ip, icw);
1732 		break;
1733 	case XFS_ICWALK_RECLAIM:
1734 		xfs_reclaim_inode(ip, pag);
1735 		break;
1736 	}
1737 	return error;
1738 }
1739 
1740 /*
1741  * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1742  * process them in some manner.
1743  */
1744 static int
1745 xfs_icwalk_ag(
1746 	struct xfs_perag	*pag,
1747 	enum xfs_icwalk_goal	goal,
1748 	struct xfs_icwalk	*icw)
1749 {
1750 	struct xfs_mount	*mp = pag_mount(pag);
1751 	uint32_t		first_index;
1752 	int			last_error = 0;
1753 	int			skipped;
1754 	bool			done;
1755 	int			nr_found;
1756 
1757 restart:
1758 	done = false;
1759 	skipped = 0;
1760 	if (goal == XFS_ICWALK_RECLAIM)
1761 		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1762 	else
1763 		first_index = 0;
1764 	nr_found = 0;
1765 	do {
1766 		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1767 		int		error = 0;
1768 		int		i;
1769 
1770 		rcu_read_lock();
1771 
1772 		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1773 				(void **) batch, first_index,
1774 				XFS_LOOKUP_BATCH, goal);
1775 		if (!nr_found) {
1776 			done = true;
1777 			rcu_read_unlock();
1778 			break;
1779 		}
1780 
1781 		/*
1782 		 * Grab the inodes before we drop the lock. If we found
1783 		 * nothing, nr_found == 0 and the loop will be skipped.
1784 		 */
1785 		for (i = 0; i < nr_found; i++) {
1786 			struct xfs_inode *ip = batch[i];
1787 
1788 			if (done || !xfs_icwalk_igrab(goal, ip, icw))
1789 				batch[i] = NULL;
1790 
1791 			/*
1792 			 * Update the index for the next lookup. Catch
1793 			 * overflows into the next AG range which can occur if
1794 			 * we have inodes in the last block of the AG and we
1795 			 * are currently pointing to the last inode.
1796 			 *
1797 			 * Because we may see inodes that are from the wrong AG
1798 			 * due to RCU freeing and reallocation, only update the
1799 			 * index if it lies in this AG. It was a race that led
1800 			 * us to see this inode, so another lookup from the
1801 			 * same index will not find it again.
1802 			 */
1803 			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag_agno(pag))
1804 				continue;
1805 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1806 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1807 				done = true;
1808 		}
1809 
1810 		/* Unlock now that we've grabbed the inodes. */
1811 		rcu_read_unlock();
1812 
1813 		for (i = 0; i < nr_found; i++) {
1814 			if (!batch[i])
1815 				continue;
1816 			error = xfs_icwalk_process_inode(goal, batch[i], pag,
1817 					icw);
1818 			if (error == -EAGAIN) {
1819 				skipped++;
1820 				continue;
1821 			}
1822 			if (error && last_error != -EFSCORRUPTED)
1823 				last_error = error;
1824 		}
1825 
1826 		/* bail out if the filesystem is corrupted.  */
1827 		if (error == -EFSCORRUPTED)
1828 			break;
1829 
1830 		cond_resched();
1831 
1832 		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1833 			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1834 			if (icw->icw_scan_limit <= 0)
1835 				break;
1836 		}
1837 	} while (nr_found && !done);
1838 
1839 	if (goal == XFS_ICWALK_RECLAIM) {
1840 		if (done)
1841 			first_index = 0;
1842 		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1843 	}
1844 
1845 	if (skipped) {
1846 		delay(1);
1847 		goto restart;
1848 	}
1849 	return last_error;
1850 }
1851 
1852 /* Walk all incore inodes to achieve a given goal. */
1853 static int
1854 xfs_icwalk(
1855 	struct xfs_mount	*mp,
1856 	enum xfs_icwalk_goal	goal,
1857 	struct xfs_icwalk	*icw)
1858 {
1859 	struct xfs_perag	*pag = NULL;
1860 	int			error = 0;
1861 	int			last_error = 0;
1862 
1863 	while ((pag = xfs_perag_grab_next_tag(mp, pag, goal))) {
1864 		error = xfs_icwalk_ag(pag, goal, icw);
1865 		if (error) {
1866 			last_error = error;
1867 			if (error == -EFSCORRUPTED) {
1868 				xfs_perag_rele(pag);
1869 				break;
1870 			}
1871 		}
1872 	}
1873 	return last_error;
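	/*
	 * Compile-time assertion only: BUILD_BUG_ON() emits no object code,
	 * so placing it after the return statement is harmless.
	 */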
1874 	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
1875 }
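
/*
 * Illustrative sketch only (not part of the original file): bounding a
 * reclaim walk to a fixed number of inodes, in the way a shrinker-driven
 * caller elsewhere in this file is expected to.  The helper name and the
 * 1024 limit are assumptions for illustration.
 */
static inline int
xfs_example_reclaim_limited(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= 1024,
	};

	return xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
}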
1876 
1877 #ifdef DEBUG
1878 static void
1879 xfs_check_delalloc(
1880 	struct xfs_inode	*ip,
1881 	int			whichfork)
1882 {
1883 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1884 	struct xfs_bmbt_irec	got;
1885 	struct xfs_iext_cursor	icur;
1886 
1887 	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1888 		return;
1889 	do {
1890 		if (isnullstartblock(got.br_startblock)) {
1891 			xfs_warn(ip->i_mount,
1892 	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
1893 				ip->i_ino,
1894 				whichfork == XFS_DATA_FORK ? "data" : "cow",
1895 				got.br_startoff, got.br_blockcount);
1896 		}
1897 	} while (xfs_iext_next_extent(ifp, &icur, &got));
1898 }
1899 #else
1900 #define xfs_check_delalloc(ip, whichfork)	do { } while (0)
1901 #endif
1902 
1903 /* Schedule the inode for reclaim. */
1904 static void
1905 xfs_inodegc_set_reclaimable(
1906 	struct xfs_inode	*ip)
1907 {
1908 	struct xfs_mount	*mp = ip->i_mount;
1909 	struct xfs_perag	*pag;
1910 
1911 	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
1912 		xfs_check_delalloc(ip, XFS_DATA_FORK);
1913 		xfs_check_delalloc(ip, XFS_COW_FORK);
1914 		ASSERT(0);
1915 	}
1916 
1917 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1918 	spin_lock(&pag->pag_ici_lock);
1919 	spin_lock(&ip->i_flags_lock);
1920 
1921 	trace_xfs_inode_set_reclaimable(ip);
1922 	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1923 	ip->i_flags |= XFS_IRECLAIMABLE;
1924 	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1925 			XFS_ICI_RECLAIM_TAG);
1926 
1927 	spin_unlock(&ip->i_flags_lock);
1928 	spin_unlock(&pag->pag_ici_lock);
1929 	xfs_perag_put(pag);
1930 }
1931 
1932 /*
1933  * Free all speculative preallocations and possibly even the inode itself.
1934  * This is the last chance to make changes to an otherwise unreferenced file
1935  * before incore reclamation happens.
1936  */
1937 static int
1938 xfs_inodegc_inactivate(
1939 	struct xfs_inode	*ip)
1940 {
1941 	int			error;
1942 
1943 	trace_xfs_inode_inactivating(ip);
1944 	error = xfs_inactive(ip);
1945 	xfs_inodegc_set_reclaimable(ip);
1946 	return error;
1947 
1948 }
1949 
1950 void
1951 xfs_inodegc_worker(
1952 	struct work_struct	*work)
1953 {
1954 	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
1955 						struct xfs_inodegc, work);
1956 	struct llist_node	*node = llist_del_all(&gc->list);
1957 	struct xfs_inode	*ip, *n;
1958 	struct xfs_mount	*mp = gc->mp;
1959 	unsigned int		nofs_flag;
1960 
1961 	/*
1962 	 * Clear the cpu mask bit and ensure that we have seen the latest
1963 	 * update of the gc structure associated with this CPU. This matches
1964 	 * the release semantics used when setting the cpumask bit in
1965 	 * xfs_inodegc_queue.
1966 	 */
1967 	cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask);
1968 	smp_mb__after_atomic();
1969 
1970 	WRITE_ONCE(gc->items, 0);
1971 
1972 	if (!node)
1973 		return;
1974 
1975 	/*
1976 	 * We can allocate memory here while doing writeback on behalf of
1977 	 * memory reclaim.  To avoid memory allocation deadlocks set the
1978 	 * task-wide nofs context for the following operations.
1979 	 */
1980 	nofs_flag = memalloc_nofs_save();
1981 
1982 	ip = llist_entry(node, struct xfs_inode, i_gclist);
1983 	trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits));
1984 
1985 	WRITE_ONCE(gc->shrinker_hits, 0);
1986 	llist_for_each_entry_safe(ip, n, node, i_gclist) {
1987 		int	error;
1988 
1989 		xfs_iflags_set(ip, XFS_INACTIVATING);
1990 		error = xfs_inodegc_inactivate(ip);
1991 		if (error && !gc->error)
1992 			gc->error = error;
1993 	}
1994 
1995 	memalloc_nofs_restore(nofs_flag);
1996 }
1997 
1998 /*
1999  * Expedite all pending inodegc work to run immediately. This does not wait for
2000  * completion of the work.
2001  */
2002 void
2003 xfs_inodegc_push(
2004 	struct xfs_mount	*mp)
2005 {
2006 	if (!xfs_is_inodegc_enabled(mp))
2007 		return;
2008 	trace_xfs_inodegc_push(mp, __return_address);
2009 	xfs_inodegc_queue_all(mp);
2010 }
2011 
2012 /*
2013  * Force all currently queued inode inactivation work to run immediately and
2014  * wait for the work to finish.
2015  */
2016 int
2017 xfs_inodegc_flush(
2018 	struct xfs_mount	*mp)
2019 {
2020 	xfs_inodegc_push(mp);
2021 	trace_xfs_inodegc_flush(mp, __return_address);
2022 	return xfs_inodegc_wait_all(mp);
2023 }
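
/*
 * Illustrative usage (an assumption, not taken from this file): a caller
 * that needs the effects of pending inactivations to be visible before it
 * proceeds would do something like:
 *
 *	error = xfs_inodegc_flush(mp);
 *	if (error)
 *		return error;
 */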
2024 
2025 /*
2026  * Flush all the pending work and then disable the inode inactivation background
2027  * workers and wait for them to stop.  Caller must hold sb->s_umount to
2028  * coordinate changes in the inodegc_enabled state.
2029  */
2030 void
2031 xfs_inodegc_stop(
2032 	struct xfs_mount	*mp)
2033 {
2034 	bool			rerun;
2035 
2036 	if (!xfs_clear_inodegc_enabled(mp))
2037 		return;
2038 
2039 	/*
2040 	 * Drain all pending inodegc work, including inodes that could be
2041 	 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
2042 	 * threads that sample the inodegc state just prior to us clearing it.
2043 	 * The inodegc flag state prevents new threads from queuing more
2044 	 * inodes, so we queue pending work items and flush the workqueue until
2045 	 * all inodegc lists are empty.  IOWs, we cannot use drain_workqueue
2046 	 * here because it does not allow other unserialized mechanisms to
2047 	 * reschedule inodegc work while this draining is in progress.
2048 	 */
2049 	xfs_inodegc_queue_all(mp);
2050 	do {
2051 		flush_workqueue(mp->m_inodegc_wq);
2052 		rerun = xfs_inodegc_queue_all(mp);
2053 	} while (rerun);
2054 
2055 	trace_xfs_inodegc_stop(mp, __return_address);
2056 }
2057 
2058 /*
2059  * Enable the inode inactivation background workers and schedule deferred inode
2060  * inactivation work if there is any.  Caller must hold sb->s_umount to
2061  * coordinate changes in the inodegc_enabled state.
2062  */
2063 void
2064 xfs_inodegc_start(
2065 	struct xfs_mount	*mp)
2066 {
2067 	if (xfs_set_inodegc_enabled(mp))
2068 		return;
2069 
2070 	trace_xfs_inodegc_start(mp, __return_address);
2071 	xfs_inodegc_queue_all(mp);
2072 }
2073 
2074 #ifdef CONFIG_XFS_RT
2075 static inline bool
2076 xfs_inodegc_want_queue_rt_file(
2077 	struct xfs_inode	*ip)
2078 {
2079 	struct xfs_mount	*mp = ip->i_mount;
2080 
2081 	if (!XFS_IS_REALTIME_INODE(ip) || xfs_has_zoned(mp))
2082 		return false;
2083 
2084 	if (xfs_compare_freecounter(mp, XC_FREE_RTEXTENTS,
2085 				mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
2086 				XFS_FDBLOCKS_BATCH) < 0)
2087 		return true;
2088 
2089 	return false;
2090 }
2091 #else
2092 # define xfs_inodegc_want_queue_rt_file(ip)	(false)
2093 #endif /* CONFIG_XFS_RT */
2094 
2095 /*
2096  * Schedule the inactivation worker when:
2097  *
2098  *  - We've accumulated more than one inode cluster buffer's worth of inodes.
2099  *  - There is less than 5% free space left.
2100  *  - Any of the quotas for this inode are near an enforcement limit.
2101  */
2102 static inline bool
2103 xfs_inodegc_want_queue_work(
2104 	struct xfs_inode	*ip,
2105 	unsigned int		items)
2106 {
2107 	struct xfs_mount	*mp = ip->i_mount;
2108 
2109 	if (items > mp->m_ino_geo.inodes_per_cluster)
2110 		return true;
2111 
2112 	if (xfs_compare_freecounter(mp, XC_FREE_BLOCKS,
2113 				mp->m_low_space[XFS_LOWSP_5_PCNT],
2114 				XFS_FDBLOCKS_BATCH) < 0)
2115 		return true;
2116 
2117 	if (xfs_inodegc_want_queue_rt_file(ip))
2118 		return true;
2119 
2120 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
2121 		return true;
2122 
2123 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
2124 		return true;
2125 
2126 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
2127 		return true;
2128 
2129 	return false;
2130 }
2131 
2132 /*
2133  * Upper bound on the number of inodes each CPU can have queued for
2134  * inactivation at any given time, to avoid monopolizing the workqueue.
2135  */
2136 #define XFS_INODEGC_MAX_BACKLOG		(4 * XFS_INODES_PER_CHUNK)
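/* With XFS_INODES_PER_CHUNK == 64 this allows 256 queued inodes per CPU. */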
2137 
2138 /*
2139  * Make the frontend wait for inactivations when:
2140  *
2141  *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
2142  *  - The queue depth exceeds the maximum allowable percpu backlog.
2143  *
2144  * Note: If we are in a NOFS context here (e.g. the current thread is running
2145  * a transaction) then we don't want to block, because inodegc progress may
2146  * require filesystem resources that we already hold, which could deadlock.
2147  * Hence we skip out of here if we are in a scoped NOFS context.
2148  */
2149 static inline bool
2150 xfs_inodegc_want_flush_work(
2151 	struct xfs_inode	*ip,
2152 	unsigned int		items,
2153 	unsigned int		shrinker_hits)
2154 {
2155 	if (current->flags & PF_MEMALLOC_NOFS)
2156 		return false;
2157 
2158 	if (shrinker_hits > 0)
2159 		return true;
2160 
2161 	if (items > XFS_INODEGC_MAX_BACKLOG)
2162 		return true;
2163 
2164 	return false;
2165 }
2166 
2167 /*
2168  * Queue a background inactivation worker if there are inodes that need to be
2169  * inactivated and higher level xfs code hasn't disabled the background
2170  * workers.
2171  */
2172 static void
2173 xfs_inodegc_queue(
2174 	struct xfs_inode	*ip)
2175 {
2176 	struct xfs_mount	*mp = ip->i_mount;
2177 	struct xfs_inodegc	*gc;
2178 	int			items;
2179 	unsigned int		shrinker_hits;
2180 	unsigned int		cpu_nr;
2181 	unsigned long		queue_delay = 1;
2182 
2183 	trace_xfs_inode_set_need_inactive(ip);
2184 	spin_lock(&ip->i_flags_lock);
2185 	ip->i_flags |= XFS_NEED_INACTIVE;
2186 	spin_unlock(&ip->i_flags_lock);
2187 
2188 	cpu_nr = get_cpu();
2189 	gc = this_cpu_ptr(mp->m_inodegc);
2190 	llist_add(&ip->i_gclist, &gc->list);
2191 	items = READ_ONCE(gc->items);
2192 	WRITE_ONCE(gc->items, items + 1);
2193 	shrinker_hits = READ_ONCE(gc->shrinker_hits);
2194 
2195 	/*
2196 	 * Ensure the list add is always seen by anyone who finds the cpumask
2197 	 * bit set. This effectively gives the cpumask bit set operation
2198 	 * release ordering semantics.
2199 	 */
2200 	smp_mb__before_atomic();
2201 	if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask))
2202 		cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);
2203 
2204 	/*
2205 	 * We queue the work while holding the current CPU so that the work
2206 	 * is scheduled to run on this CPU.
2207 	 */
2208 	if (!xfs_is_inodegc_enabled(mp)) {
2209 		put_cpu();
2210 		return;
2211 	}
2212 
2213 	if (xfs_inodegc_want_queue_work(ip, items))
2214 		queue_delay = 0;
2215 
2216 	trace_xfs_inodegc_queue(mp, __return_address);
2217 	mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
2218 			queue_delay);
2219 	put_cpu();
2220 
2221 	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
2222 		trace_xfs_inodegc_throttle(mp, __return_address);
2223 		flush_delayed_work(&gc->work);
2224 	}
2225 }
2226 
2227 /*
2228  * We set the inode flag atomically with the radix tree tag.  Once we get tag
2229  * lookups on the radix tree, this inode flag can go away.
2230  *
2231  * We always use background reclaim here because even if the inode is clean, it
2232  * still may be under IO and hence we have wait for IO completion to occur
2233  * still may be under IO and hence we have to wait for IO completion to occur
2234  * more efficiently than we can here, so simply let background reclaim tear down
2235  * all inodes.
2236  */
2237 void
2238 xfs_inode_mark_reclaimable(
2239 	struct xfs_inode	*ip)
2240 {
2241 	struct xfs_mount	*mp = ip->i_mount;
2242 	bool			need_inactive;
2243 
2244 	XFS_STATS_INC(mp, vn_reclaim);
2245 
2246 	/*
2247 	 * We should never get here with any of the reclaim flags already set.
2248 	 */
2249 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2250 
2251 	need_inactive = xfs_inode_needs_inactive(ip);
2252 	if (need_inactive) {
2253 		xfs_inodegc_queue(ip);
2254 		return;
2255 	}
2256 
2257 	/* Going straight to reclaim, so drop the dquots. */
2258 	xfs_qm_dqdetach(ip);
2259 	xfs_inodegc_set_reclaimable(ip);
2260 }
2261 
2262 /*
2263  * Register a phony shrinker so that we can run background inodegc sooner when
2264  * there's memory pressure.  Inactivation does not itself free any memory but
2265  * it does make inodes reclaimable, which eventually frees memory.
2266  *
2267  * The count function, seek value, and batch value are crafted to trigger the
2268  * scan function during the second round of scanning.  Hopefully this means
2269  * that we reclaimed enough memory that initiating metadata transactions won't
2270  * make things worse.
2271  */
2272 #define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
2273 #define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
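
/*
 * Worked example, assuming the current do_shrink_slab() behaviour in which a
 * zero-seeks shrinker contributes freeable / 2 objects per reclaim pass: the
 * first pass accumulates 4096 / 2 = 2048, which is below the batch size of
 * 2049 and is deferred; the second pass reaches 4096 >= 2049 and invokes
 * ->scan_objects.
 */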
2274 
2275 static unsigned long
2276 xfs_inodegc_shrinker_count(
2277 	struct shrinker		*shrink,
2278 	struct shrink_control	*sc)
2279 {
2280 	struct xfs_mount	*mp = shrink->private_data;
2281 	struct xfs_inodegc	*gc;
2282 	int			cpu;
2283 
2284 	if (!xfs_is_inodegc_enabled(mp))
2285 		return 0;
2286 
2287 	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
2288 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
2289 		if (!llist_empty(&gc->list))
2290 			return XFS_INODEGC_SHRINKER_COUNT;
2291 	}
2292 
2293 	return 0;
2294 }
2295 
2296 static unsigned long
2297 xfs_inodegc_shrinker_scan(
2298 	struct shrinker		*shrink,
2299 	struct shrink_control	*sc)
2300 {
2301 	struct xfs_mount	*mp = shrink->private_data;
2302 	struct xfs_inodegc	*gc;
2303 	int			cpu;
2304 	bool			no_items = true;
2305 
2306 	if (!xfs_is_inodegc_enabled(mp))
2307 		return SHRINK_STOP;
2308 
2309 	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
2310 
2311 	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
2312 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
2313 		if (!llist_empty(&gc->list)) {
2314 			unsigned int	h = READ_ONCE(gc->shrinker_hits);
2315 
2316 			WRITE_ONCE(gc->shrinker_hits, h + 1);
2317 			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
2318 			no_items = false;
2319 		}
2320 	}
2321 
2322 	/*
2323 	 * If there are no inodes to inactivate, we don't want the shrinker
2324 	 * to think there's deferred work to call us back about.
2325 	 */
2326 	if (no_items)
2327 		return LONG_MAX;
2328 
2329 	return SHRINK_STOP;
2330 }
2331 
2332 /* Register a shrinker so we can accelerate inodegc and throttle queuing. */
2333 int
2334 xfs_inodegc_register_shrinker(
2335 	struct xfs_mount	*mp)
2336 {
2337 	mp->m_inodegc_shrinker = shrinker_alloc(SHRINKER_NONSLAB,
2338 						"xfs-inodegc:%s",
2339 						mp->m_super->s_id);
2340 	if (!mp->m_inodegc_shrinker)
2341 		return -ENOMEM;
2342 
2343 	mp->m_inodegc_shrinker->count_objects = xfs_inodegc_shrinker_count;
2344 	mp->m_inodegc_shrinker->scan_objects = xfs_inodegc_shrinker_scan;
2345 	mp->m_inodegc_shrinker->seeks = 0;
2346 	mp->m_inodegc_shrinker->batch = XFS_INODEGC_SHRINKER_BATCH;
2347 	mp->m_inodegc_shrinker->private_data = mp;
2348 
2349 	shrinker_register(mp->m_inodegc_shrinker);
2350 
2351 	return 0;
2352 }
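
/*
 * The shrinker allocated here is expected to be released with
 * shrinker_free(mp->m_inodegc_shrinker) when the mount is torn down; that
 * happens outside this file.
 */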
2353