// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"
#include "xfs_health.h"
#include "xfs_da_format.h"
#include "xfs_dir2.h"
#include "xfs_metafile.h"

#include <linux/iversion.h>

/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes. These can correspond with incore inode
 * radix tree tags when convenient. Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
};
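
/*
 * Note the deliberate correspondence above: xfs_icwalk_ag() passes the goal
 * value straight to radix_tree_gang_lookup_tag() as the radix tree tag to
 * scan for.
 */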

static int xfs_icwalk(struct xfs_mount *mp,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);

/*
 * Private inode cache walk flags for struct xfs_icwalk. Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)

#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
					 XFS_ICWALK_FLAG_UNION)

/* Marks for the perag xarray */
#define XFS_PERAG_RECLAIM_MARK	XA_MARK_0
#define XFS_PERAG_BLOCKGC_MARK	XA_MARK_1

static inline xa_mark_t ici_tag_to_mark(unsigned int tag)
{
	if (tag == XFS_ICI_RECLAIM_TAG)
		return XFS_PERAG_RECLAIM_MARK;
	ASSERT(tag == XFS_ICI_BLOCKGC_TAG);
	return XFS_PERAG_BLOCKGC_MARK;
}
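
/*
 * The xarray marks mirror the per-AG radix tree tags so that mount-wide
 * scans can jump straight to AGs that contain tagged inodes instead of
 * probing the inode tree of every AG.
 */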

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;
	mapping_set_folio_min_order(VFS_I(ip)->i_mapping,
				    M_IGEO(mp)->min_folio_order);

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_cowfp = NULL;
	memset(&ip->i_af, 0, sizeof(ip->i_af));
	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);
	ip->i_next_unlinked = NULLAGINO;
	ip->i_prev_unlinked = 0;

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	xfs_ifork_zap_attr(ip);

	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_cache, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

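	/*
	 * Defer the actual freeing to an RCU callback so that lockless cache
	 * lookups which still hold a pointer to this inode can safely detect
	 * and skip it.
	 */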
	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
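
/*
 * A note on the delay above: xfs_syncd_centisecs is in centiseconds, so
 * (centisecs / 6) * 10 converts one sixth of the sync period to
 * milliseconds. With the default of 3000 (30s), background reclaim runs
 * every five seconds.
 */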

/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag_mount(pag);

	if (!xfs_is_blockgc_enabled(mp))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work,
				   secs_to_jiffies(xfs_blockgc_secs));
	rcu_read_unlock();
}

/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable++;

	if (was_tagged)
		return;

	/* propagate the tag up into the pag xarray tree */
	xfs_group_set_mark(pag_group(pag), ici_tag_to_mark(tag));

	/* start background work */
	switch (tag) {
	case XFS_ICI_RECLAIM_TAG:
		xfs_reclaim_work_queue(pag_mount(pag));
		break;
	case XFS_ICI_BLOCKGC_TAG:
		xfs_blockgc_queue(pag);
		break;
	}

	trace_xfs_perag_set_inode_tag(pag, _RET_IP_);
}

/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	lockdep_assert_held(&pag->pag_ici_lock);

	/*
	 * Reclaim can signal (with a null agino) that it cleared its own tag
	 * by removing the inode from the radix tree.
	 */
	if (agino != NULLAGINO)
		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	else
		ASSERT(tag == XFS_ICI_RECLAIM_TAG);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable--;

	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	/* clear the tag from the pag xarray */
	xfs_group_clear_mark(pag_group(pag), ici_tag_to_mark(tag));
	trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
}

/*
 * Find the next AG after @pag, or the first AG if @pag is NULL.
 */
static struct xfs_perag *
xfs_perag_grab_next_tag(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			tag)
{
	return to_perag(xfs_group_grab_next_mark(mp,
			pag ? pag_group(pag) : NULL,
			ici_tag_to_mark(tag), XG_TYPE_AG));
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;
	unsigned long		state = inode->i_state;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	inode->i_state = state;
	mapping_set_folio_min_order(inode->i_mapping,
				    M_IGEO(mp)->min_folio_order);
	return error;
}

/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state. Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			error;

	trace_xfs_iget_recycle(ip);

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return -EAGAIN;

	/*
	 * We need to make it look like the inode is being reclaimed to prevent
	 * the actual reclaim workers from stomping over us while we recycle
	 * the inode. We can't clear the radix tree tag yet as it requires
	 * pag_ici_lock to be held exclusive.
	 */
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error) {
		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble. Try to re-add it to the reclaim list.
		 */
		rcu_read_lock();
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		trace_xfs_iget_recycle_fail(ip);
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);

	return 0;
}
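
/*
 * Note that -EAGAIN from the recycle path is not fatal: xfs_iget_cache_hit()
 * turns it into a skip, and xfs_iget() retries the whole lookup after a
 * short delay.
 */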

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			xfs_agno_mark_sick(ip->i_mount,
					XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
					XFS_SICK_AG_INOBT);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			xfs_agno_mark_sick(ip->i_mount,
					XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
					XFS_SICK_AG_INOBT);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/* Make all pending inactivation work start immediately. */
static bool
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			ret = false;

	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
			ret = true;
		}
	}

	return ret;
}

/* Wait for all queued work and collect errors */
static int
xfs_inodegc_wait_all(
	struct xfs_mount	*mp)
{
	int			cpu;
	int			error = 0;

	flush_workqueue(mp->m_inodegc_wq);
	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
		struct xfs_inodegc	*gc;

		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (gc->error && !error)
			error = gc->error;
		gc->error = 0;
	}

	return error;
}
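
/*
 * The per-cpu errors are only harvested after flush_workqueue() returns, so
 * the status of every worker that had been queued is visible by the time we
 * read and clear gc->error here.
 */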

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * If we're racing with the inactivation worker we also want to wait.
	 * If we're creating a new file, it's possible that the worker
	 * previously marked the inode as free on disk but hasn't finished
	 * updating the incore state yet. The AGI buffer will be dirty and
	 * locked to the icreate transaction, so a synchronous push of the
	 * inodegc workers would result in deadlock. For a regular iget, the
	 * worker is running already, so we might as well wait.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	wait_on_inode to wait for these flags to be cleared
	 *	instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		goto out_inodegc_flush;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/* Skip inodes that have no vfs state. */
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	/* The inode fits the selection criteria; process it. */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		/* Drops i_flags_lock and RCU read lock. */
		error = xfs_iget_recycle(pag, ip);
		if (error == -EAGAIN)
			goto out_skip;
		if (error)
			return error;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode))
			goto out_skip;

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_skip:
	trace_xfs_iget_skip(ip);
	XFS_STATS_INC(mp, xs_ig_frecycle);
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;

out_inodegc_flush:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	/*
	 * Do not wait for the workers, because the caller could hold an AGI
	 * buffer lock. We're just going to sleep in a loop anyway.
	 */
	if (xfs_is_inodegc_enabled(mp))
		xfs_inodegc_queue_all(mp);
	return -EAGAIN;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_has_v3inodes(mp) &&
	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
		VFS_I(ip)->i_generation = get_random_u32();
	} else {
		struct xfs_buf		*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		else
			xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL | __GFP_NOLOCKDEP)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system. The inode is looked up
 * in the cache held in each AG. If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!xfs_verify_ino(mp, ino))
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now. If it's a new inode being created, xfs_init_new_inode will
	 * handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) &&
	    error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
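
/*
 * A minimal caller sketch (not from this file): look up an inode, take the
 * ILOCK for a metadata update, then drop the lock and the reference.
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	// ... modify the inode ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);
 */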

/*
 * Get a metadata inode.
 *
 * The metafile type must match the file mode exactly, and for files in the
 * metadata directory tree, it must match the inode's metatype exactly.
 */
int
xfs_trans_metafile_iget(
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	enum xfs_metafile_type	metafile_type,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	umode_t			mode;
	int			error;

	error = xfs_iget(mp, tp, ino, 0, 0, &ip);
	if (error == -EFSCORRUPTED || error == -EINVAL)
		goto whine;
	if (error)
		return error;

	if (VFS_I(ip)->i_nlink == 0)
		goto bad_rele;

	if (metafile_type == XFS_METAFILE_DIR)
		mode = S_IFDIR;
	else
		mode = S_IFREG;
	if (inode_wrong_type(VFS_I(ip), mode))
		goto bad_rele;
	if (xfs_has_metadir(mp)) {
		if (!xfs_is_metadir_inode(ip))
			goto bad_rele;
		if (metafile_type != ip->i_metatype)
			goto bad_rele;
	}

	*ipp = ip;
	return 0;
bad_rele:
	xfs_irele(ip);
whine:
	xfs_err(mp, "metadata inode 0x%llx type %u is corrupt", ino,
			metafile_type);
	xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
	return -EFSCORRUPTED;
}

/* Grab a metadata file if the caller doesn't already have a transaction. */
int
xfs_metafile_iget(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	enum xfs_metafile_type	metafile_type,
	struct xfs_inode	**ipp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = xfs_trans_metafile_iget(tp, ino, metafile_type, ipp);
	xfs_trans_cancel(tp);
	return error;
}
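
/*
 * The empty transaction carries no reservation; it merely provides a
 * transaction context for the metadata reads done by xfs_iget(), which is
 * why it can be cancelled unconditionally once the lookup returns.
 */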

/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_igrab(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	/* Don't reclaim a sick inode unless the caller asked for it. */
	if (ip->i_sick &&
	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait for the inode to be able to reclaim
 * it.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes. This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	/*
	 * Check for log shutdown because aborting the inode can move the log
	 * tail and corrupt in memory state. This is fine if the log is shut
	 * down, but if the log is still active and only the mount is shut down
	 * then the in-memory log tail movement caused by the abort can be
	 * incorrectly propagated to disk.
	 */
	if (xlog_is_shutdown(ip->i_mount->m_log)) {
		xfs_iunpin_wait(ip);
		/*
		 * Avoid an ABBA deadlock on the inode cluster buffer vs
		 * concurrent xfs_ifree_cluster() trying to mark the inode
		 * stale. We don't need the inode locked to run the flush abort
		 * code, but the flush abort needs to lock the cluster buffer.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_iflush_shutdown_abort(ip);
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
	trace_xfs_inode_reclaiming(ip);

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	spin_unlock(&ip->i_flags_lock);

	ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}

/* Reclaim sick inodes if we're unmounting or the fs went down. */
static inline bool
xfs_want_reclaim_sick(
	struct xfs_mount	*mp)
{
	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
	       xfs_is_shutdown(mp);
}

void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}
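
/*
 * Each pass of the loop above first pushes the AIL synchronously so that
 * dirty inodes are written back and become reclaimable, then walks the
 * cache; the loop exits once no AG carries the reclaim mark any more.
 */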

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	unsigned long		nr_to_scan)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	return 0;
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	XA_STATE		(xas, &mp->m_groups[XG_TYPE_AG].xa, 0);
	long			reclaimable = 0;
	struct xfs_perag	*pag;

	rcu_read_lock();
	xas_for_each_marked(&xas, pag, ULONG_MAX, XFS_PERAG_RECLAIM_MARK) {
		trace_xfs_reclaim_inodes_count(pag, _THIS_IP_);
		reclaimable += pag->pag_ici_reclaimable;
	}
	rcu_read_unlock();

	return reclaimable;
}

STATIC bool
xfs_icwalk_match_id(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid != icw->icw_prid)
		return false;

	return true;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC bool
xfs_icwalk_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid == icw->icw_prid)
		return true;

	return false;
}

/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @icw? The inode is eligible if @icw is null or
 * if the predicate functions match.
 */
static bool
xfs_icwalk_match(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	bool			match;

	if (!icw)
		return true;

	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
		match = xfs_icwalk_match_id_union(ip, icw);
	else
		match = xfs_icwalk_match_id(ip, icw);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
	    XFS_ISIZE(ip) < icw->icw_min_file_size)
		return false;

	return true;
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low.
 */
void
xfs_reclaim_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
	xfs_reclaim_work_queue(mp);
}

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
		return 0;

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (xfs_can_free_eofblocks(ip))
		return xfs_free_eofblocks(ip);

	/* inode could be preallocated */
	trace_xfs_inode_free_eofblocks_invalid(ip);
	xfs_inode_clear_eofblocks_tag(ip);
	return 0;
}

static void
xfs_blockgc_set_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}

static void
xfs_blockgc_clear_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	bool			clear_tag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);

	if (!clear_tag)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}

/*
 * Prepare to free COW fork blocks from an inode.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	bool			sync;

	sync = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * A cowblocks trim of an inode can have a significant effect on
	 * fragmentation even when a reasonable COW extent size hint is set.
	 * Therefore, we prefer to not process cowblocks unless they are clean
	 * and idle. We can never process a cowblocks inode that is dirty or has
	 * in-flight I/O under any circumstances, because outstanding writeback
	 * or dio expects targeted COW fork blocks exist through write
	 * completion where they can be remapped into the data fork.
	 *
	 * Therefore, the heuristic used here is to never process inodes
	 * currently opened for write from background (i.e. non-sync) scans. For
	 * sync scans, use the pagecache/dio state of the inode to ensure we
	 * never free COW fork blocks out from under pending I/O.
	 */
	if (!sync && inode_is_open_for_write(VFS_I(ip)))
		return false;
	return xfs_can_free_cowblocks(ip);
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long. If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;
	int			ret = 0;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
		return 0;

	if (!xfs_prep_free_cowblocks(ip, icw))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_MMAPLOCK_EXCL;

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip, icw))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
	return ret;
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}

/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag = NULL;

	if (!xfs_clear_blockgc_enabled(mp))
		return;

	while ((pag = xfs_perag_next(mp, pag)))
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
	trace_xfs_blockgc_stop(mp, __return_address);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_start(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag = NULL;

	if (xfs_set_blockgc_enabled(mp))
		return;

	trace_xfs_blockgc_start(mp, __return_address);
	while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
		xfs_blockgc_queue(pag);
}

/* Don't try to run block gc on an inode that's in any of these states. */
#define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
					 XFS_NEED_INACTIVE | \
					 XFS_INACTIVATING | \
					 XFS_IRECLAIMABLE | \
					 XFS_IRECLAIM)
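/*
 * All of the states above mean the inode is either not yet fully set up or
 * is already on its way out of the cache, so there is no stable VFS inode
 * for blockgc to take a reference on.
 */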
/*
 * Decide if the given @ip is eligible for garbage collection of speculative
 * preallocations, and grab it if so. Returns true if it's ready to go or
 * false if we should just ignore it.
 */
static bool
xfs_blockgc_igrab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (xfs_is_shutdown(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}

/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	unsigned int		lockflags = 0;
	int			error;

	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
	if (error)
		goto unlock;

	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
unlock:
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	xfs_irele(ip);
	return error;
}

/* Background worker that trims preallocated space. */
void
xfs_blockgc_worker(
	struct work_struct	*work)
{
	struct xfs_perag	*pag = container_of(to_delayed_work(work),
					struct xfs_perag, pag_blockgc_work);
	struct xfs_mount	*mp = pag_mount(pag);
	int			error;

	trace_xfs_blockgc_worker(mp, __return_address);

	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag_agno(pag), error);
	xfs_blockgc_queue(pag);
}

/*
 * Try to free space in the filesystem by purging inactive inodes, eofblocks
 * and cowblocks.
 */
int
xfs_blockgc_free_space(
	struct xfs_mount	*mp,
	struct xfs_icwalk	*icw)
{
	int			error;

	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);

	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
	if (error)
		return error;

	return xfs_inodegc_flush(mp);
}

/*
 * Reclaim all the free space that we can by scheduling the background blockgc
 * and inodegc workers immediately and waiting for them all to clear.
 */
int
xfs_blockgc_flush_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag = NULL;

	trace_xfs_blockgc_flush_all(mp, __return_address);

	/*
	 * For each blockgc worker, move its queue time up to now. If it wasn't
	 * queued, it will not be requeued. Then flush whatever is left.
	 */
	while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
		mod_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work, 0);

	while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
		flush_delayed_work(&pag->pag_blockgc_work);

	return xfs_inodegc_flush(mp);
}
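
/*
 * The two separate loops above are deliberate: kicking every AG's worker
 * before waiting lets the workers run in parallel, so the flush pass only
 * waits for stragglers instead of serialising the whole scan.
 */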

/*
 * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which
 * quota caused an allocation failure, so we make a best effort by including
 * each quota under low free space conditions (less than 1% free space) in the
 * scan.
 *
 * Callers must not hold any inode's ILOCK. If requesting a synchronous scan
 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
 * MMAPLOCK.
 */
int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		iwalk_flags)
{
	struct xfs_icwalk	icw = {0};
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	/*
	 * Run a scan to free blocks using the union filter to cover all
	 * applicable quotas in a single scan.
	 */
	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
		do_work = true;
	}

	if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		icw.icw_prid = pdqp->q_id;
		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &icw);
}

/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
	struct xfs_inode	*ip,
	unsigned int		iwalk_flags)
{
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
}

/* XFS Inode Cache Walking Code */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
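
/*
 * At 32 entries, the on-stack batch of inode pointers in xfs_icwalk_ag()
 * costs 256 bytes on a 64-bit build.
 */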

/*
 * Decide if we want to grab this inode in anticipation of doing work towards
 * the goal.
 */
static inline bool
xfs_icwalk_igrab(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		return xfs_blockgc_igrab(ip);
	case XFS_ICWALK_RECLAIM:
		return xfs_reclaim_igrab(ip, icw);
	default:
		return false;
	}
}

/*
 * Process an inode. Each processing function must handle any state changes
 * made by the icwalk igrab function. Return -EAGAIN to skip an inode.
 */
static inline int
xfs_icwalk_process_inode(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	struct xfs_icwalk	*icw)
{
	int			error = 0;

	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		error = xfs_blockgc_scan_inode(ip, icw);
		break;
	case XFS_ICWALK_RECLAIM:
		xfs_reclaim_inode(ip, pag);
		break;
	}
	return error;
}

/*
 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
 * process them in some manner.
 */
static int
xfs_icwalk_ag(
	struct xfs_perag	*pag,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_mount	*mp = pag_mount(pag);
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	bool			done;
	int			nr_found;

restart:
	done = false;
	skipped = 0;
	if (goal == XFS_ICWALK_RECLAIM)
		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
	else
		first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **) batch, first_index,
				XFS_LOOKUP_BATCH, goal);
		if (!nr_found) {
			done = true;
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr_found == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || !xfs_icwalk_igrab(goal, ip, icw))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag_agno(pag))
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = xfs_icwalk_process_inode(goal, batch[i], pag,
					icw);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
			if (icw->icw_scan_limit <= 0)
				break;
		}
	} while (nr_found && !done);

	if (goal == XFS_ICWALK_RECLAIM) {
		if (done)
			first_index = 0;
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
	}

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/* Walk all incore inodes to achieve a given goal. */
static int
xfs_icwalk(
	struct xfs_mount	*mp,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_perag	*pag = NULL;
	int			error = 0;
	int			last_error = 0;

	while ((pag = xfs_perag_grab_next_tag(mp, pag, goal))) {
		error = xfs_icwalk_ag(pag, goal, icw);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED) {
				xfs_perag_rele(pag);
				break;
			}
		}
	}
	return last_error;
1877 BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
1878 }
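
/*
 * Usage sketch (editorial, hedged): a reclaim pass elsewhere in this file
 * would drive the walk with a NULL filter to process everything tagged
 * XFS_ICI_RECLAIM_TAG, roughly:
 *
 *	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
 *
 * while blockgc callers pass a populated struct xfs_icwalk so that
 * xfs_icwalk_igrab() can filter by id and honor scan limits.
 */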

#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/* Schedule the inode for reclaim. */
static void
xfs_inodegc_set_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	trace_xfs_inode_set_reclaimable(ip);
	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
	ip->i_flags |= XFS_IRECLAIMABLE;
	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
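
/*
 * Editorial sketch of the locking protocol used above: the per-AG
 * pag_ici_lock is taken before the per-inode i_flags_lock, and both are
 * held so the inode state flags and the radix tree tag change atomically
 * with respect to other tag users:
 *
 *	spin_lock(&pag->pag_ici_lock);
 *	spin_lock(&ip->i_flags_lock);
 *	... update ip->i_flags and the XFS_ICI_* tag together ...
 *	spin_unlock(&ip->i_flags_lock);
 *	spin_unlock(&pag->pag_ici_lock);
 *
 * Acquiring them in the opposite order elsewhere would risk ABBA deadlock.
 */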

/*
 * Free all speculative preallocations and possibly even the inode itself.
 * This is the last chance to make changes to an otherwise unreferenced file
 * before incore reclamation happens.
 */
static int
xfs_inodegc_inactivate(
	struct xfs_inode	*ip)
{
	int			error;

	trace_xfs_inode_inactivating(ip);
	error = xfs_inactive(ip);
	xfs_inodegc_set_reclaimable(ip);
	return error;
}

void
xfs_inodegc_worker(
	struct work_struct	*work)
{
	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
						struct xfs_inodegc, work);
	struct llist_node	*node = llist_del_all(&gc->list);
	struct xfs_inode	*ip, *n;
	struct xfs_mount	*mp = gc->mp;
	unsigned int		nofs_flag;

	/*
	 * Clear the cpu mask bit and ensure that we have seen the latest
	 * update of the gc structure associated with this CPU. This matches
	 * with the release semantics used when setting the cpumask bit in
	 * xfs_inodegc_queue.
	 */
	cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask);
	smp_mb__after_atomic();

	WRITE_ONCE(gc->items, 0);

	if (!node)
		return;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	ip = llist_entry(node, struct xfs_inode, i_gclist);
	trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits));

	WRITE_ONCE(gc->shrinker_hits, 0);
	llist_for_each_entry_safe(ip, n, node, i_gclist) {
		int	error;

		xfs_iflags_set(ip, XFS_INACTIVATING);
		error = xfs_inodegc_inactivate(ip);
		if (error && !gc->error)
			gc->error = error;
	}

	memalloc_nofs_restore(nofs_flag);
}
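
/*
 * Editorial sketch of the lock-free queue pattern above, for readers not
 * familiar with <linux/llist.h>: producers push entries one at a time and
 * the consumer takes the entire list in a single atomic exchange:
 *
 *	producer (xfs_inodegc_queue):	consumer (this worker):
 *	llist_add(&ip->i_gclist,	node = llist_del_all(&gc->list);
 *		  &gc->list);		llist_for_each_entry_safe(ip, n,
 *						node, i_gclist)
 *					.../. inactivate ip .../
 *
 * Neither side takes a spinlock; ordering against the cpumask bit is
 * provided by the barriers commented here and in xfs_inodegc_queue().
 */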

/*
 * Expedite all pending inodegc work to run immediately. This does not wait
 * for completion of the work.
 */
void
xfs_inodegc_push(
	struct xfs_mount	*mp)
{
	if (!xfs_is_inodegc_enabled(mp))
		return;
	trace_xfs_inodegc_push(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}

/*
 * Force all currently queued inode inactivation work to run immediately and
 * wait for the work to finish.
 */
int
xfs_inodegc_flush(
	struct xfs_mount	*mp)
{
	xfs_inodegc_push(mp);
	trace_xfs_inodegc_flush(mp, __return_address);
	return xfs_inodegc_wait_all(mp);
}
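
/*
 * Usage sketch (editorial, hedged): push() suits callers that merely want
 * recently freed resources to become visible soon, while flush() suits
 * callers that must observe the results before proceeding, e.g.:
 *
 *	error = xfs_inodegc_flush(mp);
 *	if (error)
 *		return error;	// inactivation failed; error was collected
 *				// in the per-cpu gc->error fields
 */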

/*
 * Flush all the pending work and then disable the inode inactivation
 * background workers and wait for them to stop.  Caller must hold
 * sb->s_umount to coordinate changes in the inodegc_enabled state.
 */
void
xfs_inodegc_stop(
	struct xfs_mount	*mp)
{
	bool			rerun;

	if (!xfs_clear_inodegc_enabled(mp))
		return;

	/*
	 * Drain all pending inodegc work, including inodes that could be
	 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
	 * threads that sample the inodegc state just prior to us clearing it.
	 * The inodegc flag state prevents new threads from queuing more
	 * inodes, so we queue pending work items and flush the workqueue
	 * until all inodegc lists are empty.  IOWs, we cannot use
	 * drain_workqueue here because it does not allow other unserialized
	 * mechanisms to reschedule inodegc work while this draining is in
	 * progress.
	 */
	xfs_inodegc_queue_all(mp);
	do {
		flush_workqueue(mp->m_inodegc_wq);
		rerun = xfs_inodegc_queue_all(mp);
	} while (rerun);

	trace_xfs_inodegc_stop(mp, __return_address);
}

/*
 * Enable the inode inactivation background workers and schedule deferred
 * inode inactivation work if there is any.  Caller must hold sb->s_umount to
 * coordinate changes in the inodegc_enabled state.
 */
void
xfs_inodegc_start(
	struct xfs_mount	*mp)
{
	if (xfs_set_inodegc_enabled(mp))
		return;

	trace_xfs_inodegc_start(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}
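
/*
 * Pairing sketch (editorial, hedged): stop/start bracket periods when new
 * inactivation transactions must not run, so a freeze path would look
 * roughly like:
 *
 *	freeze:	xfs_inodegc_stop(mp);	// under sb->s_umount
 *	thaw:	xfs_inodegc_start(mp);
 *
 * The enabled-flag test-and-clear/test-and-set at the top of each function
 * makes the pair idempotent if a caller repeats one side.
 */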

#ifdef CONFIG_XFS_RT
static inline bool
xfs_inodegc_want_queue_rt_file(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_REALTIME_INODE(ip) || xfs_has_zoned(mp))
		return false;

	if (xfs_compare_freecounter(mp, XC_FREE_RTEXTENTS,
				mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	return false;
}
#else
# define xfs_inodegc_want_queue_rt_file(ip)	(false)
#endif /* CONFIG_XFS_RT */

/*
 * Schedule the inactivation worker when:
 *
 *  - We've accumulated more than one inode cluster buffer's worth of inodes.
 *  - There is less than 5% free space left.
 *  - Any of the quotas for this inode are near an enforcement limit.
 */
static inline bool
xfs_inodegc_want_queue_work(
	struct xfs_inode	*ip,
	unsigned int		items)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (items > mp->m_ino_geo.inodes_per_cluster)
		return true;

	if (xfs_compare_freecounter(mp, XC_FREE_BLOCKS,
				mp->m_low_space[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	if (xfs_inodegc_want_queue_rt_file(ip))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
		return true;

	return false;
}

/*
 * Upper bound on the number of inodes in each per-cpu queue that can be
 * pending inactivation at any given time, to avoid monopolizing the
 * workqueue.
 */
#define XFS_INODEGC_MAX_BACKLOG	(4 * XFS_INODES_PER_CHUNK)
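
/*
 * Editorial note: XFS_INODES_PER_CHUNK is 64, so this caps the backlog at
 * 256 inodes per queue before xfs_inodegc_want_flush_work() throttles the
 * frontend.
 */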

/*
 * Make the frontend wait for inactivations when:
 *
 *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
 *  - The queue depth exceeds the maximum allowable percpu backlog.
 *
 * Note: if we are in a NOFS context here (e.g. the current thread is running
 * a transaction) then we don't want to block, because inodegc progress may
 * require filesystem resources that we already hold, and blocking could
 * deadlock.  Hence we skip out of here if we are in a scoped NOFS context.
 */
static inline bool
xfs_inodegc_want_flush_work(
	struct xfs_inode	*ip,
	unsigned int		items,
	unsigned int		shrinker_hits)
{
	if (current->flags & PF_MEMALLOC_NOFS)
		return false;

	if (shrinker_hits > 0)
		return true;

	if (items > XFS_INODEGC_MAX_BACKLOG)
		return true;

	return false;
}
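
/*
 * Editorial sketch of how the PF_MEMALLOC_NOFS test above gets satisfied:
 * a scoped NOFS section set up elsewhere (e.g. around a transaction) looks
 * like:
 *
 *	unsigned int nofs_flag = memalloc_nofs_save();
 *	.../. allocations here are implicitly GFP_NOFS .../
 *	memalloc_nofs_restore(nofs_flag);
 *
 * memalloc_nofs_save() sets PF_MEMALLOC_NOFS in current->flags, which is
 * exactly what xfs_inodegc_want_flush_work() checks to avoid blocking a
 * transaction on inodegc completion.
 */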

/*
 * Queue a background inactivation worker if there are inodes that need to be
 * inactivated and higher level xfs code hasn't disabled the background
 * workers.
 */
static void
xfs_inodegc_queue(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inodegc	*gc;
	int			items;
	unsigned int		shrinker_hits;
	unsigned int		cpu_nr;
	unsigned long		queue_delay = 1;

	trace_xfs_inode_set_need_inactive(ip);
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_NEED_INACTIVE;
	spin_unlock(&ip->i_flags_lock);

	cpu_nr = get_cpu();
	gc = this_cpu_ptr(mp->m_inodegc);
	llist_add(&ip->i_gclist, &gc->list);
	items = READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, items + 1);
	shrinker_hits = READ_ONCE(gc->shrinker_hits);

	/*
	 * Ensure the list add is always seen by anyone who finds the cpumask
	 * bit set. This effectively gives the cpumask bit set operation
	 * release ordering semantics.
	 */
	smp_mb__before_atomic();
	if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask))
		cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);

	/*
	 * We queue the work while holding the current CPU so that the work
	 * is scheduled to run on this CPU.
	 */
	if (!xfs_is_inodegc_enabled(mp)) {
		put_cpu();
		return;
	}

	if (xfs_inodegc_want_queue_work(ip, items))
		queue_delay = 0;

	trace_xfs_inodegc_queue(mp, __return_address);
	mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
			queue_delay);
	put_cpu();

	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
		trace_xfs_inodegc_throttle(mp, __return_address);
		flush_delayed_work(&gc->work);
	}
}
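
/*
 * Editorial note on the barrier above (a sketch, not a spec): the queue side
 * publishes the llist entry before setting the cpumask bit,
 *
 *	llist_add(&ip->i_gclist, &gc->list);
 *	smp_mb__before_atomic();
 *	cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);
 *
 * giving the bit-set release semantics, so any thread that observes the bit
 * in m_inodegc_cpumask and pairs it with a matching acquire barrier (as the
 * worker does with smp_mb__after_atomic()) is guaranteed to also observe
 * the queued inode on gc->list.
 */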

/*
 * We set the inode flag atomically with the radix tree tag.  Once we get tag
 * lookups on the radix tree, this inode flag can go away.
 *
 * We always use background reclaim here because even if the inode is clean,
 * it still may be under I/O and hence we have to wait for I/O completion
 * before we can reclaim the inode.  The background reclaim path handles this
 * more efficiently than we can here, so simply let background reclaim tear
 * down all inodes.
 */
void
xfs_inode_mark_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			need_inactive;

	XFS_STATS_INC(mp, vn_reclaim);

	/*
	 * We should never get here with any of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));

	need_inactive = xfs_inode_needs_inactive(ip);
	if (need_inactive) {
		xfs_inodegc_queue(ip);
		return;
	}

	/* Going straight to reclaim, so drop the dquots. */
	xfs_qm_dqdetach(ip);
	xfs_inodegc_set_reclaimable(ip);
}

/*
 * Register a phony shrinker so that we can run background inodegc sooner
 * when there's memory pressure.  Inactivation does not itself free any
 * memory but it does make inodes reclaimable, which eventually frees memory.
 *
 * The count function, seek value, and batch value are crafted to trigger the
 * scan function during the second round of scanning.  Hopefully this means
 * that we reclaimed enough memory that initiating metadata transactions
 * won't make things worse.
 */
#define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
#define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
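
/*
 * Worked numbers (editorial, hedged against current mm shrinker internals):
 * DEF_PRIORITY is 12, so the count callback reports 4096 freeable objects
 * and the batch is 2049.  With seeks == 0, do_shrink_slab() uses a per-pass
 * delta of freeable / 2 == 2048, which stays below the batch on the first
 * pass and is deferred; the second pass accumulates 2048 + 2048 >= 2049 and
 * only then invokes the scan function.
 */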

static unsigned long
xfs_inodegc_shrinker_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = shrink->private_data;
	struct xfs_inodegc	*gc;
	int			cpu;

	if (!xfs_is_inodegc_enabled(mp))
		return 0;

	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			return XFS_INODEGC_SHRINKER_COUNT;
	}

	return 0;
}

static unsigned long
xfs_inodegc_shrinker_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = shrink->private_data;
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			no_items = true;

	if (!xfs_is_inodegc_enabled(mp))
		return SHRINK_STOP;

	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);

	for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			unsigned int	h = READ_ONCE(gc->shrinker_hits);

			WRITE_ONCE(gc->shrinker_hits, h + 1);
			mod_delayed_work_on(cpu, mp->m_inodegc_wq,
					&gc->work, 0);
			no_items = false;
		}
	}

	/*
	 * If there are no inodes to inactivate, we don't want the shrinker
	 * to think there's deferred work to call us back about.
	 */
	if (no_items)
		return LONG_MAX;

	return SHRINK_STOP;
}

/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
int
xfs_inodegc_register_shrinker(
	struct xfs_mount	*mp)
{
	mp->m_inodegc_shrinker = shrinker_alloc(SHRINKER_NONSLAB,
						"xfs-inodegc:%s",
						mp->m_super->s_id);
	if (!mp->m_inodegc_shrinker)
		return -ENOMEM;

	mp->m_inodegc_shrinker->count_objects = xfs_inodegc_shrinker_count;
	mp->m_inodegc_shrinker->scan_objects = xfs_inodegc_shrinker_scan;
	mp->m_inodegc_shrinker->seeks = 0;
	mp->m_inodegc_shrinker->batch = XFS_INODEGC_SHRINKER_BATCH;
	mp->m_inodegc_shrinker->private_data = mp;

	shrinker_register(mp->m_inodegc_shrinker);

	return 0;
}
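
/*
 * Teardown sketch (editorial, hedged): shrinker_alloc()/shrinker_register()
 * pair with shrinker_free() on the unmount or error path, e.g.:
 *
 *	shrinker_free(mp->m_inodegc_shrinker);
 *
 * which both unregisters and frees the shrinker allocated above.
 */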