// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_exchmaps.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_bmap_util.h"
#include "xfs_rtrefcount_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"
#include "scrub/tempfile.h"

/* Common code for the metadata scrubbers. */

/*
 * Handling operational errors.
 *
 * The *_process_error() family of functions are used to process error return
 * codes from functions called as part of a scrub operation.
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared.  In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes.  We return false to tell the caller that
 * something bad happened.  Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * whatever's next.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */
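
/*
 * A typical call pattern, as an illustrative sketch only
 * (xchk_frobnicate() is a made-up helper, not a real function):
 *
 *	error = xchk_frobnicate(sc);
 *	if (!xchk_process_error(sc, agno, agbno, &error))
 *		return error;
 *
 * On a true return, keep scrubbing; any verifier failure has already been
 * recorded in sc->sm->sm_flags and *error has been cleared.
 */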

/* Check for operational errors. */
static bool
__xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(
				sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
				sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly.  Set error to zero and do not continue.
		 */
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -EIO:
	case -ENODATA:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		break;
	}
	return false;
}

bool
xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_process_rt_error(
	struct xfs_scrub	*sc,
	xfs_rgnumber_t		rgno,
	xfs_rgblock_t		rgbno,
	int			*error)
{
	return __xchk_process_error(sc, rgno, rgbno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_xref_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/* Check for operational errors for a file offset. */
static bool
__xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly.  Set error to zero and do not continue.
		 */
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -EIO:
	case -ENODATA:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		break;
	}
	return false;
}

bool
xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_fblock_xref_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/*
 * Handling scrub corruption/optimization/warning checks.
 *
 * The *_set_{corrupt,preen,warning}() family of functions are used to
 * record the presence of metadata that is incorrect (corrupt), could be
 * optimized somehow (preen), or should be flagged for administrative
 * review but is not incorrect (warn).
 *
 * ftrace can be used to record the precise metadata location and
 * approximate code location of the failed check.
 */
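
/*
 * For example, a sketch loosely modeled on the AGF scrubber: a check that
 * finds a bad field in a locked header buffer records the corruption and
 * keeps going, since the outcome lives in sm_flags rather than the return
 * code:
 *
 *	if (be32_to_cpu(agf->agf_seqno) != pag_agno(sc->sa.pag))
 *		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
 */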

/* Record a block which could be optimized. */
void
xchk_block_set_preen(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_block_preen(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record an inode which could be optimized.  The trace data will include
 * the location of the inode record itself.
 */
void
xchk_ino_set_preen(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_ino_preen(sc, ino, __return_address);
}

/* Record a block indexed by a file fork that could be optimized. */
void
xchk_fblock_set_preen(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_fblock_preen(sc, whichfork, offset, __return_address);
}

/* Record that something is wrong with the filesystem primary superblock. */
void
xchk_set_corrupt(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fs_error(sc, 0, __return_address);
}

/* Record a corrupt block. */
void
xchk_block_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

#ifdef CONFIG_XFS_QUOTA
/* Record a corrupt quota counter. */
void
xchk_qcheck_set_corrupt(
	struct xfs_scrub	*sc,
	unsigned int		dqtype,
	xfs_dqid_t		id)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_qcheck_error(sc, dqtype, id, __return_address);
}
#endif

/* Record a corruption while cross-referencing. */
void
xchk_block_xref_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record a corrupt inode.  The trace data will include the location of
 * the inode record itself.
 */
void
xchk_ino_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xchk_ino_xref_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xchk_fblock_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block. */
void
xchk_fblock_xref_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about inodes that need administrative review but are not
 * incorrect.
 */
void
xchk_ino_set_warning(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_ino_warning(sc, ino, __return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xchk_fblock_set_warning(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}

/* Signal an incomplete scrub. */
void
xchk_set_incomplete(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xchk_incomplete(sc, __return_address);
}

/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xchk_rmap_ownedby_info {
	const struct xfs_owner_info	*oinfo;
	xfs_filblks_t			*blocks;
};

STATIC int
xchk_count_rmap_ownedby_irec(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xchk_rmap_ownedby_info	*sroi = priv;
	bool				irec_attr;
	bool				oinfo_attr;

	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
		(*sroi->blocks) += rec->rm_blockcount;

	return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */
int
xchk_count_rmap_ownedby_ag(
	struct xfs_scrub		*sc,
	struct xfs_btree_cur		*cur,
	const struct xfs_owner_info	*oinfo,
	xfs_filblks_t			*blocks)
{
	struct xchk_rmap_ownedby_info	sroi = {
		.oinfo			= oinfo,
		.blocks			= blocks,
	};

	*blocks = 0;
	return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
			&sroi);
}
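
/*
 * Usage sketch: count the blocks that the rmapbt attributes to an owner,
 * here using one of the canned owner infos exported by libxfs, then feed
 * the result (and the rmapbt cursor) into the xref machinery:
 *
 *	xfs_filblks_t	blocks;
 *
 *	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
 *			&XFS_RMAP_OINFO_FS, &blocks);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 */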

/*
 * AG scrubbing
 *
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
	struct xfs_scrub	*sc,
	unsigned int		type)
{
	/* Return all AG header read failures when scanning btrees. */
	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
		return true;
	/*
	 * If we're scanning a given type of AG header, we only want to
	 * see read failures from that specific header.  We'd like the
	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}

/*
 * Grab the AG header buffers for the attached perag structure.
 *
 * The headers should be released by xchk_ag_free, but as a fail safe we attach
 * all the buffers we grab to the scrub transaction so they'll all be freed
 * when we cancel it.
 */
static inline int
xchk_perag_read_headers(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	int			error;

	error = xfs_ialloc_read_agi(sa->pag, sc->tp, 0, &sa->agi_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		return error;

	error = xfs_alloc_read_agf(sa->pag, sc->tp, 0, &sa->agf_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		return error;

	return 0;
}

/*
 * Grab the AG headers for the attached perag structure and wait for pending
 * intents to drain.
 */
int
xchk_perag_drain_and_lock(
	struct xfs_scrub	*sc)
{
	struct xchk_ag		*sa = &sc->sa;
	int			error = 0;

	ASSERT(sa->pag != NULL);
	ASSERT(sa->agi_bp == NULL);
	ASSERT(sa->agf_bp == NULL);

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		error = xchk_perag_read_headers(sc, sa);
		if (error)
			return error;

		/*
		 * If we've grabbed an inode for scrubbing then we assume that
		 * holding its ILOCK will suffice to coordinate with any intent
		 * chains involving this inode.
		 */
		if (sc->ip)
			return 0;

		/*
		 * Decide if this AG is quiet enough for all metadata to be
		 * consistent with each other.  XFS allows the AG header buffer
		 * locks to cycle across transaction rolls while processing
		 * chains of deferred ops, which means that there could be
		 * other threads in the middle of processing a chain of
		 * deferred ops.  For regular operations we are careful about
		 * ordering operations to prevent collisions between threads
		 * (which is why we don't need a per-AG lock), but scrub and
		 * repair have to serialize against chained operations.
		 *
		 * We just locked all the AG header buffers; now take a look
		 * to see if there are any intents in progress.  If there are,
		 * drop the AG headers and wait for the intents to drain.
		 * Since we hold all the AG header locks for the duration of
		 * the scrub, this is the only time we have to sample the
		 * intents counter; any threads increasing it after this point
		 * can't possibly be in the middle of a chain of AG metadata
		 * updates.
		 *
		 * Obviously, this should be slanted against scrub and in favor
		 * of runtime threads.
		 */
		if (!xfs_group_intent_busy(pag_group(sa->pag)))
			return 0;

		if (sa->agf_bp) {
			xfs_trans_brelse(sc->tp, sa->agf_bp);
			sa->agf_bp = NULL;
		}

		if (sa->agi_bp) {
			xfs_trans_brelse(sc->tp, sa->agi_bp);
			sa->agi_bp = NULL;
		}

		if (!(sc->flags & XCHK_FSGATES_DRAIN))
			return -ECHRNG;
		error = xfs_group_intent_drain(pag_group(sa->pag));
		if (error == -ERESTARTSYS)
			error = -EINTR;
	} while (!error);

	return error;
}

/*
 * Grab the per-AG structure, grab all AG header buffers, and wait until there
 * aren't any pending intents.  Returns -ENOENT if we can't grab the perag
 * structure.
 */
int
xchk_ag_read_headers(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	ASSERT(!sa->pag);
	sa->pag = xfs_perag_get(mp, agno);
	if (!sa->pag)
		return -ENOENT;

	return xchk_perag_drain_and_lock(sc);
}

/* Release all the AG btree cursors. */
void
xchk_ag_btcur_free(
	struct xchk_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->bno_cur = NULL;
	sa->cnt_cur = NULL;
}

/* Initialize all the btree cursors for an AG. */
void
xchk_ag_btcur_init(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	if (sa->agf_bp) {
		/* Set up a bnobt cursor for cross-referencing. */
		sa->bno_cur = xfs_bnobt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->bno_cur,
				XFS_SCRUB_TYPE_BNOBT);

		/* Set up a cntbt cursor for cross-referencing. */
		sa->cnt_cur = xfs_cntbt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->cnt_cur,
				XFS_SCRUB_TYPE_CNTBT);

		/* Set up a rmapbt cursor for cross-referencing. */
		if (xfs_has_rmapbt(mp)) {
			sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->rmap_cur,
					XFS_SCRUB_TYPE_RMAPBT);
		}

		/* Set up a refcountbt cursor for cross-referencing. */
		if (xfs_has_reflink(mp)) {
			sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->refc_cur,
					XFS_SCRUB_TYPE_REFCNTBT);
		}
	}

	if (sa->agi_bp) {
		/* Set up an inobt cursor for cross-referencing. */
		sa->ino_cur = xfs_inobt_init_cursor(sa->pag, sc->tp,
				sa->agi_bp);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->ino_cur,
				XFS_SCRUB_TYPE_INOBT);

		/* Set up a finobt cursor for cross-referencing. */
		if (xfs_has_finobt(mp)) {
			sa->fino_cur = xfs_finobt_init_cursor(sa->pag, sc->tp,
					sa->agi_bp);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->fino_cur,
					XFS_SCRUB_TYPE_FINOBT);
		}
	}
}

/* Release the AG header context and btree cursors. */
void
xchk_ag_free(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	xchk_ag_btcur_free(sa);
	xrep_reset_perag_resv(sc);
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	if (sa->pag) {
		xfs_perag_put(sa->pag);
		sa->pag = NULL;
	}
}

/*
 * For scrub, grab the perag structure, the AGI, and the AGF headers, in that
 * order.  Locking order requires us to get the AGI before the AGF.  We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers; either the
 * caller passes one in (bmap scrub) or we have to create a transaction
 * ourselves.  Returns ENOENT if the perag struct cannot be grabbed.
 */
int
xchk_ag_init(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error;

	error = xchk_ag_read_headers(sc, agno, sa);
	if (error)
		return error;

	xchk_ag_btcur_init(sc, sa);
	return 0;
}
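
/*
 * The usual lifecycle pairs xchk_ag_init() in a setup function (see
 * xchk_setup_ag_btree() below) with xchk_ag_free() in scrub teardown;
 * as a sketch:
 *
 *	error = xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
 *	if (error)
 *		return error;
 *
 * after which the checker uses the sc->sa cursors, and teardown calls
 * xchk_ag_free(sc, &sc->sa) whether or not the checks actually ran.
 */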

#ifdef CONFIG_XFS_RT
/*
 * For scrubbing a realtime group, grab all the in-core resources we'll need to
 * check the metadata, which means taking the ILOCK of the realtime group's
 * metadata inodes.  Callers must not join these inodes to the transaction with
 * non-zero lockflags or concurrency problems will result.  The @rtglock_flags
 * argument takes XFS_RTGLOCK_* flags.
 */
int
xchk_rtgroup_init(
	struct xfs_scrub	*sc,
	xfs_rgnumber_t		rgno,
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg == NULL);
	ASSERT(sr->rtlock_flags == 0);

	sr->rtg = xfs_rtgroup_get(sc->mp, rgno);
	if (!sr->rtg)
		return -ENOENT;
	return 0;
}

/* Lock all the rt group metadata inode ILOCKs and wait for intents. */
int
xchk_rtgroup_lock(
	struct xfs_scrub	*sc,
	struct xchk_rt		*sr,
	unsigned int		rtglock_flags)
{
	int			error = 0;

	ASSERT(sr->rtg != NULL);

	/*
	 * If we're /only/ locking the rtbitmap in shared mode, then we're
	 * obviously not trying to compare records in two metadata inodes.
	 * There's no need to drain intents here because the caller (most
	 * likely the rgsuper scanner) doesn't need that level of consistency.
	 */
	if (rtglock_flags == XFS_RTGLOCK_BITMAP_SHARED) {
		xfs_rtgroup_lock(sr->rtg, rtglock_flags);
		sr->rtlock_flags = rtglock_flags;
		return 0;
	}

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		xfs_rtgroup_lock(sr->rtg, rtglock_flags);

		/*
		 * If we've grabbed a non-metadata file for scrubbing, we
		 * assume that holding its ILOCK will suffice to coordinate
		 * with any rt intent chains involving this inode.
		 */
		if (sc->ip && !xfs_is_internal_inode(sc->ip))
			break;

		/*
		 * Decide if the rt group is quiet enough for all metadata to
		 * be consistent with each other.  Regular file IO doesn't get
		 * to lock all the rt inodes at the same time, which means that
		 * there could be other threads in the middle of processing a
		 * chain of deferred ops.
		 *
		 * We just locked all the metadata inodes for this rt group;
		 * now take a look to see if there are any intents in progress.
		 * If there are, drop the rt group inode locks and wait for the
		 * intents to drain.  Since we hold the rt group inode locks
		 * for the duration of the scrub, this is the only time we have
		 * to sample the intents counter; any threads increasing it
		 * after this point can't possibly be in the middle of a chain
		 * of rt metadata updates.
		 *
		 * Obviously, this should be slanted against scrub and in favor
		 * of runtime threads.
		 */
		if (!xfs_group_intent_busy(rtg_group(sr->rtg)))
			break;

		xfs_rtgroup_unlock(sr->rtg, rtglock_flags);

		if (!(sc->flags & XCHK_FSGATES_DRAIN))
			return -ECHRNG;
		error = xfs_group_intent_drain(rtg_group(sr->rtg));
		if (error) {
			if (error == -ERESTARTSYS)
				error = -EINTR;
			return error;
		}
	} while (1);

	sr->rtlock_flags = rtglock_flags;

	if (xfs_has_rtrmapbt(sc->mp) && (rtglock_flags & XFS_RTGLOCK_RMAP))
		sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->tp, sr->rtg);

	if (xfs_has_rtreflink(sc->mp) && (rtglock_flags & XFS_RTGLOCK_REFCOUNT))
		sr->refc_cur = xfs_rtrefcountbt_init_cursor(sc->tp, sr->rtg);

	return 0;
}

/*
 * Free all the btree cursors and other incore data relating to the realtime
 * group.  This has to be done /before/ committing (or cancelling) the scrub
 * transaction.
 */
void
xchk_rtgroup_btcur_free(
	struct xchk_rt		*sr)
{
	if (sr->rmap_cur)
		xfs_btree_del_cursor(sr->rmap_cur, XFS_BTREE_ERROR);
	if (sr->refc_cur)
		xfs_btree_del_cursor(sr->refc_cur, XFS_BTREE_ERROR);

	sr->refc_cur = NULL;
	sr->rmap_cur = NULL;
}

/*
 * Unlock the realtime group.  This must be done /after/ committing (or
 * cancelling) the scrub transaction.
 */
void
xchk_rtgroup_unlock(
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg != NULL);

	if (sr->rtlock_flags) {
		xfs_rtgroup_unlock(sr->rtg, sr->rtlock_flags);
		sr->rtlock_flags = 0;
	}
}

/*
 * Unlock the realtime group and release its resources.  This must be done
 * /after/ committing (or cancelling) the scrub transaction.
 */
void
xchk_rtgroup_free(
	struct xfs_scrub	*sc,
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg != NULL);

	xchk_rtgroup_unlock(sr);

	xfs_rtgroup_put(sr->rtg);
	sr->rtg = NULL;
}
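
/*
 * An rt group checker typically chains these helpers; as a sketch, with
 * the exact lock flags depending on the checker:
 *
 *	error = xchk_rtgroup_init(sc, rgno, &sc->sr);
 *	if (error)
 *		return error;
 *	error = xchk_rtgroup_lock(sc, &sc->sr, XFS_RTGLOCK_ALL_FLAGS);
 *
 * with xchk_rtgroup_btcur_free() called before the scrub transaction is
 * committed or cancelled, and xchk_rtgroup_free() called after.
 */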
#endif /* CONFIG_XFS_RT */

/* Per-scrubber setup functions */

void
xchk_trans_cancel(
	struct xfs_scrub	*sc)
{
	xfs_trans_cancel(sc->tp);
	sc->tp = NULL;
}

void
xchk_trans_alloc_empty(
	struct xfs_scrub	*sc)
{
	sc->tp = xfs_trans_alloc_empty(sc->mp);
}

/*
 * Grab an empty transaction so that we can re-grab locked buffers if
 * one of our btrees turns out to be cyclic.
 *
 * If we're going to repair something, we need to ask for the largest possible
 * log reservation so that we can handle the worst case scenario for metadata
 * updates while rebuilding a metadata item.  We also need to reserve as many
 * blocks in the head transaction as we think we're going to need to rebuild
 * the metadata object.
 */
int
xchk_trans_alloc(
	struct xfs_scrub	*sc,
	uint			resblks)
{
	if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
		return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
				resblks, 0, 0, &sc->tp);

	xchk_trans_alloc_empty(sc);
	return 0;
}

/* Set us up with a transaction and an empty context. */
int
xchk_setup_fs(
	struct xfs_scrub	*sc)
{
	uint			resblks;

	resblks = xrep_calc_ag_resblks(sc);
	return xchk_trans_alloc(sc, resblks);
}

/* Set us up with a transaction and an empty context to repair rt metadata. */
int
xchk_setup_rt(
	struct xfs_scrub	*sc)
{
	return xchk_trans_alloc(sc, xrep_calc_rtgroup_resblks(sc));
}

/* Set us up with AG headers and btree cursors. */
int
xchk_setup_ag_btree(
	struct xfs_scrub	*sc,
	bool			force_log)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	/*
	 * If the caller asks us to checkpoint the log, do so.  This
	 * expensive operation should be performed infrequently and only
	 * as a last resort.  Any caller that sets force_log should
	 * document why they need to do so.
	 */
	if (force_log) {
		error = xchk_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xchk_setup_fs(sc);
	if (error)
		return error;

	return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk. */
int
xchk_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}

/* Verify that an inode is allocated ondisk, then return its cached inode. */
int
xchk_iget(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_inode	**ipp)
{
	ASSERT(sc->tp != NULL);

	return xfs_iget(sc->mp, sc->tp, inum, XCHK_IGET_FLAGS, 0, ipp);
}

/*
 * Try to grab an inode in a manner that avoids races with physical inode
 * allocation.  If we can't, return the locked AGI buffer so that the caller
 * can single-step the loading process to see where things went wrong.
 * Callers must have a valid scrub transaction.
 *
 * If the iget succeeds, return 0, a NULL AGI, and the inode.
 *
 * If the iget fails, return the error, the locked AGI, and a NULL inode.  This
 * can include -EINVAL and -ENOENT for invalid inode numbers or inodes that are
 * no longer allocated; or any other corruption or runtime error.
 *
 * If the AGI read fails, return the error, a NULL AGI, and NULL inode.
 *
 * If a fatal signal is pending, return -EINTR, a NULL AGI, and a NULL inode.
 */
int
xchk_iget_agi(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_buf		**agi_bpp,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_trans	*tp = sc->tp;
	struct xfs_perag	*pag;
	int			error;

	ASSERT(sc->tp != NULL);

again:
	*agi_bpp = NULL;
	*ipp = NULL;
	error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * Attach the AGI buffer to the scrub transaction to avoid deadlocks
	 * in the iget cache miss path.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	error = xfs_ialloc_read_agi(pag, tp, 0, agi_bpp);
	xfs_perag_put(pag);
	if (error)
		return error;

	error = xfs_iget(mp, tp, inum, XFS_IGET_NORETRY | XCHK_IGET_FLAGS, 0,
			ipp);
	if (error == -EAGAIN) {
		/*
		 * The inode may be in core but temporarily unavailable and may
		 * require the AGI buffer before it can be returned.  Drop the
		 * AGI buffer and retry the lookup.
		 *
		 * Incore lookup will fail with EAGAIN on a cache hit if the
		 * inode is queued to the inactivation list.  The inactivation
		 * worker may remove the inode from the unlinked list and hence
		 * needs the AGI.
		 *
		 * Hence xchk_iget_agi() needs to drop the AGI lock on EAGAIN
		 * to allow inodegc to make progress and move the inode to
		 * IRECLAIMABLE state where xfs_iget will be able to return it
		 * again if it can lock the inode.
		 */
		xfs_trans_brelse(tp, *agi_bpp);
		delay(1);
		goto again;
	}
	if (error)
		return error;

	/* We got the inode, so we can release the AGI. */
	ASSERT(*ipp != NULL);
	xfs_trans_brelse(tp, *agi_bpp);
	*agi_bpp = NULL;
	return 0;
}
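
/*
 * Caller sketch (see xchk_iget_for_scrubbing() below for the real usage):
 *
 *	error = xchk_iget_agi(sc, inum, &agi_bp, &ip);
 *	if (error == 0)
 *		return xchk_install_handle_inode(sc, ip);
 *	if (agi_bp != NULL)
 *		...single-step the load with the AGI still locked...
 */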

#ifdef CONFIG_XFS_QUOTA
/*
 * Try to attach dquots to this inode if we think we might want to repair it.
 * Callers must not hold any ILOCKs.  If the dquots are broken and cannot be
 * attached, a quotacheck will be scheduled.
 */
int
xchk_ino_dqattach(
	struct xfs_scrub	*sc)
{
	ASSERT(sc->tp != NULL);
	ASSERT(sc->ip != NULL);

	if (!xchk_could_repair(sc))
		return 0;

	return xrep_ino_dqattach(sc);
}
#endif

/* Install an inode that we opened by handle for scrubbing. */
int
xchk_install_handle_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
		xchk_irele(sc, ip);
		return -ENOENT;
	}

	sc->ip = ip;
	return 0;
}

/*
 * Install an already-referenced inode for scrubbing.  Get our own reference to
 * the inode to make disposal simpler.  The inode must not be in I_FREEING or
 * I_WILL_FREE state!
 */
int
xchk_install_live_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (!igrab(VFS_I(ip))) {
		xchk_ino_set_corrupt(sc, ip->i_ino);
		return -EFSCORRUPTED;
	}

	sc->ip = ip;
	return 0;
}

/*
 * In preparation to scrub metadata structures that hang off of an inode,
 * grab either the inode referenced in the scrub control structure or the
 * inode passed in.  If the inumber does not reference an allocated inode
 * record, the function returns ENOENT to end the scrub early.  The inode
 * is not locked.
 */
int
xchk_iget_for_scrubbing(
	struct xfs_scrub	*sc)
{
	struct xfs_imap		imap;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag;
	struct xfs_buf		*agi_bp;
	struct xfs_inode	*ip_in = XFS_I(file_inode(sc->file));
	struct xfs_inode	*ip = NULL;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, sc->sm->sm_ino);
	int			error;

	ASSERT(sc->tp == NULL);

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino)
		return xchk_install_live_inode(sc, ip_in);

	/*
	 * On pre-metadir filesystems, reject internal metadata files.  For
	 * metadir filesystems, limited scrubbing of any file in the metadata
	 * directory tree by handle is allowed, because that is the only way to
	 * validate the lack of parent pointers in the sb-root metadata inodes.
	 */
	if (!xfs_has_metadir(mp) && xfs_is_sb_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	/* Reject obviously bad inode numbers. */
	if (!xfs_verify_ino(sc->mp, sc->sm->sm_ino))
		return -ENOENT;

	/* Try a safe untrusted iget. */
	error = xchk_iget_safe(sc, sc->sm->sm_ino, &ip);
	if (!error)
		return xchk_install_handle_inode(sc, ip);
	if (error == -ENOENT)
		return error;
	if (error != -EINVAL)
		goto out_error;

	/*
	 * EINVAL with IGET_UNTRUSTED probably means one of several things:
	 * userspace gave us an inode number that doesn't correspond to fs
	 * space; the inode btree lacks a record for this inode; or there is a
	 * record, and it says this inode is free.
	 *
	 * We want to look up this inode in the inobt to distinguish two
	 * scenarios: (1) the inobt says the inode is free, in which case
	 * there's nothing to do; and (2) the inobt says the inode is
	 * allocated, but loading it failed due to corruption.
	 *
	 * Allocate a transaction and grab the AGI to prevent inobt activity
	 * in this AG.  Retry the iget in case someone allocated a new inode
	 * after the first iget failed.
	 */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out_error;

	error = xchk_iget_agi(sc, sc->sm->sm_ino, &agi_bp, &ip);
	if (error == 0) {
		/* Actually got the inode, so install it. */
		xchk_trans_cancel(sc);
		return xchk_install_handle_inode(sc, ip);
	}
	if (error == -ENOENT)
		goto out_gone;
	if (error != -EINVAL)
		goto out_cancel;

	/* Ensure that we have protected against inode allocation/freeing. */
	if (agi_bp == NULL) {
		ASSERT(agi_bp != NULL);
		error = -ECANCELED;
		goto out_cancel;
	}

	/*
	 * Untrusted iget failed a second time.  Let's try an inobt lookup.
	 * If the inobt thinks this inode neither can exist inside the
	 * filesystem nor is allocated, return ENOENT to signal that the check
	 * can be skipped.
	 *
	 * If the lookup returns corruption, we'll mark this inode corrupt and
	 * exit to userspace.  There's little chance of fixing anything until
	 * the inobt is straightened out, but there's nothing we can do here.
	 *
	 * If the lookup encounters any other error, exit to userspace.
	 *
	 * If the lookup succeeds, something else must be very wrong in the fs
	 * such that setting up the incore inode failed in some strange way.
	 * Treat those as corruptions.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sc->sm->sm_ino));
	if (!pag) {
		error = -EFSCORRUPTED;
		goto out_cancel;
	}

	error = xfs_imap(pag, sc->tp, sc->sm->sm_ino, &imap,
			XFS_IGET_UNTRUSTED);
	xfs_perag_put(pag);
	if (error == -EINVAL || error == -ENOENT)
		goto out_gone;
	if (!error)
		error = -EFSCORRUPTED;

out_cancel:
	xchk_trans_cancel(sc);
out_error:
	trace_xchk_op_error(sc, agno, XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
			error, __return_address);
	return error;
out_gone:
	/* The file is gone, so there's nothing to check. */
	xchk_trans_cancel(sc);
	return -ENOENT;
}

/* Release an inode, possibly dropping it in the process. */
void
xchk_irele(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (sc->tp) {
		/*
		 * If we are in a transaction, we /cannot/ drop the inode
		 * ourselves, because the VFS will trigger writeback, which
		 * can require a transaction.  Clear DONTCACHE to force the
		 * inode to the LRU, where someone else can take care of
		 * dropping it.
		 *
		 * Note that when we grabbed our reference to the inode, it
		 * could have had an active ref and DONTCACHE set if a sysadmin
		 * is trying to coerce a change in file access mode.  icache
		 * hits do not clear DONTCACHE, so we must do it here.
		 */
		spin_lock(&VFS_I(ip)->i_lock);
		inode_state_clear(VFS_I(ip), I_DONTCACHE);
		spin_unlock(&VFS_I(ip)->i_lock);
	}

	xfs_irele(ip);
}

/*
 * Set us up to scrub metadata mapped by a file's fork.  Callers must not use
 * this to operate on user-accessible regular file data because the MMAPLOCK is
 * not taken.
 */
int
xchk_setup_inode_contents(
	struct xfs_scrub	*sc,
	unsigned int		resblks)
{
	int			error;

	error = xchk_iget_for_scrubbing(sc);
	if (error)
		return error;

	error = xrep_tempfile_adjust_directory_tree(sc);
	if (error)
		return error;

	/* Lock the inode so the VFS cannot touch this file. */
	xchk_ilock(sc, XFS_IOLOCK_EXCL);

	error = xchk_trans_alloc(sc, resblks);
	if (error)
		goto out;

	error = xchk_ino_dqattach(sc);
	if (error)
		goto out;

	xchk_ilock(sc, XFS_ILOCK_EXCL);
out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}

void
xchk_ilock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	xfs_ilock(sc->ip, ilock_flags);
	sc->ilock_flags |= ilock_flags;
}

bool
xchk_ilock_nowait(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	if (xfs_ilock_nowait(sc->ip, ilock_flags)) {
		sc->ilock_flags |= ilock_flags;
		return true;
	}

	return false;
}
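
/*
 * Sketch of the usual trylock pattern built on xchk_ilock_nowait(): try
 * the lock, and if it can't be had, check for fatal signals and back off
 * before trying again:
 *
 *	while (!xchk_ilock_nowait(sc, XFS_ILOCK_EXCL)) {
 *		if (xchk_should_terminate(sc, &error))
 *			return error;
 *		delay(1);
 *	}
 */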

void
xchk_iunlock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	sc->ilock_flags &= ~ilock_flags;
	xfs_iunlock(sc->ip, ilock_flags);
}

/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */
bool
xchk_should_check_xref(
	struct xfs_scrub	*sc,
	int			*error,
	struct xfs_btree_cur	**curpp)
{
	/* No point in xref if we already know we're corrupt. */
	if (xchk_skip_xref(sc->sm))
		return false;

	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xchk_xref_error(sc, *error, __return_address);

	/*
	 * Errors encountered during cross-referencing with another
	 * data structure should not cause this scrubber to abort.
	 */
	*error = 0;
	return false;
}
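
/*
 * Sketch of a typical cross-reference check built on this predicate, where
 * xchk_query_other_btree() stands in for whatever lookup the caller makes:
 *
 *	error = xchk_query_other_btree(sc->sa.rmap_cur, ...);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *
 * then compare the answer against the record being checked and call one
 * of the xchk_*_xref_set_corrupt() helpers on a mismatch.
 */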

/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xchk_buffer_recheck(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	if (bp->b_ops == NULL) {
		xchk_block_set_corrupt(sc, bp);
		return;
	}
	if (bp->b_ops->verify_struct == NULL) {
		xchk_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), fa);
}

static inline int
xchk_metadata_inode_subtype(
	struct xfs_scrub	*sc,
	unsigned int		scrub_type)
{
	struct xfs_scrub_subord	*sub;
	int			error;

	sub = xchk_scrub_create_subord(sc, scrub_type);
	if (!sub)
		return -ENOMEM;

	error = sub->sc.ops->scrub(&sub->sc);
	xchk_scrub_free_subord(sub);
	return error;
}

/*
 * Scrub the attr/data forks of a metadata inode.  The metadata inode must be
 * pointed to by sc->ip and the ILOCK must be held.
 */
int
xchk_metadata_inode_forks(
	struct xfs_scrub	*sc)
{
	bool			shared;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/* Check the inode record. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_INODE);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Metadata inodes don't live on the rt device. */
	if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They should never participate in reflink. */
	if (xfs_is_reflink_inode(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* Invoke the data fork scrubber. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTD);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Look for incorrect shared blocks. */
	if (xfs_has_reflink(sc->mp)) {
		error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
				&shared);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			return error;
		if (shared)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	}

	/*
	 * Metadata files can only have extended attributes on metadir
	 * filesystems, either for parent pointers or for actual xattr data.
	 */
	if (xfs_inode_hasattr(sc->ip)) {
		if (!xfs_has_metadir(sc->mp)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			return 0;
		}

		error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTA);
		if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			return error;
	}

	return 0;
}

/*
 * Enable filesystem hooks (i.e. runtime code patching) before starting a scrub
 * operation.  Callers must not hold any locks that intersect with the CPU
 * hotplug lock (e.g. writeback locks) because code patching must halt the CPUs
 * to change kernel code.
 */
void
xchk_fsgates_enable(
	struct xfs_scrub	*sc,
	unsigned int		scrub_fsgates)
{
	ASSERT(!(scrub_fsgates & ~XCHK_FSGATES_ALL));
	ASSERT(!(sc->flags & scrub_fsgates));

	trace_xchk_fsgates_enable(sc, scrub_fsgates);

	if (scrub_fsgates & XCHK_FSGATES_DRAIN)
		xfs_defer_drain_wait_enable();

	if (scrub_fsgates & XCHK_FSGATES_QUOTA)
		xfs_dqtrx_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_DIRENTS)
		xfs_dir_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_RMAP)
		xfs_rmap_hook_enable();

	sc->flags |= scrub_fsgates;
}
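
/*
 * For example (sketch), a checker that needs the intent drain turns the
 * gate on in its setup function, before grabbing any AG headers:
 *
 *	if (sc->flags & XCHK_TRY_HARDER)
 *		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
 *
 * where XCHK_TRY_HARDER is set when a previous pass bailed out with
 * -ECHRNG or -EDEADLOCK and the scrub is being retried.
 */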

/*
 * Decide if this is a cached inode that's also allocated.  The caller
 * must hold a reference to an AG and the AGI buffer lock to prevent inodes
 * from being allocated or freed.
 *
 * Look up an inode by number in the given file system.  If the inode number
 * is invalid, return -EINVAL.  If the inode is not in cache, return -ENODATA.
 * If the inode is being reclaimed, return -ENODATA because we know the inode
 * cache cannot be updating the ondisk metadata.
 *
 * Otherwise, the incore inode is the one we want, and it is either live,
 * somewhere in the inactivation machinery, or reclaimable.  The inode is
 * allocated if i_mode is nonzero.  In all three cases, the cached inode will
 * be more up to date than the ondisk inode buffer, so we must use the incore
 * i_mode.
 */
int
xchk_inode_is_allocated(
	struct xfs_scrub	*sc,
	xfs_agino_t		agino,
	bool			*inuse)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag = sc->sa.pag;
	xfs_ino_t		ino;
	struct xfs_inode	*ip;
	int			error;

	/* caller must hold perag reference */
	if (pag == NULL) {
		ASSERT(pag != NULL);
		return -EINVAL;
	}

	/* caller must have AGI buffer */
	if (sc->sa.agi_bp == NULL) {
		ASSERT(sc->sa.agi_bp != NULL);
		return -EINVAL;
	}

	/* reject inode numbers outside existing AGs */
	ino = xfs_agino_to_ino(pag, agino);
	if (!xfs_verify_ino(mp, ino))
		return -EINVAL;

	error = -ENODATA;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	if (!ip) {
		/* cache miss */
		goto out_rcu;
	}

	/*
	 * If the inode number doesn't match, the incore inode got reused
	 * during an RCU grace period and the radix tree hasn't been updated.
	 * This isn't the inode we want.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	trace_xchk_inode_is_allocated(ip);

	/*
	 * We have an incore inode that matches the inode we want, and the
	 * caller holds the perag structure and the AGI buffer.  Let's check
	 * our assumptions below:
	 */

#ifdef DEBUG
	/*
	 * (1) If the incore inode is live (i.e. referenced from the dcache),
	 * it will not be INEW, nor will it be in the inactivation or reclaim
	 * machinery.  The ondisk inode had better be allocated.  This is the
	 * most trivial case.
	 */
	if (!(ip->i_flags & (XFS_NEED_INACTIVE | XFS_INEW | XFS_IRECLAIMABLE |
			     XFS_INACTIVATING))) {
		/* live inode */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}

	/*
	 * If the incore inode is INEW, there are several possibilities:
	 *
	 * (2) For a file that is being created, note that we allocate the
	 * ondisk inode before allocating, initializing, and adding the incore
	 * inode to the radix tree.
	 *
	 * (3) If the incore inode is being recycled, the inode has to be
	 * allocated because we don't allow freed inodes to be recycled.
	 * Recycling doesn't touch i_mode.
	 */
	if (ip->i_flags & XFS_INEW) {
		/* created on disk already or recycling */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}

	/*
	 * (4) If the inode is queued for inactivation (NEED_INACTIVE) but
	 * inactivation has not started (!INACTIVATING), it is still allocated.
	 */
	if ((ip->i_flags & XFS_NEED_INACTIVE) &&
	    !(ip->i_flags & XFS_INACTIVATING)) {
		/* definitely before difree */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}
#endif

	/*
	 * If the incore inode is undergoing inactivation (INACTIVATING), there
	 * are two possibilities:
	 *
	 * (5) It is before the point where it would get freed ondisk, in which
	 * case i_mode is still nonzero.
	 *
	 * (6) It has already been freed, in which case i_mode is zero.
	 *
	 * We don't take the ILOCK here, but difree and dialloc update the AGI,
	 * and we've taken the AGI buffer lock, which prevents that from
	 * happening.
	 */

	/*
	 * (7) Inodes undergoing inactivation (INACTIVATING) or queued for
	 * reclaim (IRECLAIMABLE) could be allocated or free.  i_mode still
	 * reflects the ondisk state.
	 */

	/*
	 * (8) If the inode is in IFLUSHING, it's safe to query i_mode because
	 * the flush code uses i_mode to format the ondisk inode.
	 */

	/*
	 * (9) If the inode is in IRECLAIM and was reachable via the radix
	 * tree, it still has the same i_mode as it did before it entered
	 * reclaim.  The inode object is still alive because we hold the RCU
	 * read lock.
	 */

	*inuse = VFS_I(ip)->i_mode != 0;
	error = 0;

out_skip:
	spin_unlock(&ip->i_flags_lock);
out_rcu:
	rcu_read_unlock();
	return error;
}

/* Is this inode a root directory for either tree? */
bool
xchk_inode_is_dirtree_root(const struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	return ip == mp->m_rootip ||
		(xfs_has_metadir(mp) && ip == mp->m_metadirip);
}

/* Does the superblock point down to this inode? */
bool
xchk_inode_is_sb_rooted(const struct xfs_inode *ip)
{
	return xchk_inode_is_dirtree_root(ip) ||
	       xfs_is_sb_inum(ip->i_mount, ip->i_ino);
}

/* What is the root directory inumber for this inode? */
xfs_ino_t
xchk_inode_rootdir_inum(const struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (xfs_is_metadir_inode(ip))
		return mp->m_metadirip->i_ino;
	return mp->m_rootip->i_ino;
}

static int
xchk_meta_btree_count_blocks(
	struct xfs_scrub	*sc,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_btree_cur	*cur;
	int			error;

	if (!sc->sr.rtg) {
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	switch (sc->ip->i_metatype) {
	case XFS_METAFILE_RTRMAP:
		cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
		break;
	case XFS_METAFILE_RTREFCOUNT:
		cur = xfs_rtrefcountbt_init_cursor(sc->tp, sc->sr.rtg);
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = xfs_btree_count_blocks(cur, count);
	xfs_btree_del_cursor(cur, error);
	if (!error) {
		*nextents = 0;
		(*count)--;	/* don't count the btree iroot */
	}
	return error;
}

/* Count the blocks used by a file, even if it's a metadata inode. */
int
xchk_inode_count_blocks(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(sc->ip, whichfork);

	if (!ifp) {
		*nextents = 0;
		*count = 0;
		return 0;
	}

	if (ifp->if_format == XFS_DINODE_FMT_META_BTREE) {
		ASSERT(whichfork == XFS_DATA_FORK);
		return xchk_meta_btree_count_blocks(sc, nextents, count);
	}

	return xfs_bmap_count_blocks(sc->tp, sc->ip, whichfork, nextents,
			count);
}
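
/*
 * Usage sketch, loosely modeled on the inode scrubber: count the mapped
 * blocks and compare them against what the inode claims.  (A faithful
 * check would sum the data and attr forks before comparing.)
 *
 *	xfs_extnum_t	nextents;
 *	xfs_filblks_t	count;
 *
 *	error = xchk_inode_count_blocks(sc, XFS_DATA_FORK, &nextents, &count);
 *	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
 *		return error;
 *	if (count > sc->ip->i_nblocks)
 *		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
 */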