// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "xfs_ag.h"
/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xchk_setup_ag_iallocbt(
	struct xfs_scrub	*sc)
{
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
	return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
}
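
/*
 * The "try again" works like this: if the freemask check in
 * xchk_iallocbt_check_cluster_ifree() below cannot trust a mismatch, it
 * returns -EDEADLOCK; the scrub core then retries with XCHK_TRY_HARDER
 * set, which makes xchk_setup_ag_btree() above force the log so that
 * logged inode cores reach disk before we look again.
 */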

/* Inode btree scrubber. */

struct xchk_iallocbt {
	/* Number of inodes we see while scanning inobt. */
	unsigned long long	inodes;

	/* Expected next startino, for big block filesystems. */
	xfs_agino_t		next_startino;

	/* Expected end of the current inode cluster. */
	xfs_agino_t		next_cluster_ino;
};
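
/*
 * Example of the "big block" case that next_startino/next_cluster_ino
 * track: with 64k filesystem blocks and 512-byte inodes, one block holds
 * 128 inodes, but an inobt record covers only XFS_INODES_PER_CHUNK (64)
 * of them, so each inode cluster is mapped by two consecutive inobt
 * records.  (Illustrative numbers; any geometry where inodes_per_cluster
 * exceeds the chunk size behaves the same way.)
 */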

/*
 * Does the finobt have a record for this inode with the same hole/free state?
 * This is a bit complicated because of the following:
 *
 * - The finobt need not have a record if all inodes in the inobt record are
 *   allocated.
 * - The finobt need not have a record if all inodes in the inobt record are
 *   free.
 * - The finobt need not have a record if the inobt record says this is a hole.
 *   This likely doesn't happen in practice.
 */
STATIC int
xchk_inobt_xref_finobt(
	struct xfs_scrub	*sc,
	struct xfs_inobt_rec_incore *irec,
	xfs_agino_t		agino,
	bool			free,
	bool			hole)
{
	struct xfs_inobt_rec_incore frec;
	struct xfs_btree_cur	*cur = sc->sa.fino_cur;
	bool			ffree, fhole;
	unsigned int		frec_idx, fhole_idx;
	int			has_record;
	int			error;

	ASSERT(xfs_btree_is_fino(cur->bc_ops));

	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_record);
	if (error)
		return error;
	if (!has_record)
		goto no_record;

	error = xfs_inobt_get_rec(cur, &frec, &has_record);
	if (error)
		return error;
	if (!has_record)
		return -EFSCORRUPTED;

	if (frec.ir_startino + XFS_INODES_PER_CHUNK <= agino)
		goto no_record;

	/* There's a finobt record; free and hole status must match. */
	frec_idx = agino - frec.ir_startino;
	ffree = frec.ir_free & (1ULL << frec_idx);
	fhole_idx = frec_idx / XFS_INODES_PER_HOLEMASK_BIT;
	fhole = frec.ir_holemask & (1U << fhole_idx);
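
	/*
	 * Index math example: if frec.ir_startino is 128 and agino is 135,
	 * then frec_idx is 7 and fhole_idx is 1, since each holemask bit
	 * covers XFS_INODES_PER_HOLEMASK_BIT (4) inodes.
	 */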

	if (ffree != free)
		xchk_btree_xref_set_corrupt(sc, cur, 0);
	if (fhole != hole)
		xchk_btree_xref_set_corrupt(sc, cur, 0);
	return 0;

no_record:
	/* inobt record is fully allocated */
	if (irec->ir_free == 0)
		return 0;

	/* inobt record is totally unallocated */
	if (irec->ir_free == XFS_INOBT_ALL_FREE)
		return 0;

	/* inobt record says this is a hole */
	if (hole)
		return 0;

	/* finobt doesn't care about allocated inodes */
	if (!free)
		return 0;

	xchk_btree_xref_set_corrupt(sc, cur, 0);
	return 0;
}

/*
 * Make sure that each inode of this part of an inobt record has the same
 * sparse and free status as the finobt.
 */
STATIC void
xchk_inobt_chunk_xref_finobt(
	struct xfs_scrub	*sc,
	struct xfs_inobt_rec_incore *irec,
	xfs_agino_t		agino,
	unsigned int		nr_inodes)
{
	xfs_agino_t		i;
	unsigned int		rec_idx;
	int			error;

	ASSERT(sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT);

	if (!sc->sa.fino_cur || xchk_skip_xref(sc->sm))
		return;

	for (i = agino, rec_idx = agino - irec->ir_startino;
	     i < agino + nr_inodes;
	     i++, rec_idx++) {
		bool		free, hole;
		unsigned int	hole_idx;

		free = irec->ir_free & (1ULL << rec_idx);
		hole_idx = rec_idx / XFS_INODES_PER_HOLEMASK_BIT;
		hole = irec->ir_holemask & (1U << hole_idx);

		error = xchk_inobt_xref_finobt(sc, irec, i, free, hole);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
	}
}

/*
 * Does the inobt have a record for this inode with the same hole/free state?
 * The inobt must always have a record if there's a finobt record.
 */
STATIC int
xchk_finobt_xref_inobt(
	struct xfs_scrub	*sc,
	struct xfs_inobt_rec_incore *frec,
	xfs_agino_t		agino,
	bool			ffree,
	bool			fhole)
{
	struct xfs_inobt_rec_incore irec;
	struct xfs_btree_cur	*cur = sc->sa.ino_cur;
	bool			free, hole;
	unsigned int		rec_idx, hole_idx;
	int			has_record;
	int			error;

	ASSERT(xfs_btree_is_ino(cur->bc_ops));

	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_record);
	if (error)
		return error;
	if (!has_record)
		goto no_record;

	error = xfs_inobt_get_rec(cur, &irec, &has_record);
	if (error)
		return error;
	if (!has_record)
		return -EFSCORRUPTED;

	if (irec.ir_startino + XFS_INODES_PER_CHUNK <= agino)
		goto no_record;

	/* There's an inobt record; free and hole status must match. */
	rec_idx = agino - irec.ir_startino;
	free = irec.ir_free & (1ULL << rec_idx);
	hole_idx = rec_idx / XFS_INODES_PER_HOLEMASK_BIT;
	hole = irec.ir_holemask & (1U << hole_idx);

	if (ffree != free)
		xchk_btree_xref_set_corrupt(sc, cur, 0);
	if (fhole != hole)
		xchk_btree_xref_set_corrupt(sc, cur, 0);
	return 0;

no_record:
	/* finobt should never have a record for which the inobt does not */
	xchk_btree_xref_set_corrupt(sc, cur, 0);
	return 0;
}

/*
 * Make sure that each inode of this part of a finobt record has the same
 * sparse and free status as the inobt.
 */
STATIC void
xchk_finobt_chunk_xref_inobt(
	struct xfs_scrub	*sc,
	struct xfs_inobt_rec_incore *frec,
	xfs_agino_t		agino,
	unsigned int		nr_inodes)
{
	xfs_agino_t		i;
	unsigned int		rec_idx;
	int			error;

	ASSERT(sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT);

	if (!sc->sa.ino_cur || xchk_skip_xref(sc->sm))
		return;

	for (i = agino, rec_idx = agino - frec->ir_startino;
	     i < agino + nr_inodes;
	     i++, rec_idx++) {
		bool		ffree, fhole;
		unsigned int	hole_idx;

		ffree = frec->ir_free & (1ULL << rec_idx);
		hole_idx = rec_idx / XFS_INODES_PER_HOLEMASK_BIT;
		fhole = frec->ir_holemask & (1U << hole_idx);

		error = xchk_finobt_xref_inobt(sc, frec, i, ffree, fhole);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
			return;
	}
}

/* Is this chunk worth checking and cross-referencing? */
STATIC bool
xchk_iallocbt_chunk(
	struct xchk_btree	*bs,
	struct xfs_inobt_rec_incore *irec,
	xfs_agino_t		agino,
	unsigned int		nr_inodes)
{
	struct xfs_scrub	*sc = bs->sc;
	struct xfs_mount	*mp = bs->cur->bc_mp;
	struct xfs_perag	*pag = to_perag(bs->cur->bc_group);
	xfs_agblock_t		agbno;
	xfs_extlen_t		len;

	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	len = XFS_B_TO_FSB(mp, nr_inodes * mp->m_sb.sb_inodesize);
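
	/*
	 * For example, a full chunk of 64 inodes at 512 bytes apiece covers
	 * 32768 bytes, which XFS_B_TO_FSB converts to 8 blocks on a 4k-block
	 * filesystem.  (Illustrative sizes only.)
	 */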

	if (!xfs_verify_agbext(pag, agbno, len))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return false;

	xchk_xref_is_used_space(sc, agbno, len);
	if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)
		xchk_inobt_chunk_xref_finobt(sc, irec, agino, nr_inodes);
	else
		xchk_finobt_chunk_xref_inobt(sc, irec, agino, nr_inodes);
	xchk_xref_is_only_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, len);
	xchk_xref_is_not_cow_staging(sc, agbno, len);
	return true;
}

/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record;
 * the index of an inode cluster within the inobt record (as well as the
 * cluster buffer itself); and the index of the inode within the cluster.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */
STATIC int
xchk_iallocbt_check_cluster_ifree(
	struct xchk_btree	*bs,
	struct xfs_inobt_rec_incore *irec,
	unsigned int		irec_ino,
	struct xfs_dinode	*dip)
{
	xfs_ino_t		fsino;
	xfs_agino_t		agino;
	bool			irec_free;
	bool			ino_inuse;
	bool			freemask_ok;
	int			error = 0;

	if (xchk_should_terminate(bs->sc, &error))
		return error;

	/*
	 * Given an inobt record and the offset of an inode from the start of
	 * the record, compute which fs inode we're talking about.
	 */
	agino = irec->ir_startino + irec_ino;
	fsino = xfs_agino_to_ino(to_perag(bs->cur->bc_group), agino);
	irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	error = xchk_inode_is_allocated(bs->sc, agino, &ino_inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
		freemask_ok = irec_free ^ !!(dip->di_mode);
		if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = irec_free ^ ino_inuse;
	}
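
	/*
	 * The XOR above requires the free bit and the in-use state to be
	 * opposites: a free inode must not be in use, and an allocated
	 * inode must be.  If both are set or both are clear, the freemask
	 * is wrong.
	 */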
	if (!freemask_ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}

/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's actually on disk.  If sparse inodes are enabled, the cluster does
 * not actually have to map to inodes if the corresponding holemask bit is set.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */
STATIC int
xchk_iallocbt_check_cluster(
	struct xchk_btree	*bs,
	struct xfs_inobt_rec_incore *irec,
	unsigned int		cluster_base)
{
	struct xfs_imap		imap;
	struct xfs_mount	*mp = bs->cur->bc_mp;
	struct xfs_buf		*cluster_bp;
	unsigned int		nr_inodes;
	xfs_agblock_t		agbno;
	unsigned int		cluster_index;
	uint16_t		cluster_mask = 0;
	uint16_t		ir_holemask;
	int			error = 0;

	nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			M_IGEO(mp)->inodes_per_cluster);

	/* Map this inode cluster */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

	/* Compute a bitmask for this cluster that can be used for holemask. */
	for (cluster_index = 0;
	     cluster_index < nr_inodes;
	     cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
		cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
				XFS_INODES_PER_HOLEMASK_BIT);
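
	/*
	 * Worked example: for a cluster of 32 inodes starting at
	 * cluster_base 32, the loop above sets holemask bits 8-15, since
	 * each holemask bit stands for 4 inodes.  (Illustrative geometry.)
	 */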

	/*
	 * Map the first inode of this cluster to a buffer and offset.
	 * Be careful about inobt records that don't align with the start of
	 * the inode buffer when block sizes are large enough to hold multiple
	 * inode chunks.  When this happens, cluster_base will be zero but
	 * ir_startino can be large enough to make im_boffset nonzero.
	 */
	ir_holemask = (irec->ir_holemask & cluster_mask);
	imap.im_blkno = xfs_agbno_to_daddr(to_perag(bs->cur->bc_group), agbno);
	imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
	imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
			mp->m_sb.sb_inodelog;

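	/*
	 * Example of the misalignment case described above: with 64k blocks
	 * and 512-byte inodes, the second 64-inode chunk in a block has
	 * cluster_base == 0 but im_boffset == 32768.  Both being nonzero at
	 * once would mean the record is misaligned.  (Illustrative sizes.)
	 */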
	if (imap.im_boffset != 0 && cluster_base != 0) {
		ASSERT(imap.im_boffset == 0 || cluster_base == 0);
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	trace_xchk_iallocbt_check_cluster(to_perag(bs->cur->bc_group),
			irec->ir_startino, imap.im_blkno, imap.im_len,
			cluster_base, nr_inodes, cluster_mask, ir_holemask,
			XFS_INO_TO_OFFSET(mp, irec->ir_startino +
					      cluster_base));

	/* The whole cluster must be a hole or not a hole. */
	if (ir_holemask != cluster_mask && ir_holemask != 0) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	/* If any part of this is a hole, skip it. */
	if (ir_holemask) {
		xchk_xref_is_not_owned_by(bs->sc, agbno,
				M_IGEO(mp)->blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);
		return 0;
	}

	xchk_xref_is_only_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster,
			&XFS_RMAP_OINFO_INODES);

	/* Grab the inode cluster buffer. */
	error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp);
	if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
		return error;

	/* Check free status of each inode within this cluster. */
	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
		struct xfs_dinode	*dip;

		if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			break;
		}

		dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
		error = xchk_iallocbt_check_cluster_ifree(bs, irec,
				cluster_base + cluster_index, dip);
		if (error)
			break;
		imap.im_boffset += mp->m_sb.sb_inodesize;
	}

	xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
	return error;
}

/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */
STATIC int
xchk_iallocbt_check_clusters(
	struct xchk_btree	*bs,
	struct xfs_inobt_rec_incore *irec)
{
	unsigned int		cluster_base;
	int			error = 0;

	/*
	 * For the common case where this inobt record maps to multiple inode
	 * clusters this will call _check_cluster for each cluster.
	 *
	 * For the case that multiple inobt records map to a single cluster,
	 * this will call _check_cluster once.
	 */
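	/*
	 * For instance, with 16 inodes per cluster, the loop below visits
	 * cluster_base 0, 16, 32, and 48 within the 64-inode chunk; with
	 * 128 inodes per cluster, it visits only cluster_base 0.
	 * (Illustrative cluster sizes.)
	 */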
	for (cluster_base = 0;
	     cluster_base < XFS_INODES_PER_CHUNK;
	     cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) {
		error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
		if (error)
			break;
	}

	return error;
}

/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * contains multiple inodes, we check that the inobt record is aligned to the
 * correct inode, not just the correct block on disk.  This results in a finer
 * grained corruption check.
 */
STATIC void
xchk_iallocbt_rec_alignment(
	struct xchk_btree	*bs,
	struct xfs_inobt_rec_incore *irec)
{
	struct xfs_mount	*mp = bs->sc->mp;
	struct xchk_iallocbt	*iabt = bs->private;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);

	/*
	 * finobt records have different positioning requirements than inobt
	 * records: each finobt record must have a corresponding inobt record.
	 * That is checked in the xref function, so for now we only catch the
	 * obvious case where the record isn't at all aligned properly.
	 *
	 * Note that if a fs block contains more than a single chunk of inodes,
	 * we will have finobt records only for those chunks containing free
	 * inodes, and therefore expect chunk alignment of finobt records.
	 * Otherwise, we expect that the finobt record is aligned to the
	 * cluster alignment as told by the superblock.
	 */
	if (xfs_btree_is_fino(bs->cur->bc_ops)) {
		unsigned int	imask;

		imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
				igeo->cluster_align_inodes) - 1;
		if (irec->ir_startino & imask)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (iabt->next_startino != NULLAGINO) {
		/*
		 * We're midway through a cluster of inodes that is mapped by
		 * multiple inobt records.  Did we get the record for the next
		 * irec in the sequence?
		 */
		if (irec->ir_startino != iabt->next_startino) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			return;
		}

		iabt->next_startino += XFS_INODES_PER_CHUNK;

		/* Are we done with the cluster? */
		if (iabt->next_startino >= iabt->next_cluster_ino) {
			iabt->next_startino = NULLAGINO;
			iabt->next_cluster_ino = NULLAGINO;
		}
		return;
	}

	/* inobt records must be aligned to cluster and inode alignment size. */
	if (irec->ir_startino & (igeo->cluster_align_inodes - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (irec->ir_startino & (igeo->inodes_per_cluster - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (igeo->inodes_per_cluster <= XFS_INODES_PER_CHUNK)
		return;

	/*
	 * If this is the start of an inode cluster that can be mapped by
	 * multiple inobt records, the next inobt record must follow exactly
	 * after this one.
	 */
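	/*
	 * For example, if a cluster holds 128 inodes and this record starts
	 * at inode 256, we expect the next record at startino 320, and the
	 * chain ends once next_startino reaches 384.  (Illustrative
	 * geometry.)
	 */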
	iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
	iabt->next_cluster_ino = irec->ir_startino + igeo->inodes_per_cluster;
}

/* Scrub an inobt/finobt record. */
STATIC int
xchk_iallocbt_rec(
	struct xchk_btree	*bs,
	const union xfs_btree_rec *rec)
{
	struct xfs_mount	*mp = bs->cur->bc_mp;
	struct xchk_iallocbt	*iabt = bs->private;
	struct xfs_inobt_rec_incore irec;
	uint64_t		holes;
	xfs_agino_t		agino;
	int			holecount;
	int			i;
	int			error = 0;
	uint16_t		holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);
	if (xfs_inobt_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	agino = irec.ir_startino;

	xchk_iallocbt_rec_alignment(bs, &irec);
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	iabt->inodes += irec.ir_count;

	/* Handle non-sparse inodes */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xchk_iallocbt_chunk(bs, &irec, agino,
					XFS_INODES_PER_CHUNK))
			goto out;
		goto check_clusters;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
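
	/*
	 * Hole/free sanity example: a record with ir_holemask 0xff00 marks
	 * inodes 32-63 as holes, so bits 32-63 of "holes" are set and each
	 * of those inodes must also be marked free in ir_free; a hole that
	 * isn't free is corruption.
	 */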

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xchk_iallocbt_chunk(bs, &irec, agino,
					XFS_INODES_PER_HOLEMASK_BIT))
			goto out;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

check_clusters:
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	error = xchk_iallocbt_check_clusters(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}

/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub	*sc)
{
	xfs_filblks_t		blocks;
	xfs_filblks_t		inobt_blocks = 0;
	xfs_filblks_t		finobt_blocks = 0;
	int			error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) ||
	    xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xchk_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xchk_process_error(sc, 0, 0, &error))
			return;
	}

	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INOBT, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}

/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xchk_iallocbt_xref_rmap_inodes(
	struct xfs_scrub	*sc,
	unsigned long long	inodes)
{
	xfs_filblks_t		blocks;
	xfs_filblks_t		inode_blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INODES, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
	if (blocks != inode_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub one of the inode btrees for some AG. */
int
xchk_iallocbt(
	struct xfs_scrub	*sc)
{
	struct xfs_btree_cur	*cur;
	struct xchk_iallocbt	iabt = {
		.inodes		= 0,
		.next_startino	= NULLAGINO,
		.next_cluster_ino = NULLAGINO,
	};
	int			error;

	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_INOBT:
		cur = sc->sa.ino_cur;
		break;
	case XFS_SCRUB_TYPE_FINOBT:
		cur = sc->sa.fino_cur;
		break;
	default:
		ASSERT(0);
		return -EIO;
	}

	error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
			&iabt);
	if (error)
		return error;

	xchk_iallocbt_xref_rmap_btreeblks(sc);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)
		xchk_iallocbt_xref_rmap_inodes(sc, iabt.inodes);
	return error;
}

/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	struct xfs_btree_cur	**icur,
	enum xbtree_recpacking	expected)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!(*icur) || xchk_skip_xref(sc->sm))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, icur))
		return;
	if (outcome != expected)
		xchk_btree_xref_set_corrupt(sc, *icur, 0);
}

/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur,
			XBTREE_RECPACKING_EMPTY);
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur,
			XBTREE_RECPACKING_EMPTY);
}

/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur,
			XBTREE_RECPACKING_FULL);
}