// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_health.h"
#include "xfs_btree.h"
#include "xfs_ag.h"
#include "xfs_rtalloc.h"
#include "xfs_inode.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/*
 * FS Summary Counters
 * ===================
 *
 * The basics of filesystem summary counter checking are that we iterate the
 * AGs counting the number of free blocks, free space btree blocks, per-AG
 * reservations, inodes, delayed allocation reservations, and free inodes.
 * Then we compare what we computed against the in-core counters.
 *
 * However, the reality is that summary counters are a tricky beast to check.
 * While we /could/ freeze the filesystem and scramble around the AGs counting
 * the free blocks, in practice we prefer not to do that for a scan because
 * freezing is costly.  To get around this, we added a per-cpu counter of the
 * delalloc reservations so that we can rotor around the AGs relatively
 * quickly, and we allow the counts to be slightly off because we're not
 * taking any locks while we do this.
 *
 * So the first thing we do is warm up the buffer cache in the setup routine
 * by walking all the AGs to make sure the incore per-AG structure has been
 * initialized.  The expected value calculation then iterates the incore
 * per-AG structures as quickly as it can.  We snapshot the percpu counters
 * before and after this operation and use the difference in counter values
 * to guess at our tolerance for mismatch between expected and actual counter
 * values.
 */

struct xchk_fscounters {
	struct xfs_scrub	*sc;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	uint64_t		frextents;
	unsigned long long	icount_min;
	unsigned long long	icount_max;
};

/*
 * Since the expected value computation is lockless but only browses incore
 * values, the percpu counters should be fairly close to each other.  However,
 * we'll allow ourselves to be off by at least this (arbitrary) amount.
 */
#define XCHK_FSCOUNT_MIN_VARIANCE	(512)

/*
 * Make sure the per-AG structure has been initialized from the on-disk header
 * contents and trust that the incore counters match the ondisk counters.  (The
 * AGF and AGI scrubbers check them, and a normal xfs_scrub run checks the
 * summary counters after checking all AG headers.)  Do this from the setup
 * function so that the inner AG aggregation loop runs as quickly as possible.
 *
 * This function runs during the setup phase /before/ we start checking any
 * metadata.
 */
STATIC int
xchk_fscount_warmup(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agi_bp = NULL;
	struct xfs_buf		*agf_bp = NULL;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno;
	int			error = 0;

	for_each_perag(mp, agno, pag) {
		if (xchk_should_terminate(sc, &error))
			break;
		if (xfs_perag_initialised_agi(pag) &&
		    xfs_perag_initialised_agf(pag))
			continue;

		/* Lock both AG headers. */
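		/*
		 * We take the AGI before the AGF, matching the usual AG
		 * header locking order; as a side effect, the header read
		 * functions also initialize the incore per-AG counters from
		 * the ondisk headers.
		 */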
		error = xfs_ialloc_read_agi(pag, sc->tp, &agi_bp);
		if (error)
			break;
		error = xfs_alloc_read_agf(pag, sc->tp, 0, &agf_bp);
		if (error)
			break;

		/*
		 * These are supposed to be initialized by the header read
		 * function.
		 */
		if (!xfs_perag_initialised_agi(pag) ||
		    !xfs_perag_initialised_agf(pag)) {
			error = -EFSCORRUPTED;
			break;
		}

		xfs_buf_relse(agf_bp);
		agf_bp = NULL;
		xfs_buf_relse(agi_bp);
		agi_bp = NULL;
	}

	if (agf_bp)
		xfs_buf_relse(agf_bp);
	if (agi_bp)
		xfs_buf_relse(agi_bp);
	if (pag)
		xfs_perag_rele(pag);
	return error;
}

int
xchk_setup_fscounters(
	struct xfs_scrub	*sc)
{
	struct xchk_fscounters	*fsc;
	int			error;

	/*
	 * If the AGF doesn't track btreeblks, we have to lock the AGF to count
	 * btree block usage by walking the actual btrees.
	 */
	if (!xfs_has_lazysbcount(sc->mp))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	sc->buf = kzalloc(sizeof(struct xchk_fscounters), XCHK_GFP_FLAGS);
	if (!sc->buf)
		return -ENOMEM;
	fsc = sc->buf;
	fsc->sc = sc;

	xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max);

	/* We must get the incore counters set up before we can proceed. */
	error = xchk_fscount_warmup(sc);
	if (error)
		return error;

	return xchk_trans_alloc(sc, 0);
}

/*
 * Part 1: Collecting filesystem summary counts.  For each AG, we add its
 * summary counts (total inodes, free inodes, free data blocks) to an incore
 * copy of the overall filesystem summary counts.
 *
 * To avoid false corruption reports in part 2, any failure in this part must
 * set the INCOMPLETE flag even when a negative errno is returned.  This care
 * must be taken with certain errno values (i.e. EFSBADCRC, EFSCORRUPTED,
 * ECANCELED) that are absorbed into a scrub state flag update by
 * xchk_*_process_error.
 */

/* Count free space btree blocks manually for pre-lazysbcount filesystems. */
static int
xchk_fscount_btreeblks(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc,
	xfs_agnumber_t		agno)
{
	xfs_extlen_t		blocks;
	int			error;

	error = xchk_ag_init_existing(sc, agno, &sc->sa);
	if (error)
		goto out_free;

	/*
	 * In the pre-lazysbcount scheme, blocks in the bnobt and cntbt still
	 * count as free space, except for each btree's root block, which is
	 * not accounted as free space; hence subtract one per btree.
	 */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (error)
		goto out_free;
	fsc->fdblocks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (error)
		goto out_free;
	fsc->fdblocks += blocks - 1;

out_free:
	xchk_ag_free(sc, &sc->sa);
	return error;
}

/*
 * Calculate what the global in-core counters ought to be from the incore
 * per-AG structure.  Callers can compare this to the actual in-core counters
 * to estimate by how much both in-core and on-disk counters need to be
 * adjusted.
 */
STATIC int
xchk_fscount_aggregate_agcounts(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag;
	uint64_t		delayed;
	xfs_agnumber_t		agno;
	int			tries = 8;
	int			error = 0;

retry:
	fsc->icount = 0;
	fsc->ifree = 0;
	fsc->fdblocks = 0;

	for_each_perag(mp, agno, pag) {
		if (xchk_should_terminate(sc, &error))
			break;

		/* This somehow got unset since the warmup? */
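		/*
		 * (Nothing stays locked between the warmup walk and this
		 * aggregation walk, so re-check each perag here before
		 * trusting its incore counters.)
		 */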
		if (!xfs_perag_initialised_agi(pag) ||
		    !xfs_perag_initialised_agf(pag)) {
			error = -EFSCORRUPTED;
			break;
		}

		/* Count all the inodes */
		fsc->icount += pag->pagi_count;
		fsc->ifree += pag->pagi_freecount;

		/* Add up the free/freelist/bnobt/cntbt blocks */
		fsc->fdblocks += pag->pagf_freeblks;
		fsc->fdblocks += pag->pagf_flcount;
		if (xfs_has_lazysbcount(sc->mp)) {
			fsc->fdblocks += pag->pagf_btreeblks;
		} else {
			error = xchk_fscount_btreeblks(sc, fsc, agno);
			if (error)
				break;
		}

		/*
		 * Per-AG reservations are taken out of the incore counters,
		 * so they must be left out of the free blocks computation.
		 */
		fsc->fdblocks -= pag->pag_meta_resv.ar_reserved;
		fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved;
	}
	if (pag)
		xfs_perag_rele(pag);
	if (error) {
		xchk_set_incomplete(sc);
		return error;
	}

	/*
	 * The global incore space reservation is taken from the incore
	 * counters, so leave that out of the computation.
	 */
	fsc->fdblocks -= mp->m_resblks_avail;

	/*
	 * Delayed allocation reservations are taken out of the incore counters
	 * but not recorded on disk, so leave them and their indlen blocks out
	 * of the computation.
	 */
	delayed = percpu_counter_sum(&mp->m_delalloc_blks);
	fsc->fdblocks -= delayed;

	trace_xchk_fscounters_calc(mp, fsc->icount, fsc->ifree, fsc->fdblocks,
			delayed);

	/* Bail out if the values we compute are totally nonsense. */
	if (fsc->icount < fsc->icount_min || fsc->icount > fsc->icount_max ||
	    fsc->fdblocks > mp->m_sb.sb_dblocks ||
	    fsc->ifree > fsc->icount_max)
		return -EFSCORRUPTED;

	/*
	 * If ifree > icount then we probably had some perturbation in the
	 * counters while we were calculating things.  We'll try a few times
	 * to maintain ifree <= icount before giving up.
	 */
	if (fsc->ifree > fsc->icount) {
		if (tries--)
			goto retry;
		xchk_set_incomplete(sc);
		return 0;
	}

	return 0;
}

#ifdef CONFIG_XFS_RT
STATIC int
xchk_fscount_add_frextent(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	const struct xfs_rtalloc_rec *rec,
	void			*priv)
{
	struct xchk_fscounters	*fsc = priv;
	int			error = 0;

	fsc->frextents += rec->ar_extcount;

	xchk_should_terminate(fsc->sc, &error);
	return error;
}

/* Calculate the number of free realtime extents from the realtime bitmap. */
STATIC int
xchk_fscount_count_frextents(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	fsc->frextents = 0;
	if (!xfs_has_realtime(mp))
		return 0;

	xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
	error = xfs_rtalloc_query_all(sc->mp, sc->tp,
			xchk_fscount_add_frextent, fsc);
	if (error) {
		xchk_set_incomplete(sc);
		goto out_unlock;
	}

out_unlock:
	xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
	return error;
}
#else
STATIC int
xchk_fscount_count_frextents(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc)
{
	fsc->frextents = 0;
	return 0;
}
#endif	/* CONFIG_XFS_RT */

/*
 * Part 2: Comparing filesystem summary counters.  All we have to do here is
 * sum the percpu counters and compare them to what we've observed.
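 *
 * A hypothetical example of the tolerance scheme implemented by
 * xchk_fscount_within_range() below: if m_fdblocks summed to 1000 before the
 * AG aggregation and 1040 after it, any expected value within [1000, 1040] is
 * accepted as a racing update; and if the expected value falls outside that
 * window while the two summations differ by XCHK_FSCOUNT_MIN_VARIANCE (512)
 * or more, the scrub is marked INCOMPLETE rather than the counter being
 * marked corrupt.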
356 */ 357 358 /* 359 * Is the @counter reasonably close to the @expected value? 360 * 361 * We neither locked nor froze anything in the filesystem while aggregating the 362 * per-AG data to compute the @expected value, which means that the counter 363 * could have changed. We know the @old_value of the summation of the counter 364 * before the aggregation, and we re-sum the counter now. If the expected 365 * value falls between the two summations, we're ok. 366 * 367 * Otherwise, we /might/ have a problem. If the change in the summations is 368 * more than we want to tolerate, the filesystem is probably busy and we should 369 * just send back INCOMPLETE and see if userspace will try again. 370 */ 371 static inline bool 372 xchk_fscount_within_range( 373 struct xfs_scrub *sc, 374 const int64_t old_value, 375 struct percpu_counter *counter, 376 uint64_t expected) 377 { 378 int64_t min_value, max_value; 379 int64_t curr_value = percpu_counter_sum(counter); 380 381 trace_xchk_fscounters_within_range(sc->mp, expected, curr_value, 382 old_value); 383 384 /* Negative values are always wrong. */ 385 if (curr_value < 0) 386 return false; 387 388 /* Exact matches are always ok. */ 389 if (curr_value == expected) 390 return true; 391 392 min_value = min(old_value, curr_value); 393 max_value = max(old_value, curr_value); 394 395 /* Within the before-and-after range is ok. */ 396 if (expected >= min_value && expected <= max_value) 397 return true; 398 399 /* 400 * If the difference between the two summations is too large, the fs 401 * might just be busy and so we'll mark the scrub incomplete. Return 402 * true here so that we don't mark the counter corrupt. 403 * 404 * XXX: In the future when userspace can grant scrub permission to 405 * quiesce the filesystem to solve the outsized variance problem, this 406 * check should be moved up and the return code changed to signal to 407 * userspace that we need quiesce permission. 408 */ 409 if (max_value - min_value >= XCHK_FSCOUNT_MIN_VARIANCE) { 410 xchk_set_incomplete(sc); 411 return true; 412 } 413 414 return false; 415 } 416 417 /* Check the superblock counters. */ 418 int 419 xchk_fscounters( 420 struct xfs_scrub *sc) 421 { 422 struct xfs_mount *mp = sc->mp; 423 struct xchk_fscounters *fsc = sc->buf; 424 int64_t icount, ifree, fdblocks, frextents; 425 int error; 426 427 /* Snapshot the percpu counters. */ 428 icount = percpu_counter_sum(&mp->m_icount); 429 ifree = percpu_counter_sum(&mp->m_ifree); 430 fdblocks = percpu_counter_sum(&mp->m_fdblocks); 431 frextents = percpu_counter_sum(&mp->m_frextents); 432 433 /* No negative values, please! */ 434 if (icount < 0 || ifree < 0 || fdblocks < 0 || frextents < 0) 435 xchk_set_corrupt(sc); 436 437 /* See if icount is obviously wrong. */ 438 if (icount < fsc->icount_min || icount > fsc->icount_max) 439 xchk_set_corrupt(sc); 440 441 /* See if fdblocks is obviously wrong. */ 442 if (fdblocks > mp->m_sb.sb_dblocks) 443 xchk_set_corrupt(sc); 444 445 /* See if frextents is obviously wrong. */ 446 if (frextents > mp->m_sb.sb_rextents) 447 xchk_set_corrupt(sc); 448 449 /* 450 * XXX: We can't quiesce percpu counter updates, so exit early. 451 * This can be re-enabled when we gain exclusive freeze functionality. 452 */ 453 return 0; 454 455 /* 456 * If ifree exceeds icount by more than the minimum variance then 457 * something's probably wrong with the counters. 
458 */ 459 if (ifree > icount && ifree - icount > XCHK_FSCOUNT_MIN_VARIANCE) 460 xchk_set_corrupt(sc); 461 462 /* Walk the incore AG headers to calculate the expected counters. */ 463 error = xchk_fscount_aggregate_agcounts(sc, fsc); 464 if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error)) 465 return error; 466 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE) 467 return 0; 468 469 /* Count the free extents counter for rt volumes. */ 470 error = xchk_fscount_count_frextents(sc, fsc); 471 if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error)) 472 return error; 473 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE) 474 return 0; 475 476 /* Compare the in-core counters with whatever we counted. */ 477 if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount)) 478 xchk_set_corrupt(sc); 479 480 if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree)) 481 xchk_set_corrupt(sc); 482 483 if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks, 484 fsc->fdblocks)) 485 xchk_set_corrupt(sc); 486 487 if (!xchk_fscount_within_range(sc, frextents, &mp->m_frextents, 488 fsc->frextents)) 489 xchk_set_corrupt(sc); 490 491 return 0; 492 } 493