// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_health.h"
#include "xfs_btree.h"
#include "xfs_ag.h"
#include "xfs_rtalloc.h"
#include "xfs_inode.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/*
 * FS Summary Counters
 * ===================
 *
 * The basics of filesystem summary counter checking are that we iterate the
 * AGs counting the number of free blocks, free space btree blocks, per-AG
 * reservations, inodes, delayed allocation reservations, and free inodes.
 * Then we compare what we computed against the in-core counters.
 *
 * However, the reality is that summary counters are a tricky beast to check.
 * While we /could/ freeze the filesystem and scramble around the AGs counting
 * the free blocks, in practice we prefer not to do that for a scan because
 * freezing is costly. To get around this, we added a per-cpu counter of the
 * delalloc reservations so that we can rotor around the AGs relatively
 * quickly, and we allow the counts to be slightly off because we're not
 * taking any locks while we do this.
 *
 * So the first thing we do is warm up the buffer cache in the setup routine
 * by walking all the AGs to make sure the incore per-AG structure has been
 * initialized. The expected value calculation then iterates the incore
 * per-AG structures as quickly as it can. We snapshot the percpu counters
 * before and after this operation and use the difference in counter values
 * to guess at our tolerance for mismatch between expected and actual counter
 * values.
 */

struct xchk_fscounters {
	struct xfs_scrub	*sc;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	uint64_t		frextents;
	unsigned long long	icount_min;
	unsigned long long	icount_max;
};

/*
 * Since the expected value computation is lockless but only browses incore
 * values, the percpu counters should be fairly close to each other. However,
 * we'll allow ourselves to be off by at least this (arbitrary) amount.
 */
#define XCHK_FSCOUNT_MIN_VARIANCE	(512)

/*
 * Make sure the per-AG structure has been initialized from the on-disk header
 * contents and trust that the incore counters match the ondisk counters. (The
 * AGF and AGI scrubbers check them, and a normal xfs_scrub run checks the
 * summary counters after checking all AG headers). Do this from the setup
 * function so that the inner AG aggregation loop runs as quickly as possible.
 *
 * This function runs during the setup phase /before/ we start checking any
 * metadata.
 */
STATIC int
xchk_fscount_warmup(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agi_bp = NULL;
	struct xfs_buf		*agf_bp = NULL;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno;
	int			error = 0;

	for_each_perag(mp, agno, pag) {
		if (xchk_should_terminate(sc, &error))
			break;
		if (xfs_perag_initialised_agi(pag) &&
		    xfs_perag_initialised_agf(pag))
			continue;

		/* Lock both AG headers. */
		error = xfs_ialloc_read_agi(pag, sc->tp, &agi_bp);
		if (error)
			break;
		error = xfs_alloc_read_agf(pag, sc->tp, 0, &agf_bp);
		if (error)
			break;

		/*
		 * These are supposed to be initialized by the header read
		 * function.
		 */
		if (!xfs_perag_initialised_agi(pag) ||
		    !xfs_perag_initialised_agf(pag)) {
			error = -EFSCORRUPTED;
			break;
		}

		xfs_buf_relse(agf_bp);
		agf_bp = NULL;
		xfs_buf_relse(agi_bp);
		agi_bp = NULL;
	}

	if (agf_bp)
		xfs_buf_relse(agf_bp);
	if (agi_bp)
		xfs_buf_relse(agi_bp);
	if (pag)
		xfs_perag_rele(pag);
	return error;
}
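/*
 * Note that the warmup leans on xfs_ialloc_read_agi and xfs_alloc_read_agf to
 * do the real initialisation work: the first read of each AG header copies
 * the ondisk inode and space counters into the xfs_perag, after which the
 * buffers can be released because only the incore copies are needed for the
 * aggregation below.
 */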
int
xchk_setup_fscounters(
	struct xfs_scrub	*sc)
{
	struct xchk_fscounters	*fsc;
	int			error;

	/*
	 * If the AGF doesn't track btreeblks, we have to lock the AGF to count
	 * btree block usage by walking the actual btrees.
	 */
	if (!xfs_has_lazysbcount(sc->mp))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	sc->buf = kzalloc(sizeof(struct xchk_fscounters), XCHK_GFP_FLAGS);
	if (!sc->buf)
		return -ENOMEM;
	fsc = sc->buf;
	fsc->sc = sc;

	xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max);

	/* We must get the incore counters set up before we can proceed. */
	error = xchk_fscount_warmup(sc);
	if (error)
		return error;

	/*
	 * Pause background reclaim while we're scrubbing to reduce the
	 * likelihood of background perturbations to the counters throwing off
	 * our calculations.
	 */
	xchk_stop_reaping(sc);

	return xchk_trans_alloc(sc, 0);
}

/*
 * Part 1: Collecting filesystem summary counts. For each AG, we add its
 * summary counts (total inodes, free inodes, free data blocks) to an incore
 * copy of the overall filesystem summary counts.
 *
 * To avoid false corruption reports in part 2, any failure in this part must
 * set the INCOMPLETE flag even when a negative errno is returned. This care
 * must be taken with certain errno values (i.e. EFSBADCRC, EFSCORRUPTED,
 * ECANCELED) that are absorbed into a scrub state flag update by
 * xchk_*_process_error.
 */

/* Count free space btree blocks manually for pre-lazysbcount filesystems. */
static int
xchk_fscount_btreeblks(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc,
	xfs_agnumber_t		agno)
{
	xfs_extlen_t		blocks;
	int			error;

	error = xchk_ag_init_existing(sc, agno, &sc->sa);
	if (error)
		goto out_free;

	/*
	 * Count the blocks in each free space btree, excluding each btree's
	 * root block to match how pagf_btreeblks is maintained.
	 */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (error)
		goto out_free;
	fsc->fdblocks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (error)
		goto out_free;
	fsc->fdblocks += blocks - 1;

out_free:
	xchk_ag_free(sc, &sc->sa);
	return error;
}
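/*
 * In rough symbolic form, and assuming the incore perag counters can be
 * trusted, the expected free data block count computed below is:
 *
 *	fdblocks = sum(pagf_freeblks + pagf_flcount + free space btree blocks)
 *		 - sum(pag_meta_resv.ar_reserved)
 *		 - sum(pag_rmapbt_resv.ar_orig_reserved)
 *		 - m_resblks_avail
 *		 - m_delalloc_blks
 *
 * where each sum runs over all AGs, and the free space btree blocks come from
 * pagf_btreeblks or, on pre-lazysbcount filesystems, from counting the btrees
 * directly as above.
 */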
/*
 * Calculate what the global in-core counters ought to be from the incore
 * per-AG structure. Callers can compare this to the actual in-core counters
 * to estimate by how much both in-core and on-disk counters need to be
 * adjusted.
 */
STATIC int
xchk_fscount_aggregate_agcounts(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag;
	uint64_t		delayed;
	xfs_agnumber_t		agno;
	int			tries = 8;
	int			error = 0;

retry:
	fsc->icount = 0;
	fsc->ifree = 0;
	fsc->fdblocks = 0;

	for_each_perag(mp, agno, pag) {
		if (xchk_should_terminate(sc, &error))
			break;

		/* This somehow got unset since the warmup? */
		if (!xfs_perag_initialised_agi(pag) ||
		    !xfs_perag_initialised_agf(pag)) {
			error = -EFSCORRUPTED;
			break;
		}

		/* Count all the inodes */
		fsc->icount += pag->pagi_count;
		fsc->ifree += pag->pagi_freecount;

		/* Add up the free/freelist/bnobt/cntbt blocks */
		fsc->fdblocks += pag->pagf_freeblks;
		fsc->fdblocks += pag->pagf_flcount;
		if (xfs_has_lazysbcount(sc->mp)) {
			fsc->fdblocks += pag->pagf_btreeblks;
		} else {
			error = xchk_fscount_btreeblks(sc, fsc, agno);
			if (error)
				break;
		}

		/*
		 * Per-AG reservations are taken out of the incore counters,
		 * so they must be left out of the free blocks computation.
		 */
		fsc->fdblocks -= pag->pag_meta_resv.ar_reserved;
		fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved;
	}
	if (pag)
		xfs_perag_rele(pag);
	if (error) {
		xchk_set_incomplete(sc);
		return error;
	}

	/*
	 * The global incore space reservation is taken from the incore
	 * counters, so leave that out of the computation.
	 */
	fsc->fdblocks -= mp->m_resblks_avail;

	/*
	 * Delayed allocation reservations are taken out of the incore counters
	 * but not recorded on disk, so leave them and their indlen blocks out
	 * of the computation.
	 */
	delayed = percpu_counter_sum(&mp->m_delalloc_blks);
	fsc->fdblocks -= delayed;

	trace_xchk_fscounters_calc(mp, fsc->icount, fsc->ifree, fsc->fdblocks,
			delayed);

	/* Bail out if the values we compute are totally nonsense. */
	if (fsc->icount < fsc->icount_min || fsc->icount > fsc->icount_max ||
	    fsc->fdblocks > mp->m_sb.sb_dblocks ||
	    fsc->ifree > fsc->icount_max)
		return -EFSCORRUPTED;

	/*
	 * If ifree > icount then we probably had some perturbation in the
	 * counters while we were calculating things. We'll try a few times
	 * to maintain ifree <= icount before giving up.
	 */
	if (fsc->ifree > fsc->icount) {
		if (tries--)
			goto retry;
		xchk_set_incomplete(sc);
		return 0;
	}

	return 0;
}
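/*
 * For realtime filesystems we recount the free rt extents from the realtime
 * bitmap. xfs_rtalloc_query_all invokes the callback below once per record of
 * contiguous free rt extents; if the bitmap contains free runs of, say, 3 and
 * 5 rt extents, the callback fires twice with ar_extcount values of 3 and 5,
 * for an expected frextents value of 8.
 */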
#ifdef CONFIG_XFS_RT
STATIC int
xchk_fscount_add_frextent(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	const struct xfs_rtalloc_rec *rec,
	void			*priv)
{
	struct xchk_fscounters	*fsc = priv;
	int			error = 0;

	fsc->frextents += rec->ar_extcount;

	xchk_should_terminate(fsc->sc, &error);
	return error;
}

/* Calculate the number of free realtime extents from the realtime bitmap. */
STATIC int
xchk_fscount_count_frextents(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	fsc->frextents = 0;
	if (!xfs_has_realtime(mp))
		return 0;

	xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
	error = xfs_rtalloc_query_all(sc->mp, sc->tp,
			xchk_fscount_add_frextent, fsc);
	if (error) {
		xchk_set_incomplete(sc);
		goto out_unlock;
	}

out_unlock:
	xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
	return error;
}
#else
STATIC int
xchk_fscount_count_frextents(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc)
{
	fsc->frextents = 0;
	return 0;
}
#endif /* CONFIG_XFS_RT */

/*
 * Part 2: Comparing filesystem summary counters. All we have to do here is
 * sum the percpu counters and compare them to what we've observed.
 */

/*
 * Is the @counter reasonably close to the @expected value?
 *
 * We neither locked nor froze anything in the filesystem while aggregating the
 * per-AG data to compute the @expected value, which means that the counter
 * could have changed. We know the @old_value of the summation of the counter
 * before the aggregation, and we re-sum the counter now. If the expected
 * value falls between the two summations, we're ok.
 *
 * Otherwise, we /might/ have a problem. If the change in the summations is
 * more than we want to tolerate, the filesystem is probably busy and we should
 * just send back INCOMPLETE and see if userspace will try again.
 */
static inline bool
xchk_fscount_within_range(
	struct xfs_scrub	*sc,
	const int64_t		old_value,
	struct percpu_counter	*counter,
	uint64_t		expected)
{
	int64_t			min_value, max_value;
	int64_t			curr_value = percpu_counter_sum(counter);

	trace_xchk_fscounters_within_range(sc->mp, expected, curr_value,
			old_value);

	/* Negative values are always wrong. */
	if (curr_value < 0)
		return false;

	/* Exact matches are always ok. */
	if (curr_value == expected)
		return true;

	min_value = min(old_value, curr_value);
	max_value = max(old_value, curr_value);

	/* Within the before-and-after range is ok. */
	if (expected >= min_value && expected <= max_value)
		return true;

	/*
	 * If the difference between the two summations is too large, the fs
	 * might just be busy and so we'll mark the scrub incomplete. Return
	 * true here so that we don't mark the counter corrupt.
	 *
	 * XXX: In the future when userspace can grant scrub permission to
	 * quiesce the filesystem to solve the outsized variance problem, this
	 * check should be moved up and the return code changed to signal to
	 * userspace that we need quiesce permission.
	 */
	if (max_value - min_value >= XCHK_FSCOUNT_MIN_VARIANCE) {
		xchk_set_incomplete(sc);
		return true;
	}

	return false;
}
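/*
 * A worked example of the tolerance logic above: suppose the aggregation
 * computed an expected fdblocks of 1000 while the percpu counter summed to
 * 900 before the aggregation and 940 afterwards. The expected value lies
 * outside [900, 940] and the spread (40) is below XCHK_FSCOUNT_MIN_VARIANCE,
 * so the counter is reported corrupt. Had the counter moved from 900 to 2000
 * instead, the spread (1100) would exceed the tolerance and the scrub would
 * be marked incomplete rather than corrupt.
 */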
/* Check the superblock counters. */
int
xchk_fscounters(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xchk_fscounters	*fsc = sc->buf;
	int64_t			icount, ifree, fdblocks, frextents;
	int			error;

	/* Snapshot the percpu counters. */
	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
	frextents = percpu_counter_sum(&mp->m_frextents);

	/* No negative values, please! */
	if (icount < 0 || ifree < 0 || fdblocks < 0 || frextents < 0)
		xchk_set_corrupt(sc);

	/* See if icount is obviously wrong. */
	if (icount < fsc->icount_min || icount > fsc->icount_max)
		xchk_set_corrupt(sc);

	/* See if fdblocks is obviously wrong. */
	if (fdblocks > mp->m_sb.sb_dblocks)
		xchk_set_corrupt(sc);

	/* See if frextents is obviously wrong. */
	if (frextents > mp->m_sb.sb_rextents)
		xchk_set_corrupt(sc);

	/*
	 * If ifree exceeds icount by more than the minimum variance then
	 * something's probably wrong with the counters.
	 */
	if (ifree > icount && ifree - icount > XCHK_FSCOUNT_MIN_VARIANCE)
		xchk_set_corrupt(sc);

	/* Walk the incore AG headers to calculate the expected counters. */
	error = xchk_fscount_aggregate_agcounts(sc, fsc);
	if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
		return error;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		return 0;

	/* Count the free extents counter for rt volumes. */
	error = xchk_fscount_count_frextents(sc, fsc);
	if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
		return error;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		return 0;

	/* Compare the in-core counters with whatever we counted. */
	if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks,
			fsc->fdblocks))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, frextents, &mp->m_frextents,
			fsc->frextents))
		xchk_set_corrupt(sc);

	return 0;
}
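/*
 * Usage sketch (userspace, for illustration only; not part of this file's
 * kernel code): the checks above are reached through the metadata scrub
 * ioctl, roughly as follows. Error handling is elided, and retry_later() and
 * request_repair() are hypothetical placeholders for caller policy.
 *
 *	struct xfs_scrub_metadata sm = {
 *		.sm_type = XFS_SCRUB_TYPE_FSCOUNTERS,
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *
 *	ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
 *	if (sm.sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
 *		retry_later();		// fs was busy; result inconclusive
 *	else if (sm.sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
 *		request_repair();	// summary counters need fixing
 */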