// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_health.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/*
 * FS Summary Counters
 * ===================
 *
 * The basics of filesystem summary counter checking are that we iterate the
 * AGs counting the number of free blocks, free space btree blocks, per-AG
 * reservations, inodes, delayed allocation reservations, and free inodes.
 * Then we compare what we computed against the in-core counters.
 *
 * However, the reality is that summary counters are a tricky beast to check.
 * While we /could/ freeze the filesystem and scramble around the AGs counting
 * the free blocks, in practice we prefer not to do that for a scan because
 * freezing is costly.  To get around this, we added a per-cpu counter of the
 * delalloc reservations so that we can rotor around the AGs relatively
 * quickly, and we allow the counts to be slightly off because we're not
 * taking any locks while we do this.
 *
 * So the first thing we do is warm up the buffer cache in the setup routine
 * by walking all the AGs to make sure the incore per-AG structure has been
 * initialized.  The expected value calculation then iterates the incore
 * per-AG structures as quickly as it can.  We snapshot the percpu counters
 * before and after this operation and use the difference in counter values
 * to guess at our tolerance for mismatch between expected and actual counter
 * values.
 */

/*
 * Since the expected value computation is lockless but only browses incore
 * values, the percpu counters should be fairly close to each other.  However,
 * we'll allow ourselves to be off by at least this (arbitrary) amount.
 */
#define XCHK_FSCOUNT_MIN_VARIANCE	(512)

/*
 * Make sure the per-AG structure has been initialized from the on-disk header
 * contents and trust that the incore counters match the ondisk counters.
 * (The AGF and AGI scrubbers check them, and a normal xfs_scrub run checks
 * the summary counters after checking all AG headers.)  Do this from the
 * setup function so that the inner AG aggregation loop runs as quickly as
 * possible.
 *
 * This function runs during the setup phase /before/ we start checking any
 * metadata.
 */
STATIC int
xchk_fscount_warmup(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agi_bp = NULL;
	struct xfs_buf		*agf_bp = NULL;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno;
	int			error = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);

		if (pag->pagi_init && pag->pagf_init)
			goto next_loop_perag;

		/* Lock both AG headers. */
		error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp);
		if (error)
			break;
		error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
		if (error)
			break;

		/*
		 * These are supposed to be initialized by the header read
		 * functions.
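		 * (xfs_ialloc_read_agi sets pagi_init and xfs_alloc_read_agf
		 * sets pagf_init, so a flag that is still clear here means
		 * the AG header contents cannot be trusted.)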
		 */
		error = -EFSCORRUPTED;
		if (!pag->pagi_init || !pag->pagf_init)
			break;

		xfs_buf_relse(agf_bp);
		agf_bp = NULL;
		xfs_buf_relse(agi_bp);
		agi_bp = NULL;
next_loop_perag:
		xfs_perag_put(pag);
		pag = NULL;
		error = 0;

		if (xchk_should_terminate(sc, &error))
			break;
	}

	if (agf_bp)
		xfs_buf_relse(agf_bp);
	if (agi_bp)
		xfs_buf_relse(agi_bp);
	if (pag)
		xfs_perag_put(pag);
	return error;
}

int
xchk_setup_fscounters(
	struct xfs_scrub	*sc)
{
	struct xchk_fscounters	*fsc;
	int			error;

	sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0);
	if (!sc->buf)
		return -ENOMEM;
	fsc = sc->buf;

	xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max);

	/* We must get the incore counters set up before we can proceed. */
	error = xchk_fscount_warmup(sc);
	if (error)
		return error;

	/*
	 * Pause background reclaim while we're scrubbing to reduce the
	 * likelihood of background perturbations to the counters throwing
	 * off our calculations.
	 */
	xchk_stop_reaping(sc);

	return xchk_trans_alloc(sc, 0);
}

/* Count free space btree blocks manually for pre-lazysbcount filesystems. */
static int
xchk_fscount_btreeblks(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc,
	xfs_agnumber_t		agno)
{
	xfs_extlen_t		blocks;
	int			error;

	error = xchk_ag_init(sc, agno, &sc->sa);
	if (error)
		return error;

	/*
	 * Add up the bnobt and cntbt blocks, skipping each btree's root
	 * block, which is accounted as used space rather than free space.
	 */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (error)
		goto out_free;
	fsc->fdblocks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (error)
		goto out_free;
	fsc->fdblocks += blocks - 1;

out_free:
	xchk_ag_free(sc, &sc->sa);
	return error;
}

/*
 * Calculate what the global in-core counters ought to be from the incore
 * per-AG structure.  Callers can compare this to the actual in-core counters
 * to estimate by how much both in-core and on-disk counters need to be
 * adjusted.
 */
STATIC int
xchk_fscount_aggregate_agcounts(
	struct xfs_scrub	*sc,
	struct xchk_fscounters	*fsc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag;
	uint64_t		delayed;
	xfs_agnumber_t		agno;
	int			tries = 8;
	int			error = 0;

retry:
	fsc->icount = 0;
	fsc->ifree = 0;
	fsc->fdblocks = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);

		/* This somehow got unset since the warmup? */
		if (!pag->pagi_init || !pag->pagf_init) {
			xfs_perag_put(pag);
			return -EFSCORRUPTED;
		}

		/* Count all the inodes */
		fsc->icount += pag->pagi_count;
		fsc->ifree += pag->pagi_freecount;

		/* Add up the free/freelist/bnobt/cntbt blocks */
		fsc->fdblocks += pag->pagf_freeblks;
		fsc->fdblocks += pag->pagf_flcount;
		if (xfs_sb_version_haslazysbcount(&sc->mp->m_sb)) {
			fsc->fdblocks += pag->pagf_btreeblks;
		} else {
			error = xchk_fscount_btreeblks(sc, fsc, agno);
			if (error) {
				xfs_perag_put(pag);
				break;
			}
		}

		/*
		 * Per-AG reservations are taken out of the incore counters,
		 * so they must be left out of the free blocks computation.
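		 * (ar_reserved is the unused remainder of the metadata
		 * reservation; the rmapbt reservation subtracts its full
		 * original size because rmapbt blocks are still accounted
		 * as free space, mirroring what xfs_ag_resv_init hid from
		 * fdblocks at mount time.)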
		 */
		fsc->fdblocks -= pag->pag_meta_resv.ar_reserved;
		fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved;

		xfs_perag_put(pag);

		if (xchk_should_terminate(sc, &error))
			break;
	}

	if (error)
		return error;

	/*
	 * The global incore space reservation is taken from the incore
	 * counters, so leave that out of the computation.
	 */
	fsc->fdblocks -= mp->m_resblks_avail;

	/*
	 * Delayed allocation reservations are taken out of the incore
	 * counters but not recorded on disk, so leave them and their indlen
	 * blocks out of the computation.
	 */
	delayed = percpu_counter_sum(&mp->m_delalloc_blks);
	fsc->fdblocks -= delayed;

	trace_xchk_fscounters_calc(mp, fsc->icount, fsc->ifree, fsc->fdblocks,
			delayed);

	/* Bail out if the values we compute are totally nonsense. */
	if (fsc->icount < fsc->icount_min || fsc->icount > fsc->icount_max ||
	    fsc->fdblocks > mp->m_sb.sb_dblocks ||
	    fsc->ifree > fsc->icount_max)
		return -EFSCORRUPTED;

	/*
	 * If ifree > icount then we probably had some perturbation in the
	 * counters while we were calculating things.  We'll try a few times
	 * to maintain ifree <= icount before giving up.
	 */
	if (fsc->ifree > fsc->icount) {
		if (tries--)
			goto retry;
		xchk_set_incomplete(sc);
		return 0;
	}

	return 0;
}

/*
 * Is the @counter reasonably close to the @expected value?
 *
 * We neither locked nor froze anything in the filesystem while aggregating
 * the per-AG data to compute the @expected value, which means that the
 * counter could have changed.  We know the @old_value of the summation of
 * the counter before the aggregation, and we re-sum the counter now.  If
 * the expected value falls between the two summations, we're ok.
 *
 * Otherwise, we /might/ have a problem.  If the change in the summations is
 * more than we want to tolerate, the filesystem is probably busy and we
 * should just send back INCOMPLETE and see if userspace will try again.
 */
static inline bool
xchk_fscount_within_range(
	struct xfs_scrub	*sc,
	const int64_t		old_value,
	struct percpu_counter	*counter,
	uint64_t		expected)
{
	int64_t			min_value, max_value;
	int64_t			curr_value = percpu_counter_sum(counter);

	trace_xchk_fscounters_within_range(sc->mp, expected, curr_value,
			old_value);

	/* Negative values are always wrong. */
	if (curr_value < 0)
		return false;

	/* Exact matches are always ok. */
	if (curr_value == expected)
		return true;

	min_value = min(old_value, curr_value);
	max_value = max(old_value, curr_value);

	/* Within the before-and-after range is ok. */
	if (expected >= min_value && expected <= max_value)
		return true;

	/*
	 * If the difference between the two summations is too large, the fs
	 * might just be busy and so we'll mark the scrub incomplete.  Return
	 * true here so that we don't mark the counter corrupt.
	 *
	 * XXX: In the future when userspace can grant scrub permission to
	 * quiesce the filesystem to solve the outsized variance problem, this
	 * check should be moved up and the return code changed to signal to
	 * userspace that we need quiesce permission.
	 */
	if (max_value - min_value >= XCHK_FSCOUNT_MIN_VARIANCE) {
		xchk_set_incomplete(sc);
		return true;
	}

	return false;
}

/*
 * Check the superblock counters.
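 *
 * Snapshot the percpu counters, run cheap sanity checks on the snapshots,
 * aggregate the expected values from the incore per-AG structures, and then
 * check that each expected value lies within the observed variance of its
 * percpu counter.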
 */
int
xchk_fscounters(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xchk_fscounters	*fsc = sc->buf;
	int64_t			icount, ifree, fdblocks;
	int			error;

	/* Snapshot the percpu counters. */
	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	/* No negative values, please! */
	if (icount < 0 || ifree < 0 || fdblocks < 0)
		xchk_set_corrupt(sc);

	/* See if icount is obviously wrong. */
	if (icount < fsc->icount_min || icount > fsc->icount_max)
		xchk_set_corrupt(sc);

	/* See if fdblocks is obviously wrong. */
	if (fdblocks > mp->m_sb.sb_dblocks)
		xchk_set_corrupt(sc);

	/*
	 * If ifree exceeds icount by more than the minimum variance then
	 * something's probably wrong with the counters.
	 */
	if (ifree > icount && ifree - icount > XCHK_FSCOUNT_MIN_VARIANCE)
		xchk_set_corrupt(sc);

	/* Walk the incore AG headers to calculate the expected counters. */
	error = xchk_fscount_aggregate_agcounts(sc, fsc);
	if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
		return error;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		return 0;

	/* Compare the in-core counters with whatever we counted. */
	if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree))
		xchk_set_corrupt(sc);

	if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks,
			fsc->fdblocks))
		xchk_set_corrupt(sc);

	return 0;
}