// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2020-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/repair.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/iscan.h"
#include "scrub/quota.h"
#include "scrub/quotacheck.h"
#include "scrub/trace.h"

/*
 * Live Quotacheck
 * ===============
 *
 * Quota counters are "summary" metadata, in the sense that they are computed
 * as the summation of the block usage counts for every file on the filesystem.
 * Therefore, we compute the correct icount, bcount, and rtbcount values by
 * creating a shadow quota counter structure and walking every inode.
 */

/* Track the quota deltas for a dquot in a transaction. */
struct xqcheck_dqtrx {
	/* Dquot type and id; a zero q_type marks an unused slot. */
	xfs_dqtype_t		q_type;
	xfs_dqid_t		q_id;

	/* Shadow of the transaction's inode count delta. */
	int64_t			icount_delta;

	/* Shadow of the data device block count deltas. */
	int64_t			bcount_delta;
	int64_t			delbcnt_delta;

	/* Shadow of the realtime device block count deltas. */
	int64_t			rtbcount_delta;
	int64_t			delrtb_delta;
};

#define XQCHECK_MAX_NR_DQTRXS	(XFS_QM_TRANS_DQTYPES * XFS_QM_TRANS_MAXDQS)

/*
 * Track the quota deltas for all dquots attached to a transaction if the
 * quota deltas are being applied to an inode that we already scanned.
 */
struct xqcheck_dqacct {
	/* Linkage in the xqc->shadow_dquot_acct rhashtable. */
	struct rhash_head	hash;

	/* Opaque transaction id; the rhashtable lookup key. */
	uintptr_t		tx_id;

	/* One shadow dqtrx per dquot the transaction can touch. */
	struct xqcheck_dqtrx	dqtrx[XQCHECK_MAX_NR_DQTRXS];

	/* Number of dqtrx slots currently in use. */
	unsigned int		refcount;
};

/* Free a shadow dquot accounting structure.
 */
static void
xqcheck_dqacct_free(
	void			*ptr,
	void			*arg)
{
	struct xqcheck_dqacct	*dqa = ptr;

	kfree(dqa);
}

/*
 * Set us up to scrub quota counters.  Returns -ENOENT when quota is not
 * enabled on this mount, since there is nothing to check.
 */
int
xchk_setup_quotacheck(
	struct xfs_scrub	*sc)
{
	if (!XFS_IS_QUOTA_ON(sc->mp))
		return -ENOENT;

	xchk_fsgates_enable(sc, XCHK_FSGATES_QUOTA);

	/* Scrub context; freed by the scrub framework via sc->buf. */
	sc->buf = kzalloc(sizeof(struct xqcheck), XCHK_GFP_FLAGS);
	if (!sc->buf)
		return -ENOMEM;

	return xchk_setup_fs(sc);
}

/*
 * Part 1: Collecting dquot resource usage counts.  For each xfs_dquot attached
 * to each inode, we create a shadow dquot, and compute the inode count and add
 * the data/rt block usage from what we see.
 *
 * To avoid false corruption reports in part 2, any failure in this part must
 * set the INCOMPLETE flag even when a negative errno is returned.  This care
 * must be taken with certain errno values (i.e. EFSBADCRC, EFSCORRUPTED,
 * ECANCELED) that are absorbed into a scrub state flag update by
 * xchk_*_process_error.  Scrub and repair share the same incore data
 * structures, so the INCOMPLETE flag is critical to prevent a repair based on
 * insufficient information.
 *
 * Because we are scanning a live filesystem, it's possible that another thread
 * will try to update the quota counters for an inode that we've already
 * scanned.  This will cause our counts to be incorrect.  Therefore, we hook
 * the live transaction code in two places: (1) when the callers update the
 * per-transaction dqtrx structure to log quota counter updates; and (2) when
 * transaction commit actually logs those updates to the incore dquot.  By
 * shadowing transaction updates in this manner, live quotacheck can ensure
 * by locking the dquot and the shadow structure that its own copies are not
 * out of date.
 * Because the hook code runs in a different process context from
 * the scrub code and the scrub state flags are not accessed atomically,
 * failures in the hook code must abort the iscan and the scrubber must notice
 * the aborted scan and set the incomplete flag.
 *
 * Note that we use srcu notifier hooks to minimize the overhead when live
 * quotacheck is /not/ running.
 */

/*
 * Update incore dquot counter information from a live update.  Caller must
 * hold xqc->lock; the counts xfarray is shared with the hook functions.
 */
static int
xqcheck_update_incore_counts(
	struct xqcheck		*xqc,
	struct xfarray		*counts,
	xfs_dqid_t		id,
	int64_t			inodes,
	int64_t			nblks,
	int64_t			rtblks)
{
	struct xqcheck_dquot	xcdq;
	int			error;

	error = xfarray_load_sparse(counts, id, &xcdq);
	if (error)
		return error;

	xcdq.flags |= XQCHECK_DQUOT_WRITTEN;
	xcdq.icount += inodes;
	xcdq.bcount += nblks;
	xcdq.rtbcount += rtblks;

	error = xfarray_store(counts, id, &xcdq);
	if (error == -EFBIG) {
		/*
		 * EFBIG means we tried to store data at too high a byte offset
		 * in the sparse array.  IOWs, we cannot complete the check and
		 * must notify userspace that the check was incomplete.
		 */
		error = -ECANCELED;
	}
	return error;
}

/* Decide if this is the shadow dquot accounting structure for a transaction.
 */
static int
xqcheck_dqacct_obj_cmpfn(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const uintptr_t			*tx_idp = arg->key;
	const struct xqcheck_dqacct	*dqa = obj;

	/* Nonzero means "no match" per rhashtable obj_cmpfn convention. */
	if (dqa->tx_id != *tx_idp)
		return 1;
	return 0;
}

static const struct rhashtable_params xqcheck_dqacct_hash_params = {
	.min_size		= 32,
	.key_len		= sizeof(uintptr_t),
	.key_offset		= offsetof(struct xqcheck_dqacct, tx_id),
	.head_offset		= offsetof(struct xqcheck_dqacct, hash),
	.automatic_shrinking	= true,
	.obj_cmpfn		= xqcheck_dqacct_obj_cmpfn,
};

/*
 * Find a shadow dqtrx slot for the given dquot.  Returns either the slot
 * already tracking (q_type, q_id), or the first empty slot (q_type == 0), or
 * NULL if the array is full.
 */
STATIC struct xqcheck_dqtrx *
xqcheck_get_dqtrx(
	struct xqcheck_dqacct	*dqa,
	xfs_dqtype_t		q_type,
	xfs_dqid_t		q_id)
{
	int			i;

	for (i = 0; i < XQCHECK_MAX_NR_DQTRXS; i++) {
		if (dqa->dqtrx[i].q_type == 0 ||
		    (dqa->dqtrx[i].q_type == q_type &&
		     dqa->dqtrx[i].q_id == q_id))
			return &dqa->dqtrx[i];
	}

	return NULL;
}

/*
 * Create and fill out a quota delta tracking structure to shadow the updates
 * going on in the regular quota code.
 */
static int
xqcheck_mod_live_ino_dqtrx(
	struct notifier_block	*nb,
	unsigned long		action,
	void			*data)
{
	struct xfs_mod_ino_dqtrx_params	*p = data;
	struct xqcheck		*xqc;
	struct xqcheck_dqacct	*dqa;
	struct xqcheck_dqtrx	*dqtrx;
	int			error;

	xqc = container_of(nb, struct xqcheck, qhook.mod_hook.nb);

	/* Skip quota reservation fields. */
	switch (action) {
	case XFS_TRANS_DQ_BCOUNT:
	case XFS_TRANS_DQ_DELBCOUNT:
	case XFS_TRANS_DQ_ICOUNT:
	case XFS_TRANS_DQ_RTBCOUNT:
	case XFS_TRANS_DQ_DELRTBCOUNT:
		break;
	default:
		return NOTIFY_DONE;
	}

	/* Ignore dqtrx updates for quota types we don't care about.
*/ 233 switch (p->q_type) { 234 case XFS_DQTYPE_USER: 235 if (!xqc->ucounts) 236 return NOTIFY_DONE; 237 break; 238 case XFS_DQTYPE_GROUP: 239 if (!xqc->gcounts) 240 return NOTIFY_DONE; 241 break; 242 case XFS_DQTYPE_PROJ: 243 if (!xqc->pcounts) 244 return NOTIFY_DONE; 245 break; 246 default: 247 return NOTIFY_DONE; 248 } 249 250 /* Skip inodes that haven't been scanned yet. */ 251 if (!xchk_iscan_want_live_update(&xqc->iscan, p->ino)) 252 return NOTIFY_DONE; 253 254 /* Make a shadow quota accounting tracker for this transaction. */ 255 mutex_lock(&xqc->lock); 256 dqa = rhashtable_lookup_fast(&xqc->shadow_dquot_acct, &p->tx_id, 257 xqcheck_dqacct_hash_params); 258 if (!dqa) { 259 dqa = kzalloc(sizeof(struct xqcheck_dqacct), XCHK_GFP_FLAGS); 260 if (!dqa) 261 goto out_abort; 262 263 dqa->tx_id = p->tx_id; 264 error = rhashtable_insert_fast(&xqc->shadow_dquot_acct, 265 &dqa->hash, xqcheck_dqacct_hash_params); 266 if (error) 267 goto out_abort; 268 } 269 270 /* Find the shadow dqtrx (or an empty slot) here. 
	 */
	dqtrx = xqcheck_get_dqtrx(dqa, p->q_type, p->q_id);
	if (!dqtrx)
		goto out_abort;
	if (dqtrx->q_type == 0) {
		/* Claim this empty slot for the dquot being modified. */
		dqtrx->q_type = p->q_type;
		dqtrx->q_id = p->q_id;
		dqa->refcount++;
	}

	/* Update counter */
	switch (action) {
	case XFS_TRANS_DQ_BCOUNT:
		dqtrx->bcount_delta += p->delta;
		break;
	case XFS_TRANS_DQ_DELBCOUNT:
		dqtrx->delbcnt_delta += p->delta;
		break;
	case XFS_TRANS_DQ_ICOUNT:
		dqtrx->icount_delta += p->delta;
		break;
	case XFS_TRANS_DQ_RTBCOUNT:
		dqtrx->rtbcount_delta += p->delta;
		break;
	case XFS_TRANS_DQ_DELRTBCOUNT:
		dqtrx->delrtb_delta += p->delta;
		break;
	}

	mutex_unlock(&xqc->lock);
	return NOTIFY_DONE;

out_abort:
	/* Shadow data is unreliable now; make the scrubber notice. */
	xchk_iscan_abort(&xqc->iscan);
	mutex_unlock(&xqc->lock);
	return NOTIFY_DONE;
}

/*
 * Apply the transaction quota deltas to our shadow quota accounting info when
 * the regular quota code is doing the same.
 */
static int
xqcheck_apply_live_dqtrx(
	struct notifier_block	*nb,
	unsigned long		action,
	void			*data)
{
	struct xfs_apply_dqtrx_params	*p = data;
	struct xqcheck		*xqc;
	struct xqcheck_dqacct	*dqa;
	struct xqcheck_dqtrx	*dqtrx;
	struct xfarray		*counts;
	int			error;

	xqc = container_of(nb, struct xqcheck, qhook.apply_hook.nb);

	/* Map the dquot type to an incore counter object. */
	switch (p->q_type) {
	case XFS_DQTYPE_USER:
		counts = xqc->ucounts;
		break;
	case XFS_DQTYPE_GROUP:
		counts = xqc->gcounts;
		break;
	case XFS_DQTYPE_PROJ:
		counts = xqc->pcounts;
		break;
	default:
		return NOTIFY_DONE;
	}

	if (xchk_iscan_aborted(&xqc->iscan) || counts == NULL)
		return NOTIFY_DONE;

	/*
	 * Find the shadow dqtrx for this transaction and dquot, if any deltas
	 * need to be applied here.  If not, we're finished early.
	 */
	mutex_lock(&xqc->lock);
	dqa = rhashtable_lookup_fast(&xqc->shadow_dquot_acct, &p->tx_id,
			xqcheck_dqacct_hash_params);
	if (!dqa)
		goto out_unlock;
	dqtrx = xqcheck_get_dqtrx(dqa, p->q_type, p->q_id);
	if (!dqtrx || dqtrx->q_type == 0)
		goto out_unlock;

	/* Update our shadow dquot if we're committing. */
	if (action == XFS_APPLY_DQTRX_COMMIT) {
		error = xqcheck_update_incore_counts(xqc, counts, p->q_id,
				dqtrx->icount_delta,
				dqtrx->bcount_delta + dqtrx->delbcnt_delta,
				dqtrx->rtbcount_delta + dqtrx->delrtb_delta);
		if (error)
			goto out_abort;
	}

	/* Free the shadow accounting structure if that was the last user. */
	dqa->refcount--;
	if (dqa->refcount == 0) {
		error = rhashtable_remove_fast(&xqc->shadow_dquot_acct,
				&dqa->hash, xqcheck_dqacct_hash_params);
		if (error)
			goto out_abort;
		xqcheck_dqacct_free(dqa, NULL);
	}

	mutex_unlock(&xqc->lock);
	return NOTIFY_DONE;

out_abort:
	xchk_iscan_abort(&xqc->iscan);
	/* Fall through to drop the lock. */
out_unlock:
	mutex_unlock(&xqc->lock);
	return NOTIFY_DONE;
}

/* Record this inode's quota usage in our shadow quota counter data. */
STATIC int
xqcheck_collect_inode(
	struct xqcheck		*xqc,
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp = xqc->sc->tp;
	xfs_filblks_t		nblks, rtblks;
	uint			ilock_flags = 0;
	xfs_dqid_t		id;
	bool			isreg = S_ISREG(VFS_I(ip)->i_mode);
	int			error = 0;

	if (xfs_is_metadir_inode(ip) ||
	    xfs_is_quota_inode(&tp->t_mountp->m_sb, ip->i_ino)) {
		/*
		 * Quota files are never counted towards quota, so we do not
		 * need to take the lock.  Files do not switch between the
		 * metadata and regular directory trees without a reallocation,
		 * so we do not need to ILOCK them either.
		 */
		xchk_iscan_mark_visited(&xqc->iscan, ip);
		return 0;
	}

	/* Figure out the data / rt device block counts.
	 */
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	if (isreg)
		xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Read in the data fork for rt files so that _count_blocks
		 * can count the number of blocks allocated from the rt volume.
		 * Inodes do not track that separately.
		 */
		ilock_flags = xfs_ilock_data_map_shared(ip);
		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
		if (error)
			goto out_abort;
	} else {
		ilock_flags = XFS_ILOCK_SHARED;
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}
	xfs_inode_count_blocks(tp, ip, &nblks, &rtblks);

	if (xchk_iscan_aborted(&xqc->iscan)) {
		error = -ECANCELED;
		goto out_incomplete;
	}

	/* Update the shadow dquot counters. */
	mutex_lock(&xqc->lock);
	if (xqc->ucounts) {
		id = xfs_qm_id_for_quotatype(ip, XFS_DQTYPE_USER);
		error = xqcheck_update_incore_counts(xqc, xqc->ucounts, id, 1,
				nblks, rtblks);
		if (error)
			goto out_mutex;
	}

	if (xqc->gcounts) {
		id = xfs_qm_id_for_quotatype(ip, XFS_DQTYPE_GROUP);
		error = xqcheck_update_incore_counts(xqc, xqc->gcounts, id, 1,
				nblks, rtblks);
		if (error)
			goto out_mutex;
	}

	if (xqc->pcounts) {
		id = xfs_qm_id_for_quotatype(ip, XFS_DQTYPE_PROJ);
		error = xqcheck_update_incore_counts(xqc, xqc->pcounts, id, 1,
				nblks, rtblks);
		if (error)
			goto out_mutex;
	}
	mutex_unlock(&xqc->lock);

	xchk_iscan_mark_visited(&xqc->iscan, ip);
	goto out_ilock;

out_mutex:
	mutex_unlock(&xqc->lock);
out_abort:
	xchk_iscan_abort(&xqc->iscan);
out_incomplete:
	xchk_set_incomplete(xqc->sc);
out_ilock:
	/* Drop the inode locks in the reverse order that we took them. */
	xfs_iunlock(ip, ilock_flags);
	if (isreg)
		xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

/* Walk all the allocated inodes and run a quota scan on them.
 */
STATIC int
xqcheck_collect_counts(
	struct xqcheck		*xqc)
{
	struct xfs_scrub	*sc = xqc->sc;
	struct xfs_inode	*ip;
	int			error;

	/*
	 * Set up for a potentially lengthy filesystem scan by reducing our
	 * transaction resource usage for the duration.  Specifically:
	 *
	 * Cancel the transaction to release the log grant space while we scan
	 * the filesystem.
	 *
	 * Create a new empty transaction to eliminate the possibility of the
	 * inode scan deadlocking on cyclical metadata.
	 *
	 * We pass the empty transaction to the file scanning function to avoid
	 * repeatedly cycling empty transactions.  This can be done without
	 * risk of deadlock between sb_internal and the IOLOCK (we take the
	 * IOLOCK to quiesce the file before scanning) because empty
	 * transactions do not take sb_internal.
	 */
	xchk_trans_cancel(sc);
	error = xchk_trans_alloc_empty(sc);
	if (error)
		return error;

	while ((error = xchk_iscan_iter(&xqc->iscan, &ip)) == 1) {
		error = xqcheck_collect_inode(xqc, ip);
		xchk_irele(sc, ip);
		if (error)
			break;

		if (xchk_should_terminate(sc, &error))
			break;
	}
	xchk_iscan_iter_finish(&xqc->iscan);
	if (error) {
		xchk_set_incomplete(sc);
		/*
		 * If we couldn't grab an inode that was busy with a state
		 * change, change the error code so that we exit to userspace
		 * as quickly as possible.
		 */
		if (error == -EBUSY)
			return -ECANCELED;
		return error;
	}

	/*
	 * Switch out for a real transaction in preparation for building a new
	 * tree.
	 */
	xchk_trans_cancel(sc);
	return xchk_setup_fs(sc);
}

/*
 * Part 2: Comparing dquot resource counters.  Walk each xfs_dquot, comparing
 * the resource usage counters against our shadow dquots; and then walk each
 * shadow dquot (that wasn't covered in the first part), comparing it against
 * the xfs_dquot.
 */

/*
 * Check the dquot data against what we observed.  Caller must hold the dquot
 * lock.
 */
STATIC int
xqcheck_compare_dquot(
	struct xqcheck		*xqc,
	xfs_dqtype_t		dqtype,
	struct xfs_dquot	*dq)
{
	struct xqcheck_dquot	xcdq;
	struct xfarray		*counts = xqcheck_counters_for(xqc, dqtype);
	int			error;

	if (xchk_iscan_aborted(&xqc->iscan)) {
		xchk_set_incomplete(xqc->sc);
		return -ECANCELED;
	}

	mutex_lock(&xqc->lock);
	error = xfarray_load_sparse(counts, dq->q_id, &xcdq);
	if (error)
		goto out_unlock;

	if (xcdq.icount != dq->q_ino.count)
		xchk_qcheck_set_corrupt(xqc->sc, dqtype, dq->q_id);

	if (xcdq.bcount != dq->q_blk.count)
		xchk_qcheck_set_corrupt(xqc->sc, dqtype, dq->q_id);

	if (xcdq.rtbcount != dq->q_rtb.count)
		xchk_qcheck_set_corrupt(xqc->sc, dqtype, dq->q_id);

	/* Mark this shadow dquot as compared so part 2 skips it later. */
	xcdq.flags |= (XQCHECK_DQUOT_COMPARE_SCANNED | XQCHECK_DQUOT_WRITTEN);
	error = xfarray_store(counts, dq->q_id, &xcdq);
	if (error == -EFBIG) {
		/*
		 * EFBIG means we tried to store data at too high a byte offset
		 * in the sparse array.  IOWs, we cannot complete the check and
		 * must notify userspace that the check was incomplete.  This
		 * should never happen outside of the collection phase.
		 */
		xchk_set_incomplete(xqc->sc);
		error = -ECANCELED;
	}
	mutex_unlock(&xqc->lock);
	if (error)
		return error;

	if (xqc->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;

out_unlock:
	mutex_unlock(&xqc->lock);
	return error;
}

/*
 * Walk all the observed dquots, and make sure there's a matching incore
 * dquot and that its counts match ours.
 */
STATIC int
xqcheck_walk_observations(
	struct xqcheck		*xqc,
	xfs_dqtype_t		dqtype)
{
	struct xqcheck_dquot	xcdq;
	struct xfs_dquot	*dq;
	struct xfarray		*counts = xqcheck_counters_for(xqc, dqtype);
	xfarray_idx_t		cur = XFARRAY_CURSOR_INIT;
	int			error;

	mutex_lock(&xqc->lock);
	while ((error = xfarray_iter(counts, &cur, &xcdq)) == 1) {
		/* The cursor points one past the record just loaded. */
		xfs_dqid_t	id = cur - 1;

		/* Already checked against the incore dquot in part 2a. */
		if (xcdq.flags & XQCHECK_DQUOT_COMPARE_SCANNED)
			continue;

		/* Drop our lock before acquiring the dquot. */
		mutex_unlock(&xqc->lock);

		error = xfs_qm_dqget(xqc->sc->mp, id, dqtype, false, &dq);
		if (error == -ENOENT) {
			/* We observed usage but there's no incore dquot. */
			xchk_qcheck_set_corrupt(xqc->sc, dqtype, id);
			return 0;
		}
		if (error)
			return error;

		error = xqcheck_compare_dquot(xqc, dqtype, dq);
		xfs_qm_dqput(dq);
		if (error)
			return error;

		if (xchk_should_terminate(xqc->sc, &error))
			return error;

		mutex_lock(&xqc->lock);
	}
	mutex_unlock(&xqc->lock);

	return error;
}

/* Compare the quota counters we observed against the live dquots. */
STATIC int
xqcheck_compare_dqtype(
	struct xqcheck		*xqc,
	xfs_dqtype_t		dqtype)
{
	struct xchk_dqiter	cursor = { };
	struct xfs_scrub	*sc = xqc->sc;
	struct xfs_dquot	*dq;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/* If the quota CHKD flag is cleared, we need to repair this quota. */
	if (!(xfs_quota_chkd_flag(dqtype) & sc->mp->m_qflags)) {
		xchk_qcheck_set_corrupt(xqc->sc, dqtype, 0);
		return 0;
	}

	/* Compare what we observed against the actual dquots. */
	xchk_dqiter_init(&cursor, sc, dqtype);
	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
		error = xqcheck_compare_dquot(xqc, dqtype, dq);
		xfs_qm_dqput(dq);
		if (error)
			break;
	}
	if (error)
		return error;

	/* Walk all the observed dquots and compare to the incore ones.
	 */
	return xqcheck_walk_observations(xqc, dqtype);
}

/* Tear down everything associated with a quotacheck. */
static void
xqcheck_teardown_scan(
	void			*priv)
{
	struct xqcheck		*xqc = priv;
	struct xfs_quotainfo	*qi = xqc->sc->mp->m_quotainfo;

	/* Discourage any hook functions that might be running. */
	xchk_iscan_abort(&xqc->iscan);

	/*
	 * As noted above, the apply hook is responsible for cleaning up the
	 * shadow dquot accounting data when a transaction completes.  The mod
	 * hook must be removed before the apply hook so that we don't
	 * mistakenly leave an active shadow account for the mod hook to get
	 * its hands on.  No hooks should be running after these functions
	 * return.
	 */
	xfs_dqtrx_hook_del(qi, &xqc->qhook);

	/* A nonzero key_len means the hash table was initialized. */
	if (xqc->shadow_dquot_acct.key_len) {
		rhashtable_free_and_destroy(&xqc->shadow_dquot_acct,
				xqcheck_dqacct_free, NULL);
		xqc->shadow_dquot_acct.key_len = 0;
	}

	if (xqc->pcounts) {
		xfarray_destroy(xqc->pcounts);
		xqc->pcounts = NULL;
	}

	if (xqc->gcounts) {
		xfarray_destroy(xqc->gcounts);
		xqc->gcounts = NULL;
	}

	if (xqc->ucounts) {
		xfarray_destroy(xqc->ucounts);
		xqc->ucounts = NULL;
	}

	xchk_iscan_teardown(&xqc->iscan);
	mutex_destroy(&xqc->lock);
	xqc->sc = NULL;
}

/*
 * Scan all inodes in the entire filesystem to generate quota counter data.
 * If the scan is successful, the quota data will be left alive for a repair.
 * If any error occurs, we'll tear everything down.
 */
STATIC int
xqcheck_setup_scan(
	struct xfs_scrub	*sc,
	struct xqcheck		*xqc)
{
	char			*descr;
	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
	unsigned long long	max_dquots = XFS_DQ_ID_MAX + 1ULL;
	int			error;

	ASSERT(xqc->sc == NULL);
	xqc->sc = sc;

	mutex_init(&xqc->lock);

	/* Retry iget every tenth of a second for up to 30 seconds.
	 */
	xchk_iscan_start(sc, 30000, 100, &xqc->iscan);

	/* Default to -ENOMEM in case no quota type below is enabled. */
	error = -ENOMEM;
	if (xfs_this_quota_on(sc->mp, XFS_DQTYPE_USER)) {
		descr = xchk_xfile_descr(sc, "user dquot records");
		error = xfarray_create(descr, max_dquots,
				sizeof(struct xqcheck_dquot), &xqc->ucounts);
		kfree(descr);
		if (error)
			goto out_teardown;
	}

	if (xfs_this_quota_on(sc->mp, XFS_DQTYPE_GROUP)) {
		descr = xchk_xfile_descr(sc, "group dquot records");
		error = xfarray_create(descr, max_dquots,
				sizeof(struct xqcheck_dquot), &xqc->gcounts);
		kfree(descr);
		if (error)
			goto out_teardown;
	}

	if (xfs_this_quota_on(sc->mp, XFS_DQTYPE_PROJ)) {
		descr = xchk_xfile_descr(sc, "project dquot records");
		error = xfarray_create(descr, max_dquots,
				sizeof(struct xqcheck_dquot), &xqc->pcounts);
		kfree(descr);
		if (error)
			goto out_teardown;
	}

	/*
	 * Set up hash table to map transactions to our internal shadow dqtrx
	 * structures.
	 */
	error = rhashtable_init(&xqc->shadow_dquot_acct,
			&xqcheck_dqacct_hash_params);
	if (error)
		goto out_teardown;

	/*
	 * Hook into the quota code.  The hook only triggers for inodes that
	 * were already scanned, and the scanner thread takes each inode's
	 * ILOCK, which means that any in-progress inode updates will finish
	 * before we can scan the inode.
	 *
	 * The apply hook (which removes the shadow dquot accounting struct)
	 * must be installed before the mod hook so that we never fail to catch
	 * the end of a quota update sequence and leave stale shadow data.
	 */
	ASSERT(sc->flags & XCHK_FSGATES_QUOTA);
	xfs_dqtrx_hook_setup(&xqc->qhook, xqcheck_mod_live_ino_dqtrx,
			xqcheck_apply_live_dqtrx);

	error = xfs_dqtrx_hook_add(qi, &xqc->qhook);
	if (error)
		goto out_teardown;

	/* Use deferred cleanup to pass the quota count data to repair.
	 */
	sc->buf_cleanup = xqcheck_teardown_scan;
	return 0;

out_teardown:
	xqcheck_teardown_scan(xqc);
	return error;
}

/* Scrub all counters for a given quota type. */
int
xchk_quotacheck(
	struct xfs_scrub	*sc)
{
	struct xqcheck		*xqc = sc->buf;
	int			error = 0;

	/* Check quota counters on the live filesystem. */
	error = xqcheck_setup_scan(sc, xqc);
	if (error)
		return error;

	/* Walk all inodes, picking up quota information. */
	error = xqcheck_collect_counts(xqc);
	if (!xchk_xref_process_error(sc, 0, 0, &error))
		return error;

	/* Fail fast if we're not playing with a full dataset. */
	if (xchk_iscan_aborted(&xqc->iscan))
		xchk_set_incomplete(sc);
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		return 0;

	/* Compare quota counters for each enabled quota type. */
	if (xqc->ucounts) {
		error = xqcheck_compare_dqtype(xqc, XFS_DQTYPE_USER);
		if (!xchk_xref_process_error(sc, 0, 0, &error))
			return error;
	}
	if (xqc->gcounts) {
		error = xqcheck_compare_dqtype(xqc, XFS_DQTYPE_GROUP);
		if (!xchk_xref_process_error(sc, 0, 0, &error))
			return error;
	}
	if (xqc->pcounts) {
		error = xqcheck_compare_dqtype(xqc, XFS_DQTYPE_PROJ);
		if (!xchk_xref_process_error(sc, 0, 0, &error))
			return error;
	}

	/* Check one last time for an incomplete dataset. */
	if (xchk_iscan_aborted(&xqc->iscan))
		xchk_set_incomplete(sc);

	return 0;
}