common.c: old version ecc73f8a58c7844b04186726f8699ba97cec2ef9, new version d5c88131dbf01a30a222ad82d58e0c21a15f0d8e
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"

--- 382 unchanged lines hidden ---

	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}

--- old version (ecc73f8a) ---

/*
 * Grab the perag structure and all the headers for an AG.
 *
 * The headers should be released by xchk_ag_free, but as a fail safe we attach
 * all the buffers we grab to the scrub transaction so they'll all be freed
 * when we cancel it. Returns ENOENT if we can't grab the perag structure.
 */
int
xchk_ag_read_headers(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	ASSERT(!sa->pag);
	sa->pag = xfs_perag_get(mp, agno);
	if (!sa->pag)
		return -ENOENT;

	error = xfs_ialloc_read_agi(sa->pag, sc->tp, &sa->agi_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		return error;

	error = xfs_alloc_read_agf(sa->pag, sc->tp, 0, &sa->agf_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		return error;

	return 0;
}

--- new version (d5c88131) ---

/*
 * Grab the AG header buffers for the attached perag structure.
 *
 * The headers should be released by xchk_ag_free, but as a fail safe we attach
 * all the buffers we grab to the scrub transaction so they'll all be freed
 * when we cancel it.
 */
static inline int
xchk_perag_read_headers(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	int			error;

	error = xfs_ialloc_read_agi(sa->pag, sc->tp, &sa->agi_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		return error;

	error = xfs_alloc_read_agf(sa->pag, sc->tp, 0, &sa->agf_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		return error;

	return 0;
}

/*
 * Grab the AG headers for the attached perag structure and wait for pending
 * intents to drain.
 */
static int
xchk_perag_drain_and_lock(
	struct xfs_scrub	*sc)
{
	struct xchk_ag		*sa = &sc->sa;
	int			error = 0;

	ASSERT(sa->pag != NULL);
	ASSERT(sa->agi_bp == NULL);
	ASSERT(sa->agf_bp == NULL);

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		error = xchk_perag_read_headers(sc, sa);
		if (error)
			return error;

		/*
		 * If we've grabbed an inode for scrubbing then we assume that
		 * holding its ILOCK will suffice to coordinate with any intent
		 * chains involving this inode.
		 */
		if (sc->ip)
			return 0;

		/*
		 * Decide if this AG is quiet enough for all metadata to be
		 * consistent with each other. XFS allows the AG header buffer
		 * locks to cycle across transaction rolls while processing
		 * chains of deferred ops, which means that there could be
		 * other threads in the middle of processing a chain of
		 * deferred ops. For regular operations we are careful about
		 * ordering operations to prevent collisions between threads
		 * (which is why we don't need a per-AG lock), but scrub and
		 * repair have to serialize against chained operations.
		 *
		 * We just locked all the AG headers buffers; now take a look
		 * to see if there are any intents in progress. If there are,
		 * drop the AG headers and wait for the intents to drain.
		 * Since we hold all the AG header locks for the duration of
		 * the scrub, this is the only time we have to sample the
		 * intents counter; any threads increasing it after this point
		 * can't possibly be in the middle of a chain of AG metadata
		 * updates.
		 *
		 * Obviously, this should be slanted against scrub and in favor
		 * of runtime threads.
		 */
		if (!xfs_perag_intent_busy(sa->pag))
			return 0;

		if (sa->agf_bp) {
			xfs_trans_brelse(sc->tp, sa->agf_bp);
			sa->agf_bp = NULL;
		}

		if (sa->agi_bp) {
			xfs_trans_brelse(sc->tp, sa->agi_bp);
			sa->agi_bp = NULL;
		}

		error = xfs_perag_intent_drain(sa->pag);
		if (error == -ERESTARTSYS)
			error = -EINTR;
	} while (!error);

	return error;
}

/*
 * Grab the per-AG structure, grab all AG header buffers, and wait until there
 * aren't any pending intents. Returns -ENOENT if we can't grab the perag
 * structure.
 */
int
xchk_ag_read_headers(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	ASSERT(!sa->pag);
	sa->pag = xfs_perag_get(mp, agno);
	if (!sa->pag)
		return -ENOENT;

	return xchk_perag_drain_and_lock(sc);
}

/* Release all the AG btree cursors. */
void
xchk_ag_btcur_free(
	struct xchk_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)

--- 481 unchanged lines hidden ---
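The heart of the new code is the retry loop in xchk_perag_drain_and_lock(): take the AG header buffer locks, sample the per-AG intent counter, and if any intent chains are still in flight, drop the buffers and wait for the counter to drain before trying again. The sketch below is a minimal user-space analogue of that pattern, not kernel code: the names headers_lock, intents, runtime_thread, and scrub_lock_and_drain are invented for illustration, a mutex stands in for the AGI/AGF buffer locks, a C11 atomic counter stands in for the counter queried by xfs_perag_intent_busy(), and a polling wait stands in for xfs_perag_intent_drain() (which, as the -ERESTARTSYS handling above suggests, sleeps interruptibly rather than polling).

/*
 * Illustrative analogue only -- build with: cc -pthread drain_demo.c
 * A "runtime" thread keeps its counter elevated across a whole chain of
 * steps even though it drops the lock between steps; the "scrub" side
 * refuses to proceed until it holds the lock AND the counter reads zero.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t headers_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for the AGI/AGF buffer locks */
static atomic_int intents;                                       /* stand-in for the per-AG intent counter */

/* One chain of "deferred ops": the counter stays raised for the whole chain. */
static void *runtime_thread(void *arg)
{
	(void)arg;
	atomic_fetch_add(&intents, 1);
	for (int step = 0; step < 3; step++) {
		pthread_mutex_lock(&headers_lock);
		usleep(1000);			/* one step of the chain */
		pthread_mutex_unlock(&headers_lock);
	}
	atomic_fetch_sub(&intents, 1);
	return NULL;
}

/* Scrub side: loop until we hold the lock while no chains are in flight. */
static void scrub_lock_and_drain(void)
{
	for (;;) {
		pthread_mutex_lock(&headers_lock);
		if (atomic_load(&intents) == 0)
			return;			/* quiet: keep the lock and scrub */
		pthread_mutex_unlock(&headers_lock);	/* busy: drop the lock... */
		while (atomic_load(&intents) != 0)
			usleep(100);		/* ...and "drain" (polling here; the kernel waits) */
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, runtime_thread, NULL);
	scrub_lock_and_drain();
	printf("analogue AG is quiet; scrub would run here under the lock\n");
	pthread_mutex_unlock(&headers_lock);
	pthread_join(t, NULL);
	return 0;
}

Any runtime thread that bumps the counter after the scrub side has returned simply blocks on headers_lock, which is why sampling the counter once while holding the locks is enough; this mirrors the argument made in the comment inside xchk_perag_drain_and_lock() above.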