Lines Matching defs:txg
47 * either the DMU transaction group (txg) commits them to the stable pool
93 * a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG
295 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
334 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
353 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
442 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
450 lwb->lwb_max_txg = txg;
473 zilog_dirty(zilog_t *zilog, uint64_t txg)
481 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
507 uint64_t txg = 0;
531 txg = dmu_tx_get_txg(tx);
534 zio_free_zil(zilog->zl_spa, txg, &blk);
538 error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
549 lwb = zil_alloc_lwb(zilog, &blk, txg);
558 txg_wait_synced(zilog->zl_dmu_pool, txg);
581 uint64_t txg;
596 txg = dmu_tx_get_txg(tx);
600 ASSERT3U(zilog->zl_destroy_txg, <, txg);
601 zilog->zl_destroy_txg = txg;
611 zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
853 * the txg. If we have had an allocation failure and
854 * the txg is waiting to sync then we want zil_sync()
867 * to the next block in the chain, so it's OK to let the txg in
933 uint64_t txg;
959 txg = dmu_tx_get_txg(tx);
992 error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
995 ASSERT3U(bp->blk_birth, ==, txg);
1002 nlwb = zil_alloc_lwb(zilog, bp, txg);
1042 uint64_t txg = lrc->lrc_txg;
1071 txg_wait_synced(zilog->zl_dmu_pool, txg);
1085 if (txg > spa_freeze_txg(zilog->zl_spa))
1086 txg_wait_synced(zilog->zl_dmu_pool, txg);
1102 txg_wait_synced(zilog->zl_dmu_pool, txg);
1121 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
1208 uint64_t otxg, txg;
1223 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1224 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1227 if (itxg->itxg_txg != txg) {
1252 uint64_t txg;
1257 * Object ids can be re-instantiated in the next txg so
1273 txg = ZILTEST_TXG;
1275 txg = dmu_tx_get_txg(tx);
1277 itxg = &zilog->zl_itxg[txg & TXG_MASK];
1280 if (itxg->itxg_txg != txg) {
1292 itxg->itxg_txg = txg;
1323 zilog_dirty(zilog, txg);
1334 * have written out the uberblocks (i.e. txg has been committed) so that
1375 uint64_t otxg, txg;
1384 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1385 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1388 if (itxg->itxg_txg != txg) {
1408 uint64_t otxg, txg;
1418 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1419 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1422 if (itxg->itxg_txg != txg) {
1457 uint64_t txg;
1488 txg = itx->itx_lr.lrc_txg;
1489 ASSERT(txg);
1491 if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
1593 uint64_t txg = dmu_tx_get_txg(tx);
1595 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1615 if (zilog->zl_destroy_txg == txg) {
1639 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1642 zio_free_zil(spa, txg, &lwb->lwb_blk);
1744 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
1790 uint64_t txg = 0;
1796 * for the zil. After a txg_wait_synced() on the txg we know all the
1803 txg = lwb->lwb_max_txg;
1805 if (txg)
1806 txg_wait_synced(zilog->zl_dmu_pool, txg);
2047 * The DMU's dnode layer doesn't see removes until the txg
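A pattern that recurs throughout these matches is indexing a small per-txg ring with (txg & TXG_MASK) and validating the slot by comparing its stamped txg against the current one (see the hits around source lines 1223-1227, 1277-1292, 1384-1388, and 1418-1422). The standalone C sketch below models only that indexing idea under simplifying assumptions: TXG_CONCURRENT_STATES, TXG_SIZE, and TXG_MASK follow the usual ZFS definitions, but the slot_t struct and slot_assign() helper are hypothetical stand-ins, not the actual itxg_t or zil_itx_assign() code.

/*
 * Minimal sketch of the per-txg slot indexing seen above: a power-of-two
 * ring indexed by (txg & TXG_MASK), where each slot records which txg it
 * was last stamped with so a stale slot can be detected and recycled.
 *
 * TXG_CONCURRENT_STATES, TXG_SIZE, and TXG_MASK mirror the ZFS naming;
 * slot_t and slot_assign() are simplified stand-ins for itxg_t and the
 * itx-assignment path in zil.c.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	TXG_CONCURRENT_STATES	3		/* open, quiescing, syncing */
#define	TXG_SIZE		4		/* next power of 2 */
#define	TXG_MASK		(TXG_SIZE - 1)

typedef struct slot {
	uint64_t	slot_txg;	/* txg this slot was last stamped with */
	int		slot_nrecords;	/* stand-in for the per-txg itx lists */
} slot_t;

static slot_t slots[TXG_SIZE];

/* Add one record to the slot for 'txg', recycling a stale slot if needed. */
static void
slot_assign(uint64_t txg)
{
	slot_t *s = &slots[txg & TXG_MASK];

	if (s->slot_txg != txg) {
		/*
		 * The slot still holds state from an older (already synced)
		 * txg; in zil.c this is where the old itx lists would be
		 * cleaned up before the slot is re-stamped.
		 */
		s->slot_nrecords = 0;
		s->slot_txg = txg;
	}
	s->slot_nrecords++;
}

int
main(void)
{
	/* Only TXG_CONCURRENT_STATES txgs can be in flight at once. */
	for (uint64_t txg = 1; txg <= 10; txg++)
		slot_assign(txg);

	/* txg 10 maps to slot (10 & TXG_MASK) == 2. */
	assert(slots[10 & TXG_MASK].slot_txg == 10);
	printf("slot %llu holds txg %llu with %d record(s)\n",
	    (unsigned long long)(10 & TXG_MASK),
	    (unsigned long long)slots[10 & TXG_MASK].slot_txg,
	    slots[10 & TXG_MASK].slot_nrecords);
	return (0);
}

The stamp check is what lets callers walk the loop form seen above, for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++), and skip ring entries that still belong to an older transaction group.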