Lines Matching defs:txg
44 * these transaction groups. Each successive transaction group (txg) is
47 * there may be an active txg associated with each state; each active txg may
49 * be up to three active txgs, and there is always a txg in the open state
52 * accepted into the txg in the open state, and are completed while the txg is
58 * When a new txg becomes active, it first enters the open state. New
60 * currently open txg. There is always a txg in the open state so that ZFS can
61 * accept new changes (though the txg may refuse new changes if it has hit
62 * some limit). ZFS advances the open txg to the next state for a variety of
68 * After a txg exits the open state, it enters the quiescing state. The
72 * operation without delaying either of the other states. Typically, a txg is
75 * transactions complete, the txg is ready to enter the next state.
102 * datasets. Note that when a synctask is initiated it enters the open txg,
103 * and ZFS then pushes that txg as quickly as possible to completion of the
107 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
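
The comment above describes a pipeline of three concurrent states (open, quiescing, syncing). Throughout this file an ever-increasing txg number is mapped onto a small fixed ring via `txg & TXG_MASK`. TXG_SIZE (4) and TXG_MASK (TXG_SIZE - 1) match sys/txg.h; the per-slot struct and the program around it are a hypothetical, runnable illustration, not pool code:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * TXG_SIZE/TXG_MASK mirror sys/txg.h: the next power of two that
     * covers the three concurrent states. struct slot is hypothetical.
     */
    #define TXG_SIZE  4
    #define TXG_MASK  (TXG_SIZE - 1)

    struct slot { uint64_t last_txg; };

    int
    main(void)
    {
            struct slot ring[TXG_SIZE] = {{ 0 }};

            for (uint64_t txg = 100; txg < 108; txg++) {
                    /* Ever-increasing txgs reuse a fixed set of slots. */
                    ring[txg & TXG_MASK].last_txg = txg;
                    printf("txg %llu -> slot %llu\n",
                        (unsigned long long)txg,
                        (unsigned long long)(txg & TXG_MASK));
            }
            return (0);
    }
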
114 uint_t zfs_txg_timeout = 5; /* max seconds worth of delta per txg */
117 * Prepare the txg subsystem.
120 txg_init(dsl_pool_t *dp, uint64_t txg)
151 tx->tx_open_txg = txg;
155 * Close down the txg subsystem.
296 * Get a handle on the currently open txg and keep it open.
298 * The txg is guaranteed to stay open until txg_rele_to_quiesce() is called for
299 * the handle. Once txg_rele_to_quiesce() has been called, the txg stays
304 * because the same txg can be returned multiple times in a row. This
321 uint64_t txg;
332 txg = tx->tx_open_txg;
335 tc->tc_count[txg & TXG_MASK]++;
339 th->th_txg = txg;
341 return (txg);
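
The hold lifecycle described above (lines 296-341) is normally exercised indirectly through dmu_tx_assign()/dmu_tx_commit(), but the raw sequence looks roughly like the sketch below. It compiles only inside the OpenZFS tree; the helper name and the work in between are hypothetical:

    #include <sys/txg.h>
    #include <sys/dsl_pool.h>

    /* Hypothetical helper showing the raw hold/release lifecycle. */
    static uint64_t
    record_in_open_txg(dsl_pool_t *dp)
    {
            txg_handle_t th;
            /* Pin the open txg; it cannot leave the open state yet. */
            uint64_t txg = txg_hold_open(dp, &th);

            /* ... record the change under this txg ... */

            /* Allow the txg to begin quiescing ... */
            txg_rele_to_quiesce(&th);

            /* ... but it cannot finish quiescing (and so cannot
             * sync) until the final release below. */
            txg_rele_to_sync(&th);
            return (txg);
    }
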
386 txg_quiesce(dsl_pool_t *dp, uint64_t txg)
390 int g = txg & TXG_MASK;
394 * Grab all tc_open_locks so nobody else can get into this txg.
399 ASSERT(txg == tx->tx_open_txg);
403 DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
413 spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time);
414 spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time);
428 spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
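
txg_quiesce() (lines 386-428) blocks new entries by sweeping every per-CPU tc_open_lock, advances the open txg while all of them are held, then waits for the holds already inside to drain. A minimal userland model of that sweep, using pthreads and locally defined stand-ins for tx_cpu_t (the real struct lives in sys/txg_impl.h):

    #include <pthread.h>
    #include <stdint.h>

    #define NCPU      4
    #define TXG_MASK  3     /* mirrors sys/txg.h */

    typedef struct tx_cpu {
            pthread_mutex_t tc_open_lock;
            pthread_mutex_t tc_lock;
            pthread_cond_t  tc_cv[TXG_MASK + 1];
            int             tc_count[TXG_MASK + 1];
    } tx_cpu_t;

    static void
    model_quiesce(tx_cpu_t *cpus, uint64_t txg)
    {
            int g = (int)(txg & TXG_MASK);

            /* Block new holds: take every per-CPU open lock. */
            for (int c = 0; c < NCPU; c++)
                    pthread_mutex_lock(&cpus[c].tc_open_lock);
            /* (the real code advances tx_open_txg right here) */
            for (int c = 0; c < NCPU; c++)
                    pthread_mutex_unlock(&cpus[c].tc_open_lock);

            /* Wait for holds taken before the sweep to drain. */
            for (int c = 0; c < NCPU; c++) {
                    pthread_mutex_lock(&cpus[c].tc_lock);
                    while (cpus[c].tc_count[g] != 0)
                            pthread_cond_wait(&cpus[c].tc_cv[g],
                                &cpus[c].tc_lock);
                    pthread_mutex_unlock(&cpus[c].tc_lock);
            }
    }
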
442 * Dispatch the commit callbacks registered on this txg to worker threads.
448 txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
458 * only be called once a txg has been synced.
461 int g = txg & TXG_MASK;
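
Commit callbacks are what lines 442-461 dispatch: a caller attaches a function to a transaction, and the callback taskq runs it only after the txg carrying that transaction has synced (or with an error if the txg is aborted, e.g. at pool teardown). dmu_tx_callback_register() is the public hook; the callback body and its argument here are hypothetical:

    #include <sys/dmu.h>

    /* Hypothetical callback: runs only after the txg has synced. */
    static void
    my_commit_cb(void *arg, int error)
    {
            /*
             * error == 0 means the transaction is on stable storage;
             * nonzero means the txg was aborted and never synced.
             */
            /* ... release state tied to the write ... */
    }

    /* After dmu_tx_assign() succeeds, before dmu_tx_commit():
     *
     *      dmu_tx_callback_register(tx, my_commit_cb, my_arg);
     */
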
533 uint64_t txg;
537 * on us, or the quiesce thread has handed off a txg to
563 * Wait until the quiesce thread hands off a txg to us,
582 * Consume the quiesced txg which has been handed off to
584 * able to quiesce another txg, so we must signal it.
587 txg = tx->tx_quiesced_txg;
589 tx->tx_syncing_txg = txg;
590 DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
593 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
594 (u_longlong_t)txg, (u_longlong_t)tx->tx_quiesce_txg_waiting,
598 txg_stat_t *ts = spa_txg_history_init_io(spa, txg, dp);
600 spa_sync(spa, txg);
605 tx->tx_synced_txg = txg;
607 DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
613 txg_dispatch_callbacks(dp, txg);
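
Lines 533-613 are the consumer half of a classic condition-variable handoff between the quiesce thread and the sync thread. Below is a runnable userland model of just that handoff; the field and cv names follow the real tx_state_t in sys/txg_impl.h, but the struct is a stand-in and the timeout/shutdown logic is omitted:

    #include <pthread.h>
    #include <stdint.h>

    typedef struct tx_state {
            pthread_mutex_t tx_sync_lock;
            pthread_cond_t  tx_quiesce_done_cv;  /* quiesce -> sync */
            pthread_cond_t  tx_quiesce_more_cv;  /* sync -> quiesce */
            pthread_cond_t  tx_sync_done_cv;     /* sync -> waiters */
            uint64_t        tx_quiesced_txg;     /* 0 = no handoff */
            uint64_t        tx_syncing_txg;
            uint64_t        tx_synced_txg;
    } tx_state_t;

    static void
    sync_one(tx_state_t *tx)
    {
            uint64_t txg;

            pthread_mutex_lock(&tx->tx_sync_lock);
            while (tx->tx_quiesced_txg == 0)     /* wait for a handoff */
                    pthread_cond_wait(&tx->tx_quiesce_done_cv,
                        &tx->tx_sync_lock);

            txg = tx->tx_quiesced_txg;           /* consume it, then  */
            tx->tx_quiesced_txg = 0;             /* signal so another */
            tx->tx_syncing_txg = txg;            /* txg can quiesce   */
            pthread_cond_broadcast(&tx->tx_quiesce_more_cv);
            pthread_mutex_unlock(&tx->tx_sync_lock);

            /* spa_sync(spa, txg) runs here, outside tx_sync_lock */

            pthread_mutex_lock(&tx->tx_sync_lock);
            tx->tx_synced_txg = txg;
            pthread_cond_broadcast(&tx->tx_sync_done_cv);
            pthread_mutex_unlock(&tx->tx_sync_lock);
    }
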
627 uint64_t txg;
631 * However, we can only have one txg in "quiescing" or
633 * the "quiesced, waiting to sync" txg has been consumed
644 txg = tx->tx_open_txg;
645 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
646 (u_longlong_t)txg,
649 tx->tx_quiescing_txg = txg;
652 txg_quiesce(dp, txg);
656 * Hand this txg off to the sync thread.
658 dprintf("quiesce done, handing off txg %llu\n",
659 (u_longlong_t)txg);
661 tx->tx_quiesced_txg = txg;
662 DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
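
And the producer half, matching lines 627-662: quiesce the open txg, then wait until the single "quiesced, waiting to sync" slot is free before handing off. This reuses the stand-in tx_state_t from the previous sketch:

    /* Producer half of the handoff modeled above. */
    static void
    quiesce_one(tx_state_t *tx, uint64_t txg)
    {
            /* (first drain all holds on txg, as in txg_quiesce()) */

            pthread_mutex_lock(&tx->tx_sync_lock);
            /* Only one txg may wait in the quiesced state at a time. */
            while (tx->tx_quiesced_txg != 0)
                    pthread_cond_wait(&tx->tx_quiesce_more_cv,
                        &tx->tx_sync_lock);
            tx->tx_quiesced_txg = txg;           /* hand it off */
            pthread_cond_broadcast(&tx->tx_quiesce_done_cv);
            pthread_mutex_unlock(&tx->tx_sync_lock);
    }
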
670 * transaction group and there is already a waiting txg quiescing or quiesced.
671 * Abort the delay if this txg stalls or enters the quiescing state.
674 txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
679 /* don't delay if this txg could transition to quiescing immediately */
680 if (tx->tx_open_txg > txg ||
681 tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
685 if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
691 tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
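
The guards at lines 680-691 implement the rule stated at 670-671: only delay a writer when the pipeline is actually backed up. A hedged restatement of the "skip the delay" test as a predicate (the helper name is hypothetical; tx_state_t is the in-tree struct from sys/txg_impl.h):

    /* Delay is pointless if this txg could quiesce immediately. */
    static boolean_t
    txg_delay_is_pointless(tx_state_t *tx, uint64_t txg)
    {
            return (tx->tx_open_txg > txg ||        /* already advanced  */
                tx->tx_syncing_txg == txg-1 ||      /* prior txg syncing */
                tx->tx_synced_txg == txg-1);        /* prior txg synced  */
    }
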
702 txg_wait_synced_impl(dsl_pool_t *dp, uint64_t txg, boolean_t wait_sig)
710 if (txg == 0)
711 txg = tx->tx_open_txg + TXG_DEFER_SIZE;
712 if (tx->tx_sync_txg_waiting < txg)
713 tx->tx_sync_txg_waiting = txg;
714 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
715 (u_longlong_t)txg, (u_longlong_t)tx->tx_quiesce_txg_waiting,
717 while (tx->tx_synced_txg < txg) {
727 * to resume waiting for this txg.
743 txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
745 VERIFY0(txg_wait_synced_impl(dp, txg, B_FALSE));
753 txg_wait_synced_sig(dsl_pool_t *dp, uint64_t txg)
755 return (txg_wait_synced_impl(dp, txg, B_TRUE));
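
Typical callers pass either a specific txg (e.g. the one their change was assigned to) or 0, which per lines 710-711 means the open txg plus the defer window. A small in-tree usage sketch; the helper is hypothetical:

    #include <sys/txg.h>
    #include <sys/dsl_pool.h>

    /* Hypothetical helper: make a change assigned to `txg` durable. */
    static int
    wait_durable(dsl_pool_t *dp, uint64_t txg, boolean_t interruptible)
    {
            if (interruptible) {
                    /* Returns EINTR if a signal arrives; the caller
                     * may call again to resume waiting (line 727). */
                    return (txg_wait_synced_sig(dp, txg));
            }
            txg_wait_synced(dp, txg);   /* txg == 0: everything dirty now */
            return (0);
    }
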
760 * when the current open txg should be quiesced immediately.
763 txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
771 if (txg == 0)
772 txg = tx->tx_open_txg + 1;
773 if (tx->tx_quiesce_txg_waiting < txg && should_quiesce)
774 tx->tx_quiesce_txg_waiting = txg;
775 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
776 (u_longlong_t)txg, (u_longlong_t)tx->tx_quiesce_txg_waiting,
778 while (tx->tx_open_txg < txg) {
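
txg_wait_open() is the flow-control counterpart: block until a given txg is open, optionally (should_quiesce, line 763) asking the quiesce thread to push the current open txg along rather than waiting out zfs_txg_timeout. A hedged usage sketch with a hypothetical helper:

    /* Hypothetical helper: stall a writer until the next txg opens. */
    static void
    wait_for_next_open_txg(dsl_pool_t *dp)
    {
            /* txg == 0 means tx_open_txg + 1 (line 772); B_TRUE asks
             * for the currently open txg to be quiesced immediately. */
            txg_wait_open(dp, 0, B_TRUE);
    }
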
797 * Pass in the txg number that should be synced.
800 txg_kick(dsl_pool_t *dp, uint64_t txg)
806 if (tx->tx_sync_txg_waiting >= txg)
810 if (tx->tx_sync_txg_waiting < txg) {
811 tx->tx_sync_txg_waiting = txg;
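
txg_kick() is the non-blocking cousin: it records the target in tx_sync_txg_waiting (lines 806-811) and wakes the sync thread, then returns. A hedged sketch of the usual pairing; the helper is hypothetical:

    /* Hypothetical pattern: nudge the sync thread, don't block. */
    static void
    push_txg(dsl_pool_t *dp, uint64_t txg)
    {
            txg_kick(dp, txg);      /* wakes the sync thread, returns */
            /* call txg_wait_synced(dp, txg) later if the caller
             * actually needs durability, not just forward progress */
    }
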
834 * Verify that this txg is active (open, quiescing, syncing). Non-active
835 * txgs should not be manipulated.
839 txg_verify(spa_t *spa, uint64_t txg)
842 if (txg <= TXG_INITIAL || txg == ZILTEST_TXG)
844 ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
845 ASSERT3U(txg, >=, dp->dp_tx.tx_synced_txg);
846 ASSERT3U(txg, >=, dp->dp_tx.tx_open_txg - TXG_CONCURRENT_STATES);
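
The three ASSERTs above pin an "active" txg into the window from tx_open_txg - TXG_CONCURRENT_STATES up to tx_open_txg, after skipping the bootstrap txgs (<= TXG_INITIAL) and ZILTEST_TXG. Restated as a plain in-tree predicate (the helper is hypothetical, and is written with addition rather than subtraction to avoid unsigned underflow when open_txg is small):

    static boolean_t
    txg_in_active_window(uint64_t txg, uint64_t open_txg,
        uint64_t synced_txg)
    {
            return (txg <= open_txg && txg >= synced_txg &&
                txg + TXG_CONCURRENT_STATES >= open_txg);
    }
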
851 * Per-txg object lists.
868 txg_list_empty_impl(txg_list_t *tl, uint64_t txg)
871 TXG_VERIFY(tl->tl_spa, txg);
872 return (tl->tl_head[txg & TXG_MASK] == NULL);
876 txg_list_empty(txg_list_t *tl, uint64_t txg)
879 boolean_t ret = txg_list_empty_impl(tl, txg);
899 * Returns true if all txg lists are empty.
918 txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
920 int t = txg & TXG_MASK;
924 TXG_VERIFY(tl->tl_spa, txg);
943 txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
945 int t = txg & TXG_MASK;
949 TXG_VERIFY(tl->tl_spa, txg);
971 txg_list_remove(txg_list_t *tl, uint64_t txg)
973 int t = txg & TXG_MASK;
977 TXG_VERIFY(tl->tl_spa, txg);
996 txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
998 int t = txg & TXG_MASK;
1001 TXG_VERIFY(tl->tl_spa, txg);
1020 txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
1022 int t = txg & TXG_MASK;
1025 TXG_VERIFY(tl->tl_spa, txg);
1030 * Walk a txg list
1033 txg_list_head(txg_list_t *tl, uint64_t txg)
1035 int t = txg & TXG_MASK;
1042 TXG_VERIFY(tl->tl_spa, txg);
1047 txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
1049 int t = txg & TXG_MASK;
1052 TXG_VERIFY(tl->tl_spa, txg);
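
The walkers above support two common patterns: a destructive drain at sync time (txg_list_remove) and a read-only walk (txg_list_head/txg_list_next). An in-tree sketch of the drain pattern, roughly as dsl_pool_sync() uses it on dp_dirty_datasets; the object handling is hypothetical:

    #include <sys/txg.h>

    static void
    drain_dirty(txg_list_t *tl, uint64_t txg)
    {
            void *obj;

            /* Pops entries from this txg's ring slot until empty. */
            while ((obj = txg_list_remove(tl, txg)) != NULL) {
                    /* ... write `obj` out; if it dirties again it
                     * re-adds itself to the list under txg + 1 ... */
            }
    }
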
1077 "Max seconds worth of delta per txg");