
Searched refs:TXG_MASK (Results 1 – 21 of 21) sorted by relevance

/illumos-gate/usr/src/uts/common/fs/zfs/
txg.c
303 tc->tc_count[txg & TXG_MASK]++; in txg_hold_open()
325 int g = th->th_txg & TXG_MASK; in txg_register_callbacks()
336 int g = th->th_txg & TXG_MASK; in txg_rele_to_sync()
357 int g = txg & TXG_MASK; in txg_quiesce()
422 int g = txg & TXG_MASK; in txg_dispatch_callbacks()
802 return (tl->tl_head[txg & TXG_MASK] == NULL); in txg_list_empty()
830 int t = txg & TXG_MASK; in txg_list_add()
855 int t = txg & TXG_MASK; in txg_list_add_tail()
883 int t = txg & TXG_MASK; in txg_list_remove()
908 int t = txg & TXG_MASK; in txg_list_remove_this()
[all …]
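
These txg.c hits are the core of the pattern: per-CPU transaction hold counts live in a TXG_SIZE-entry ring, and every hold, release, and quiesce indexes that ring with txg & TXG_MASK. A minimal user-space sketch of the counter ring, assuming TXG_SIZE = 4 as in txg.h; the struct and function names here are illustrative, not the kernel's exact layout:

/*
 * Sketch of the txg.c hold-count ring.  tx_cpu_sketch_t is a stand-in
 * for the kernel's per-CPU structure.
 */
#include <stdint.h>
#include <assert.h>

#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)

typedef struct tx_cpu_sketch {
	uint64_t tc_count[TXG_SIZE];	/* open holds, one slot per txg */
} tx_cpu_sketch_t;

/* Open context: take a hold on the currently open txg (cf. line 303). */
static uint64_t
hold_open(tx_cpu_sketch_t *tc, uint64_t open_txg)
{
	tc->tc_count[open_txg & TXG_MASK]++;
	return (open_txg);
}

/* Release the hold so the txg can quiesce (cf. txg_rele_to_sync()). */
static void
rele(tx_cpu_sketch_t *tc, uint64_t txg)
{
	int g = txg & TXG_MASK;
	assert(tc->tc_count[g] > 0);
	tc->tc_count[g]--;
}

int
main(void)
{
	tx_cpu_sketch_t tc = { { 0 } };
	uint64_t txg = hold_open(&tc, 1234);
	rele(&tc, txg);
	assert(tc.tc_count[1234 & TXG_MASK] == 0);
	return (0);
}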
dnode.c
394 dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN; in dnode_setbonuslen()
396 dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen; in dnode_setbonuslen()
407 dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype; in dnode_setbonus_type()
417 dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK; in dnode_rm_spill()
657 dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs; in dnode_allocate()
658 dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen; in dnode_allocate()
659 dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype; in dnode_allocate()
660 dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz; in dnode_allocate()
700 dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize; in dnode_reallocate()
703 dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen; in dnode_reallocate()
[all …]
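
The dnode.c hits show the staging variant of the same trick: values destined for disk (dn_next_bonuslen, dn_next_blksz, and friends) are parked in TXG_SIZE-wide arrays under the dirtying transaction's txg slot, then consumed when that txg syncs. A hedged sketch, again assuming TXG_SIZE = 4; everything but the masking itself is illustrative:

/*
 * Sketch of the dnode.c per-txg staging pattern.
 */
#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)

typedef struct dnode_sketch {
	uint32_t dn_bonuslen;			/* current in-core value */
	uint32_t dn_next_bonuslen[TXG_SIZE];	/* staged per-txg values */
} dnode_sketch_t;

/* Open context: stage the new value in this txg's slot (cf. line 396). */
static void
setbonuslen(dnode_sketch_t *dn, uint32_t newlen, uint64_t txg)
{
	dn->dn_bonuslen = newlen;
	dn->dn_next_bonuslen[txg & TXG_MASK] = newlen;
}

/* Sync context: consume and clear the slot for the syncing txg. */
static void
sync_bonuslen(dnode_sketch_t *dn, uint64_t txg)
{
	uint32_t staged = dn->dn_next_bonuslen[txg & TXG_MASK];
	if (staged != 0)
		printf("txg %llu: write bonuslen %u to disk\n",
		    (unsigned long long)txg, (unsigned)staged);
	dn->dn_next_bonuslen[txg & TXG_MASK] = 0;
}

int
main(void)
{
	dnode_sketch_t dn = { 0, { 0 } };
	setbonuslen(&dn, 64, 1234);	/* open context, txg 1234 */
	sync_bonuslen(&dn, 1234);	/* later, when txg 1234 syncs */
	return (0);
}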
dsl_pool.c
652 dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg); in dsl_pool_sync()
660 dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0); in dsl_pool_sync()
661 dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0; in dsl_pool_sync()
697 !os->os_next_write_raw[txg & TXG_MASK]) { in dsl_pool_sync()
716 !os->os_next_write_raw[txg & TXG_MASK]) { in dsl_pool_sync()
889 dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space; in dsl_pool_dirty_space()
902 if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) { in dsl_pool_undirty_space()
904 space = dp->dp_dirty_pertxg[txg & TXG_MASK]; in dsl_pool_undirty_space()
906 ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space); in dsl_pool_undirty_space()
907 dp->dp_dirty_pertxg[txg & TXG_MASK] -= space; in dsl_pool_undirty_space()
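dsl_pool.c keeps dirty-space accounting in per-txg slots: dsl_pool_dirty_space() adds to the slot, and dsl_pool_undirty_space() subtracts with a clamp (the check at line 902 above) so a slot never underflows. A compact sketch of that add/clamp pair, assuming TXG_SIZE = 4:

/*
 * Sketch of the dsl_pool.c dirty-space accounting.  The clamp in
 * undirty mirrors the check at line 902: never subtract more than the
 * slot holds.
 */
#include <stdint.h>
#include <assert.h>

#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)

static uint64_t dp_dirty_pertxg[TXG_SIZE];

static void
pool_dirty_space(uint64_t space, uint64_t txg)
{
	dp_dirty_pertxg[txg & TXG_MASK] += space;
}

static void
pool_undirty_space(uint64_t space, uint64_t txg)
{
	if (dp_dirty_pertxg[txg & TXG_MASK] < space)
		space = dp_dirty_pertxg[txg & TXG_MASK];
	dp_dirty_pertxg[txg & TXG_MASK] -= space;
}

int
main(void)
{
	pool_dirty_space(8192, 77);
	pool_undirty_space(100000, 77);		/* clamped to 8192 */
	assert(dp_dirty_pertxg[77 & TXG_MASK] == 0);
	return (0);
}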
vdev_removal.c
549 int txgoff = (txg + i) & TXG_MASK; in free_from_removing_vdev()
608 svr->svr_bytes_done[txg & TXG_MASK] += inflight_size; in free_from_removing_vdev()
611 ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]); in free_from_removing_vdev()
632 svr->svr_bytes_done[txg & TXG_MASK] += size; in free_from_removing_vdev()
723 &svr->svr_new_segments[txg & TXG_MASK], tx); in vdev_mapping_sync()
732 range_tree_vacate(svr->svr_frees[txg & TXG_MASK], in vdev_mapping_sync()
734 ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=, in vdev_mapping_sync()
736 svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0; in vdev_mapping_sync()
1011 zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL, in spa_vdev_copy_segment()
1026 list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry); in spa_vdev_copy_segment()
[all …]
vdev_initialize.c
73 uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK]; in vdev_initialize_zap_update_sync()
74 vd->vdev_initialize_offset[txg & TXG_MASK] = 0; in vdev_initialize_zap_update_sync()
163 &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK]; in vdev_initialize_cb()
206 if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) { in vdev_initialize_write()
232 vd->vdev_initialize_offset[txg & TXG_MASK] = start + size; in vdev_initialize_write()
233 zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start, in vdev_initialize_write()
vdev_trim.c
209 uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK]; in vdev_trim_zap_update_sync()
210 vd->vdev_trim_offset[txg & TXG_MASK] = 0; in vdev_trim_zap_update_sync()
371 &vd->vdev_trim_offset[zio->io_txg & TXG_MASK]; in vdev_trim_cb()
486 vd->vdev_trim_offset[txg & TXG_MASK] == 0) { in vdev_trim_range()
515 vd->vdev_trim_offset[txg & TXG_MASK] = start + size; in vdev_trim_range()
517 zio_nowait(zio_trim(spa->spa_txg_zio[txg & TXG_MASK], vd, in vdev_trim_range()
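
vdev_initialize.c and vdev_trim.c share a checkpoint pattern: the I/O path records its progress (start + size) in the slot for the issuing txg, and the zap-update sync callback reads and zeroes that slot once the progress is durable. A sketch under the same TXG_SIZE = 4 assumption, with illustrative names:

/*
 * Sketch of the vdev_initialize.c / vdev_trim.c resume-offset
 * checkpoint.  trim_offset stands in for vdev_trim_offset.
 */
#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)

static uint64_t trim_offset[TXG_SIZE];	/* per-txg resume offsets */

/* I/O path: remember how far this txg's trim has reached (cf. line 515). */
static void
trim_range(uint64_t start, uint64_t size, uint64_t txg)
{
	trim_offset[txg & TXG_MASK] = start + size;
}

/* Sync path: persist and clear the slot, as the zap_update_sync hits do. */
static void
zap_update_sync(uint64_t txg)
{
	uint64_t last_offset = trim_offset[txg & TXG_MASK];
	trim_offset[txg & TXG_MASK] = 0;
	printf("txg %llu: persist resume offset %llu\n",
	    (unsigned long long)txg, (unsigned long long)last_offset);
}

int
main(void)
{
	trim_range(1 << 20, 1 << 17, 42);
	zap_update_sync(42);
	return (0);
}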
zil.c
609 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK)) in zilog_is_dirty_in_txg()
827 os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE; in zil_claim()
857 os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE; in zil_claim()
1771 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; in zil_remove_async()
1815 itxg = &zilog->zl_itxg[txg & TXG_MASK]; in zil_itx_assign()
1884 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK]; in zil_clean()
1937 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; in zil_get_commit_list()
1982 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; in zil_async_to_sync()
2854 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; in zil_sync()
3434 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = in zil_replaying()
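
zil.c adds a wrinkle: because a slot is reused every TXG_SIZE txgs, each zl_itxg slot is tagged with the txg it currently holds, and consumers such as zil_clean() skip slots whose tag does not match the txg they were asked about. The sketch below is a loose, simplified rendering of that slot-reuse check, not the kernel's actual itxg handling:

/*
 * Simplified zl_itxg ring: a slot records its owning txg so stale
 * reuses are detectable.  Field names follow zil.c only loosely.
 */
#include <stdint.h>
#include <stdio.h>

#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)

typedef struct itxg_sketch {
	uint64_t itxg_txg;	/* txg this slot currently holds */
	int itxg_nitx;		/* queued itx count (stand-in) */
} itxg_sketch_t;

static itxg_sketch_t zl_itxg[TXG_SIZE];

static void
itx_assign(uint64_t txg)
{
	itxg_sketch_t *itxg = &zl_itxg[txg & TXG_MASK];
	if (itxg->itxg_txg != txg) {		/* stale slot: reclaim it */
		itxg->itxg_txg = txg;
		itxg->itxg_nitx = 0;
	}
	itxg->itxg_nitx++;
}

static void
clean(uint64_t synced_txg)
{
	itxg_sketch_t *itxg = &zl_itxg[synced_txg & TXG_MASK];
	if (itxg->itxg_txg != synced_txg)	/* nothing for this txg */
		return;
	printf("txg %llu: clean %d itxs\n",
	    (unsigned long long)synced_txg, itxg->itxg_nitx);
	itxg->itxg_nitx = 0;
}

int
main(void)
{
	itx_assign(100);
	itx_assign(104);	/* same slot as txg 100, four txgs later */
	clean(104);
	return (0);
}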
dsl_dir.c
1162 ASSERT0(dd->dd_tempreserved[tx->tx_txg&TXG_MASK]); in dsl_dir_sync()
1164 dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024); in dsl_dir_sync()
1165 dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0; in dsl_dir_sync()
1180 space += dd->dd_space_towrite[i & TXG_MASK]; in dsl_dir_space_towrite()
1181 ASSERT3U(dd->dd_space_towrite[i & TXG_MASK], >=, 0); in dsl_dir_space_towrite()
1352 dd->dd_tempreserved[txg & TXG_MASK] += asize; in dsl_dir_tempreserve_impl()
1441 int txgidx = tx->tx_txg & TXG_MASK; in dsl_dir_tempreserve_clear()
1481 dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space; in dsl_dir_willuse_space()
dnode_sync.c
45 int txgoff = tx->tx_txg & TXG_MASK; in dnode_increase_indirection()
559 int txgoff = tx->tx_txg & TXG_MASK; in dnode_sync_free()
625 int txgoff = tx->tx_txg & TXG_MASK; in dnode_sync()
metaslab.c
1876 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]); in metaslab_verify_space()
3559 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK], in metaslab_condense()
3823 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK]; in metaslab_sync()
4089 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); in metaslab_sync()
4091 & TXG_MASK])); in metaslab_sync()
4118 msp->ms_allocating[(txg + t) & TXG_MASK])); in metaslab_evict()
4301 msp->ms_allocating[(txg + t) & TXG_MASK])); in metaslab_sync_done()
4312 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); in metaslab_sync_done()
4536 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) in metaslab_block_alloc()
4539 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size); in metaslab_block_alloc()
[all …]
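
metaslab.c's (txg + t) & TXG_MASK loops walk the slots of all txgs that may still have outstanding allocations; with TXG_CONCURRENT_STATES txgs in flight (open, quiescing, syncing), summing t = 0..2 covers them all. A sketch with a plain integer standing in for each per-txg range tree's space:

/*
 * Sketch of the metaslab.c "(txg + t) & TXG_MASK" loops, assuming
 * TXG_SIZE = 4 and TXG_CONCURRENT_STATES = 3 as in txg.h.
 */
#include <stdint.h>
#include <stdio.h>

#define TXG_CONCURRENT_STATES 3	/* open, quiescing, syncing */
#define TXG_SIZE 4
#define TXG_MASK (TXG_SIZE - 1)

static uint64_t ms_allocating_space[TXG_SIZE];

/* Sum outstanding allocations across every txg that may still be live. */
static uint64_t
outstanding_alloc(uint64_t txg)
{
	uint64_t allocating = 0;
	for (int t = 0; t < TXG_CONCURRENT_STATES; t++)
		allocating += ms_allocating_space[(txg + t) & TXG_MASK];
	return (allocating);
}

int
main(void)
{
	ms_allocating_space[5 & TXG_MASK] = 4096;	/* txg 5 */
	ms_allocating_space[6 & TXG_MASK] = 8192;	/* txg 6 */
	printf("outstanding at txg 5: %llu\n",
	    (unsigned long long)outstanding_alloc(5));	/* 12288 */
	return (0);
}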
dsl_dataset.c
1166 os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE; in dsl_dataset_zero_zil()
1329 !os->os_next_write_raw[tx->tx_txg & TXG_MASK]) { in dsl_dataset_dirty()
1960 if (ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] != 0) { in dsl_dataset_sync()
1963 &ds->ds_resume_object[tx->tx_txg & TXG_MASK], tx)); in dsl_dataset_sync()
1966 &ds->ds_resume_offset[tx->tx_txg & TXG_MASK], tx)); in dsl_dataset_sync()
1969 &ds->ds_resume_bytes[tx->tx_txg & TXG_MASK], tx)); in dsl_dataset_sync()
1970 ds->ds_resume_object[tx->tx_txg & TXG_MASK] = 0; in dsl_dataset_sync()
1971 ds->ds_resume_offset[tx->tx_txg & TXG_MASK] = 0; in dsl_dataset_sync()
1972 ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] = 0; in dsl_dataset_sync()
2010 os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE; in dsl_dataset_sync_done()
[all …]
dmu_object.c
438 dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type = in dmu_object_zapify()
vdev_indirect.c
551 &sci->sci_new_mapping_entries[txg & TXG_MASK], tx); in spa_condense_indirect_commit_sync()
552 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK])); in spa_condense_indirect_commit_sync()
570 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; in spa_condense_indirect_commit_entry()
dmu_objset.c
1064 mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] = in dmu_objset_create_impl_dnstats()
1709 os->os_next_write_raw[tx->tx_txg & TXG_MASK]) { in dmu_objset_sync()
1743 txgoff = tx->tx_txg & TXG_MASK; in dmu_objset_sync()
1800 return (!multilist_is_empty(os->os_dirty_dnodes[txg & TXG_MASK])); in dmu_objset_is_dirty()
dbuf.c
1687 int txgoff = tx->tx_txg & TXG_MASK; in dbuf_dirty()
1914 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || in dbuf_dirty()
1915 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); in dbuf_dirty()
2040 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); in dbuf_undirty()
3645 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); in dbuf_sync_leaf()
spa.c
8423 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
8575 (void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
8576 spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
8670 ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
dmu.c
909 dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] += in dmu_free_long_range_impl()
dmu_recv.c
1073 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; in save_resume_state()
dsl_crypt.c
2131 os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE; in dsl_crypto_recv_raw_objset_sync()
zio.c
1055 bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp); in zio_free()
/illumos-gate/usr/src/uts/common/fs/zfs/sys/
txg.h
41 #define TXG_MASK (TXG_SIZE - 1) /* mask for size */ macro
43 #define TXG_IDX (txg & TXG_MASK)
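
The two definitions above are the whole mechanism: TXG_SIZE is 4 in txg.h (the next power of two above the three concurrent states: open, quiescing, syncing), so txg & TXG_MASK is a cheap txg % TXG_SIZE, and a slot is only reused once the txg that last owned it has fully synced. A self-contained demonstration:

/*
 * Demonstrates the slot mapping behind every hit above: masking with
 * TXG_SIZE - 1 is equivalent to modulo TXG_SIZE because TXG_SIZE is a
 * power of two.  Values match txg.h; the program itself is just a demo.
 */
#include <stdio.h>
#include <stdint.h>

#define TXG_SIZE	4		/* next power of 2 */
#define TXG_MASK	(TXG_SIZE - 1)	/* mask for size */

int
main(void)
{
	for (uint64_t txg = 10; txg < 18; txg++) {
		printf("txg %llu -> slot %llu\n",
		    (unsigned long long)txg,
		    (unsigned long long)(txg & TXG_MASK));
	}
	return (0);
}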