/freebsd/sys/contrib/openzfs/module/zfs/
txg.c
   337  tc->tc_count[txg & TXG_MASK]++;  in txg_hold_open()
   359  int g = th->th_txg & TXG_MASK;  in txg_register_callbacks()
   370  int g = th->th_txg & TXG_MASK;  in txg_rele_to_sync()
   392  int g = txg & TXG_MASK;  in txg_quiesce()
   463  int g = txg & TXG_MASK;  in txg_dispatch_callbacks()
   892  return (tl->tl_head[txg & TXG_MASK] == NULL);  in txg_list_empty_impl()
   940  int t = txg & TXG_MASK;  in txg_list_add()
   965  int t = txg & TXG_MASK;  in txg_list_add_tail()
   993  int t = txg & TXG_MASK;  in txg_list_remove()
  1018  int t = txg & TXG_MASK;  in txg_list_remove_this()
  [all …]

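The idiom repeated throughout these hits is a fixed ring of per-txg slots: TXG_SIZE is 4 (a power of two), TXG_MASK is 3, and at most TXG_CONCURRENT_STATES (3) transaction groups are in flight at once (open, quiescing, syncing), so txg & TXG_MASK maps every live txg to its own slot without collisions. A minimal sketch of the counter pattern used by txg_hold_open()/txg_rele_to_sync() above; the type and function names here are hypothetical, not the real tx_cpu layout from txg_impl.h:

    #include <assert.h>
    #include <stdint.h>

    #define TXG_SIZE  4               /* power of two, > TXG_CONCURRENT_STATES */
    #define TXG_MASK  (TXG_SIZE - 1)  /* 3: masking == modulo for powers of two */

    typedef struct txg_counts {
        uint64_t tc_count[TXG_SIZE];  /* one hold count per in-flight txg */
    } txg_counts_t;

    static void
    hold_open(txg_counts_t *tc, uint64_t txg)
    {
        tc->tc_count[txg & TXG_MASK]++;  /* txg % TXG_SIZE, but branch-free */
    }

    static void
    rele_to_sync(txg_counts_t *tc, uint64_t txg)
    {
        int g = txg & TXG_MASK;
        assert(tc->tc_count[g] > 0);
        tc->tc_count[g]--;
    }

    int
    main(void)
    {
        txg_counts_t tc = { { 0 } };
        hold_open(&tc, 8);                /* txg 8 lands in slot 8 & 3 == 0 */
        assert(tc.tc_count[8 & TXG_MASK] == 1);
        rele_to_sync(&tc, 8);
        return (0);
    }

Because a slot is reused only after its txg has fully synced, the consumer (txg_quiesce(), txg_dispatch_callbacks()) can read the slot for an older txg without locking against the writer of the current one.
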
dsl_pool.c
   619  aggsum_add(&dp->dp_wrlog_pertxg[txg & TXG_MASK], size);  in dsl_pool_wrlog_count()
   625  if (aggsum_compare(&dp->dp_wrlog_pertxg[txg & TXG_MASK], sync_min) > 0)  in dsl_pool_wrlog_count()
   642  delta = -(int64_t)aggsum_value(&dp->dp_wrlog_pertxg[txg & TXG_MASK]);  in dsl_pool_wrlog_clear()
   643  aggsum_add(&dp->dp_wrlog_pertxg[txg & TXG_MASK], delta);  in dsl_pool_wrlog_clear()
   646  (void) aggsum_value(&dp->dp_wrlog_pertxg[txg & TXG_MASK]);  in dsl_pool_wrlog_clear()
   731  dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);  in dsl_pool_sync()
   732  dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;  in dsl_pool_sync()
   768  !os->os_next_write_raw[txg & TXG_MASK]) {  in dsl_pool_sync()
   788  !os->os_next_write_raw[txg & TXG_MASK]) {  in dsl_pool_sync()
   834  dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);  in dsl_pool_sync()
  [all …]

dnode.c
   539  dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;  in dnode_setbonuslen()
   541  dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;  in dnode_setbonuslen()
   552  dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;  in dnode_setbonus_type()
   573  dn->dn_rm_spillblk[tx->tx_txg & TXG_MASK] = DN_KILL_SPILLBLK;  in dnode_rm_spill()
   805  dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;  in dnode_allocate()
   806  dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;  in dnode_allocate()
   807  dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;  in dnode_allocate()
   808  dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;  in dnode_allocate()
   849  dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = blocksize;  in dnode_reallocate()
   852  dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = bonuslen;  in dnode_reallocate()
  [all …]

vdev_removal.c
   731  int txgoff = (txg + i) & TXG_MASK;  in free_from_removing_vdev()
   790  svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;  in free_from_removing_vdev()
   793  ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);  in free_from_removing_vdev()
   815  svr->svr_bytes_done[txg & TXG_MASK] += size;  in free_from_removing_vdev()
   908  &svr->svr_new_segments[txg & TXG_MASK], tx);  in vdev_mapping_sync()
   917  zfs_range_tree_vacate(svr->svr_frees[txg & TXG_MASK],  in vdev_mapping_sync()
   919  ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,  in vdev_mapping_sync()
   921  svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;  in vdev_mapping_sync()
  1233  zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,  in spa_vdev_copy_segment()
  1248  list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);  in spa_vdev_copy_segment()
  [all …]

vdev_rebuild.c
   202  if (vr->vr_scan_offset[txg & TXG_MASK] > 0) {  in vdev_rebuild_update_sync()
   203  vrp->vrp_last_offset = vr->vr_scan_offset[txg & TXG_MASK];  in vdev_rebuild_update_sync()
   204  vr->vr_scan_offset[txg & TXG_MASK] = 0;  in vdev_rebuild_update_sync()
   502  uint64_t *off = &vr->vr_scan_offset[zio->io_txg & TXG_MASK];  in vdev_rebuild_cb()
   602  if (vr->vr_scan_offset[txg & TXG_MASK] == 0) {  in vdev_rebuild_range()
   603  vr->vr_scan_offset[txg & TXG_MASK] = start;  in vdev_rebuild_range()
   622  vr->vr_scan_offset[txg & TXG_MASK] = start + size;  in vdev_rebuild_range()
   626  zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa, &blk,  in vdev_rebuild_range()

vdev_initialize.c
    76  uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];  in vdev_initialize_zap_update_sync()
    77  vd->vdev_initialize_offset[txg & TXG_MASK] = 0;  in vdev_initialize_zap_update_sync()
   216  &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];  in vdev_initialize_cb()
   259  if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {  in vdev_initialize_write()
   284  vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;  in vdev_initialize_write()
   285  zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,  in vdev_initialize_write()

dmu_redact.c
   524  list_t *list = &md->md_blocks[txg & TXG_MASK];  in redaction_list_update_sync()
   526  &md->md_furthest[txg & TXG_MASK];  in redaction_list_update_sync()
   560  md->md_synctask_txg[txg & TXG_MASK] = B_FALSE;  in redaction_list_update_sync()
   573  if (!md->md_synctask_txg[txg & TXG_MASK]) {  in commit_rl_updates()
   576  md->md_synctask_txg[txg & TXG_MASK] = B_TRUE;  in commit_rl_updates()
   579  md->md_furthest[txg & TXG_MASK].rbp_object = object;  in commit_rl_updates()
   580  md->md_furthest[txg & TXG_MASK].rbp_blkid = blkid;  in commit_rl_updates()
   581  list_move_tail(&md->md_blocks[txg & TXG_MASK],  in commit_rl_updates()

zil.c
   929  if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))  in zilog_is_dirty_in_txg()
  1207  os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;  in zil_claim()
  1237  os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;  in zil_claim()
  1520  ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0);  in zil_lwb_flush_vdevs_done()
  1521  zilog->zl_lwb_inflight[txg & TXG_MASK]--;  in zil_lwb_flush_vdevs_done()
  1522  if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0)  in zil_lwb_flush_vdevs_done()
  1537  while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0)  in zil_lwb_flush_wait_all()
  2088  zilog->zl_lwb_inflight[txg & TXG_MASK]++;  in zil_lwb_write_issue()
  2675  itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];  in zil_remove_async()
  2719  itxg = &zilog->zl_itxg[txg & TXG_MASK];  in zil_itx_assign()
  [all …]

vdev_trim.c
   233  uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK];  in vdev_trim_zap_update_sync()
   234  vd->vdev_trim_offset[txg & TXG_MASK] = 0;  in vdev_trim_zap_update_sync()
   402  &vd->vdev_trim_offset[zio->io_txg & TXG_MASK];  in vdev_trim_cb()
   537  vd->vdev_trim_offset[txg & TXG_MASK] == 0) {  in vdev_trim_range()
   565  vd->vdev_trim_offset[txg & TXG_MASK] = start + size;  in vdev_trim_range()
   575  zio_nowait(zio_trim(spa->spa_txg_zio[txg & TXG_MASK], vd,  in vdev_trim_range()

dsl_dir.c
  1170  ASSERT0(dd->dd_tempreserved[tx->tx_txg & TXG_MASK]);  in dsl_dir_sync()
  1172  (u_longlong_t)dd->dd_space_towrite[tx->tx_txg & TXG_MASK] / 1024);  in dsl_dir_sync()
  1173  dd->dd_space_towrite[tx->tx_txg & TXG_MASK] = 0;  in dsl_dir_sync()
  1188  space += dd->dd_space_towrite[i & TXG_MASK];  in dsl_dir_space_towrite()
  1379  dd->dd_tempreserved[txg & TXG_MASK] += asize;  in dsl_dir_tempreserve_impl()
  1477  int txgidx = tx->tx_txg & TXG_MASK;  in dsl_dir_tempreserve_clear()
  1521  dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;  in dsl_dir_willuse_space()

dnode_sync.c
    46  int txgoff = tx->tx_txg & TXG_MASK;  in dnode_increase_indirection()
   579  int txgoff = tx->tx_txg & TXG_MASK;  in dnode_sync_free()
   646  int txgoff = tx->tx_txg & TXG_MASK;  in dnode_sync()

metaslab.c
  2155  TXG_MASK]);  in metaslab_verify_space()
  3934  zfs_range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],  in metaslab_condense()
  4229  zfs_range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];  in metaslab_sync()
  4480  ASSERT0(zfs_range_tree_space(msp->ms_allocating[txg & TXG_MASK]));  in metaslab_sync()
  4482  & TXG_MASK]));  in metaslab_sync()
  4509  msp->ms_allocating[(txg + t) & TXG_MASK]));  in metaslab_evict()
  4645  ASSERT0(zfs_range_tree_space(msp->ms_allocating[txg & TXG_MASK]));  in metaslab_sync_done()
  4858  if (zfs_range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))  in metaslab_block_alloc()
  4861  zfs_range_tree_add(msp->ms_allocating[txg & TXG_MASK], start,  in metaslab_block_alloc()
  5750  zfs_range_tree_remove(msp->ms_allocating[txg & TXG_MASK],  in metaslab_unalloc_dva()
  [all …]

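Several of the metaslab hits index with an offset, e.g. (txg + t) & TXG_MASK in metaslab_condense() and metaslab_evict(): the loop visits the per-txg structure for every slot relative to the current txg, wrapping around the four-entry ring. A hedged sketch of that walk; the array and function names are hypothetical stand-ins for the ms_allocating range trees:

    #include <stdint.h>

    #define TXG_SIZE  4
    #define TXG_MASK  (TXG_SIZE - 1)

    /*
     * Sum a per-txg quantity across all ring slots, starting from the
     * slot belonging to 'txg' and wrapping around via the mask.
     */
    static uint64_t
    sum_all_slots(const uint64_t per_txg[TXG_SIZE], uint64_t txg)
    {
        uint64_t total = 0;
        for (int t = 0; t < TXG_SIZE; t++)
            total += per_txg[(txg + t) & TXG_MASK];
        return (total);
    }
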
dsl_dataset.c
  1303  os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;  in dsl_dataset_zero_zil()
  1485  !os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {  in dsl_dataset_dirty()
  2124  if (ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] != 0) {  in dsl_dataset_sync()
  2127  &ds->ds_resume_object[tx->tx_txg & TXG_MASK], tx));  in dsl_dataset_sync()
  2130  &ds->ds_resume_offset[tx->tx_txg & TXG_MASK], tx));  in dsl_dataset_sync()
  2133  &ds->ds_resume_bytes[tx->tx_txg & TXG_MASK], tx));  in dsl_dataset_sync()
  2134  ds->ds_resume_object[tx->tx_txg & TXG_MASK] = 0;  in dsl_dataset_sync()
  2135  ds->ds_resume_offset[tx->tx_txg & TXG_MASK] = 0;  in dsl_dataset_sync()
  2136  ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] = 0;  in dsl_dataset_sync()
  2305  os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE;  in dsl_dataset_sync_done()
  [all …]

brt.c
  1200  avl_tree_t *pending_tree = &brtvd->bv_pending_tree[txg & TXG_MASK];  in brt_pending_add()
  1241  avl_tree_t *pending_tree = &brtvd->bv_pending_tree[txg & TXG_MASK];  in brt_pending_remove()
  1270  avl_swap(&brtvd->bv_tree, &brtvd->bv_pending_tree[txg & TXG_MASK]);  in brt_pending_apply_vdev()

vdev_indirect.c
   554  &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);  in spa_condense_indirect_commit_sync()
   555  ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));  in spa_condense_indirect_commit_sync()
   573  int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;  in spa_condense_indirect_commit_entry()

dmu_objset.c
  1129  mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =  in dmu_objset_create_impl_dnstats()
  1605  int txgoff = tx->tx_txg & TXG_MASK;  in sync_meta_dnode_task()
  1677  os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {  in dmu_objset_sync()
  1711  txgoff = tx->tx_txg & TXG_MASK;  in dmu_objset_sync()
  1775  return (!multilist_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]));  in dmu_objset_is_dirty()

dmu_object.c
   481  dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type =  in dmu_object_zapify()

vdev_raidz.c
  3738  int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;  in raidz_reflow_sync()
  3894  vre->vre_bytes_copied_pertxg[rra->rra_txg & TXG_MASK] +=  in raidz_reflow_write_done()
  3956  int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;  in raidz_reflow_record_progress()
  4081  int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;  in raidz_reflow_impl()
  4403  int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;  in raidz_reflow_scratch_sync()
  4497  int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;  in vdev_raidz_reflow_copy_scratch()

dbuf.c
  2197  int txgoff = tx->tx_txg & TXG_MASK;  in dbuf_dirty_lightweight()
  2247  int txgoff = tx->tx_txg & TXG_MASK;  in dbuf_dirty()
  2436  dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||  in dbuf_dirty()
  2437  dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);  in dbuf_dirty()
  2599  list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);  in dbuf_undirty()
  4860  list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);  in dbuf_sync_leaf()

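dbuf_dirty() also looks backwards: dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] reads state recorded by a previous txg that may still be syncing. Because the mask is plain modulo-4 arithmetic, txg, txg-1, and txg-2 always land in three distinct slots, which is exactly why TXG_SIZE (4) must exceed TXG_CONCURRENT_STATES (3). A small worked check of that wrap-around:

    #include <assert.h>
    #include <stdint.h>

    #define TXG_SIZE  4
    #define TXG_MASK  (TXG_SIZE - 1)

    int
    main(void)
    {
        uint64_t txg = 10;
        /* the three concurrently live txgs map to distinct slots */
        assert(((txg)     & TXG_MASK) == 2);
        assert(((txg - 1) & TXG_MASK) == 1);
        assert(((txg - 2) & TXG_MASK) == 0);
        return (0);
    }
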
spa_stats.c
   415  ts->ndirty = dp->dp_dirty_pertxg[txg & TXG_MASK];  in spa_txg_history_init_io()

dmu.c
  1053  dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];  in dmu_free_long_range_impl()
  1080  dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=  in dmu_free_long_range_impl()

spa.c
  10157  bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
  10332  (void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
  10333  spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
  10439  ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);

dsl_crypt.c
  2142  os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;  in dsl_crypto_recv_raw_objset_sync()

dmu_recv.c
  1622  int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;  in save_resume_state()

/freebsd/sys/contrib/openzfs/include/sys/
txg.h
    43  #define TXG_MASK (TXG_SIZE - 1)  /* mask for size */
    45  #define TXG_IDX  (txg & TXG_MASK)
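
Note that TXG_IDX is an unhygienic convenience macro: it expands textually to (txg & TXG_MASK) and so binds to whatever variable named txg is in scope at the use site. A one-line illustration, with slot_of() as a hypothetical caller:

    #include <stdint.h>

    #define TXG_SIZE  4
    #define TXG_MASK  (TXG_SIZE - 1)
    #define TXG_IDX   (txg & TXG_MASK)

    static int
    slot_of(uint64_t txg)
    {
        return (TXG_IDX);  /* expands to (txg & TXG_MASK), using the parameter */
    }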