Lines Matching +full:dcb +full:-algorithm
1 // SPDX-License-Identifier: CDDL-1.0
10 * or https://opensource.org/licenses/CDDL-1.0.
70 tx->tx_dir = dd; in dmu_tx_create_dd()
72 tx->tx_pool = dd->dd_pool; in dmu_tx_create_dd()
73 list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t), in dmu_tx_create_dd()
75 list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t), in dmu_tx_create_dd()
77 tx->tx_start = gethrtime(); in dmu_tx_create_dd()
84 dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir); in dmu_tx_create()
85 tx->tx_objset = os; in dmu_tx_create()
94 TXG_VERIFY(dp->dp_spa, txg); in dmu_tx_create_assigned()
95 tx->tx_pool = dp; in dmu_tx_create_assigned()
96 tx->tx_txg = txg; in dmu_tx_create_assigned()
97 tx->tx_anyobj = TRUE; in dmu_tx_create_assigned()
105 return (tx->tx_anyobj); in dmu_tx_is_syncing()
111 return (tx->tx_anyobj); in dmu_tx_private_ok()
121 (void) zfs_refcount_add(&dn->dn_holds, tx); in dmu_tx_hold_dnode_impl()
122 if (tx->tx_txg != 0) { in dmu_tx_hold_dnode_impl()
123 mutex_enter(&dn->dn_mtx); in dmu_tx_hold_dnode_impl()
125 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a in dmu_tx_hold_dnode_impl()
129 ASSERT0(dn->dn_assigned_txg); in dmu_tx_hold_dnode_impl()
130 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_hold_dnode_impl()
131 (void) zfs_refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_hold_dnode_impl()
132 mutex_exit(&dn->dn_mtx); in dmu_tx_hold_dnode_impl()
137 txh->txh_tx = tx; in dmu_tx_hold_dnode_impl()
138 txh->txh_dnode = dn; in dmu_tx_hold_dnode_impl()
139 zfs_refcount_create(&txh->txh_space_towrite); in dmu_tx_hold_dnode_impl()
140 zfs_refcount_create(&txh->txh_memory_tohold); in dmu_tx_hold_dnode_impl()
141 txh->txh_type = type; in dmu_tx_hold_dnode_impl()
142 txh->txh_arg1 = arg1; in dmu_tx_hold_dnode_impl()
143 txh->txh_arg2 = arg2; in dmu_tx_hold_dnode_impl()
144 list_insert_tail(&tx->tx_holds, txh); in dmu_tx_hold_dnode_impl()
160 tx->tx_err = err; in dmu_tx_hold_object_impl()
183 * be needed to perform the transaction -- i.e., it will be read after
198 * layer). Typically code to do so does not exist in the caller -- it
215 rw_enter(&dn->dn_struct_rwlock, RW_READER); in dmu_tx_check_ioerr()
217 rw_exit(&dn->dn_struct_rwlock); in dmu_tx_check_ioerr()
235 dnode_t *dn = txh->txh_dnode; in dmu_tx_count_write()
241 (void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG); in dmu_tx_count_write()
248 * to perform the write: the first and last level-0 blocks (if in dmu_tx_count_write()
249 * they are not aligned, i.e. if they are partial-block writes), in dmu_tx_count_write()
250 * and all the level-1 blocks. in dmu_tx_count_write()
252 if (dn->dn_maxblkid == 0) { in dmu_tx_count_write()
253 if (off < dn->dn_datablksz && in dmu_tx_count_write()
254 (off > 0 || len < dn->dn_datablksz)) { in dmu_tx_count_write()
257 txh->txh_tx->tx_err = err; in dmu_tx_count_write()
261 zio_t *zio = zio_root(dn->dn_objset->os_spa, in dmu_tx_count_write()
264 /* first level-0 block */ in dmu_tx_count_write()
265 uint64_t start = off >> dn->dn_datablkshift; in dmu_tx_count_write()
266 if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) { in dmu_tx_count_write()
269 txh->txh_tx->tx_err = err; in dmu_tx_count_write()
273 /* last level-0 block */ in dmu_tx_count_write()
274 uint64_t end = (off + len - 1) >> dn->dn_datablkshift; in dmu_tx_count_write()
275 if (end != start && end <= dn->dn_maxblkid && in dmu_tx_count_write()
276 P2PHASE(off + len, dn->dn_datablksz)) { in dmu_tx_count_write()
279 txh->txh_tx->tx_err = err; in dmu_tx_count_write()
283 /* level-1 blocks */ in dmu_tx_count_write()
284 if (dn->dn_nlevels > 1) { in dmu_tx_count_write()
285 int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT; in dmu_tx_count_write()
290 txh->txh_tx->tx_err = err; in dmu_tx_count_write()
297 txh->txh_tx->tx_err = err; in dmu_tx_count_write()
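To make the i/o error accounting in dmu_tx_count_write() above concrete, here is a small standalone sketch (illustrative only; the 128 KiB block size and the sample offsets are assumptions, not values taken from this listing) that uses the same shift arithmetic to decide which level-0 blocks a partial overwrite would need to read. In the sample, blocks 0 and 2 are partially overwritten and would be read, while block 1 is fully overwritten and would not; when dn_nlevels > 1, the level-1 blocks covering the same range would be read as well.

#include <stdint.h>
#include <stdio.h>

/* Nonzero if 'off' is not aligned to a (1 << shift)-byte boundary. */
static int
is_partial(uint64_t off, int shift)
{
	return ((off & ((1ULL << shift) - 1)) != 0);
}

int
main(void)
{
	const int datablkshift = 17;		/* assumed 128 KiB data blocks */
	uint64_t off = 100ULL * 1024;		/* write starts at 100 KiB */
	uint64_t len = 200ULL * 1024;		/* and covers 200 KiB */

	uint64_t start = off >> datablkshift;		/* first level-0 block */
	uint64_t end = (off + len - 1) >> datablkshift;	/* last level-0 block */

	printf("level-0 blocks touched: %llu..%llu\n",
	    (unsigned long long)start, (unsigned long long)end);
	printf("read first block: %s\n",
	    is_partial(off, datablkshift) ? "yes (partial)" : "no (aligned)");
	printf("read last block:  %s\n",
	    (end != start && is_partial(off + len, datablkshift)) ?
	    "yes (partial)" : "no");
	return (0);
}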
305 dnode_t *dn = txh->txh_dnode; in dmu_tx_count_append()
311 (void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG); in dmu_tx_count_append()
318 * to perform the append: the first level-0 block (if not aligned, i.e. in dmu_tx_count_append()
319 * a partial-block write); no additional blocks are read. in dmu_tx_count_append()
321 if (dn->dn_maxblkid == 0) { in dmu_tx_count_append()
322 if (off < dn->dn_datablksz && in dmu_tx_count_append()
323 (off > 0 || len < dn->dn_datablksz)) { in dmu_tx_count_append()
326 txh->txh_tx->tx_err = err; in dmu_tx_count_append()
330 zio_t *zio = zio_root(dn->dn_objset->os_spa, in dmu_tx_count_append()
333 /* first level-0 block */ in dmu_tx_count_append()
334 uint64_t start = off >> dn->dn_datablkshift; in dmu_tx_count_append()
335 if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) { in dmu_tx_count_append()
338 txh->txh_tx->tx_err = err; in dmu_tx_count_append()
344 txh->txh_tx->tx_err = err; in dmu_tx_count_append()
352 (void) zfs_refcount_add_many(&txh->txh_space_towrite, in dmu_tx_count_dnode()
361 ASSERT0(tx->tx_txg); in dmu_tx_hold_write()
363 ASSERT(len == 0 || UINT64_MAX - off >= len - 1); in dmu_tx_hold_write()
365 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_write()
378 ASSERT0(tx->tx_txg); in dmu_tx_hold_write_by_dnode()
380 ASSERT(len == 0 || UINT64_MAX - off >= len - 1); in dmu_tx_hold_write_by_dnode()
399 ASSERT0(tx->tx_txg); in dmu_tx_hold_append()
402 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_append()
415 ASSERT0(tx->tx_txg); in dmu_tx_hold_append_by_dnode()
436 tx->tx_netfree = B_TRUE; in dmu_tx_mark_netfree()
442 dmu_tx_t *tx = txh->txh_tx; in dmu_tx_count_free()
443 dnode_t *dn = txh->txh_dnode; in dmu_tx_count_free()
446 ASSERT0(tx->tx_txg); in dmu_tx_count_free()
448 if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz) in dmu_tx_count_free()
451 len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off; in dmu_tx_count_free()
454 * For i/o error checking, we read the first and last level-0 in dmu_tx_count_free()
455 * blocks if they are not aligned, and all the level-1 blocks. in dmu_tx_count_free()
458 * any level-0 dbufs that will be completely freed. Therefore we must in dmu_tx_count_free()
460 * if they are blocksize-aligned. in dmu_tx_count_free()
462 if (dn->dn_datablkshift == 0) { in dmu_tx_count_free()
463 if (off != 0 || len < dn->dn_datablksz) in dmu_tx_count_free()
464 dmu_tx_count_write(txh, 0, dn->dn_datablksz); in dmu_tx_count_free()
467 if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift)) in dmu_tx_count_free()
470 if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift)) in dmu_tx_count_free()
475 * Check level-1 blocks. in dmu_tx_count_free()
477 if (dn->dn_nlevels > 1) { in dmu_tx_count_free()
478 int shift = dn->dn_datablkshift + dn->dn_indblkshift - in dmu_tx_count_free()
483 ASSERT(dn->dn_indblkshift != 0); in dmu_tx_count_free()
490 if (dn->dn_datablkshift == 0) in dmu_tx_count_free()
493 zio_t *zio = zio_root(tx->tx_pool->dp_spa, in dmu_tx_count_free()
502 tx->tx_err = err; in dmu_tx_count_free()
507 (void) zfs_refcount_add_many(&txh->txh_memory_tohold, in dmu_tx_count_free()
508 1 << dn->dn_indblkshift, FTAG); in dmu_tx_count_free()
512 tx->tx_err = err; in dmu_tx_count_free()
519 tx->tx_err = err; in dmu_tx_count_free()
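The shift computed in dmu_tx_count_free() above is the number of bytes of file data addressed by a single level-1 indirect block, which is why the loop charges one indirect block of memory per 1 << shift bytes of the freed range. A hedged worked example, assuming 128 KiB data blocks and 128 KiB indirect blocks (neither value comes from this listing; SPA_BLKPTRSHIFT is 7 because a block pointer is 128 bytes):

#include <stdio.h>

int
main(void)
{
	const int spa_blkptrshift = 7;	/* sizeof (blkptr_t) == 128 bytes */
	const int datablkshift = 17;	/* assumed 128 KiB data blocks */
	const int indblkshift = 17;	/* assumed 128 KiB indirect blocks */

	/* Bytes of file data covered by one level-1 indirect block. */
	int shift = datablkshift + indblkshift - spa_blkptrshift;

	/* 1024 block pointers per indirect block * 128 KiB each = 128 MiB. */
	printf("shift = %d, coverage = %llu MiB\n", shift,
	    (unsigned long long)((1ULL << shift) >> 20));
	return (0);
}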
530 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_free()
554 dmu_tx_t *tx = txh->txh_tx; in dmu_tx_count_clone()
555 dnode_t *dn = txh->txh_dnode; in dmu_tx_count_clone()
558 ASSERT0(tx->tx_txg); in dmu_tx_count_clone()
559 ASSERT(dn->dn_indblkshift != 0); in dmu_tx_count_clone()
563 (void) zfs_refcount_add_many(&txh->txh_memory_tohold, in dmu_tx_count_clone()
566 int shift = dn->dn_indblkshift - SPA_BLKPTRSHIFT; in dmu_tx_count_clone()
570 (void) zfs_refcount_add_many(&txh->txh_space_towrite, in dmu_tx_count_clone()
571 (end - start + 1) << dn->dn_indblkshift, FTAG); in dmu_tx_count_clone()
573 zio_t *zio = zio_root(tx->tx_pool->dp_spa, in dmu_tx_count_clone()
578 tx->tx_err = err; in dmu_tx_count_clone()
584 tx->tx_err = err; in dmu_tx_count_clone()
593 ASSERT0(tx->tx_txg); in dmu_tx_hold_clone_by_dnode()
594 ASSERT(len == 0 || UINT64_MAX - off >= len - 1); in dmu_tx_hold_clone_by_dnode()
606 dmu_tx_t *tx = txh->txh_tx; in dmu_tx_hold_zap_impl()
607 dnode_t *dn = txh->txh_dnode; in dmu_tx_hold_zap_impl()
610 ASSERT0(tx->tx_txg); in dmu_tx_hold_zap_impl()
615 * Modifying an almost-full microzap is around the worst case (128KB) in dmu_tx_hold_zap_impl()
618 * - 3 blocks overwritten: target leaf, ptrtbl block, header block in dmu_tx_hold_zap_impl()
619 * - 4 new blocks written if adding: in dmu_tx_hold_zap_impl()
620 * - 2 blocks for possibly split leaves, in dmu_tx_hold_zap_impl()
621 * - 2 grown ptrtbl blocks in dmu_tx_hold_zap_impl()
623 (void) zfs_refcount_add_many(&txh->txh_space_towrite, in dmu_tx_hold_zap_impl()
624 zap_get_micro_max_size(tx->tx_pool->dp_spa), FTAG); in dmu_tx_hold_zap_impl()
629 ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP); in dmu_tx_hold_zap_impl()
631 if (dn->dn_maxblkid == 0 || name == NULL) { in dmu_tx_hold_zap_impl()
638 tx->tx_err = err; in dmu_tx_hold_zap_impl()
648 tx->tx_err = err; in dmu_tx_hold_zap_impl()
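For the hold charged in dmu_tx_hold_zap_impl() above, the two worst cases named in the comment come out to comparable sizes: an almost-full microzap is bounded by zap_get_micro_max_size() (128 KiB with the traditional microzap limit), while the fat-zap case is 3 overwritten blocks plus 4 newly written ones, i.e. 7 blocks, which at the customary 16 KiB fat-zap block size is 7 × 16 KiB = 112 KiB. The block sizes quoted here are commonly used defaults, not values read from this listing.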
658 ASSERT0(tx->tx_txg); in dmu_tx_hold_zap()
660 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_zap()
671 ASSERT0(tx->tx_txg); in dmu_tx_hold_zap_by_dnode()
684 ASSERT0(tx->tx_txg); in dmu_tx_hold_bonus()
686 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_bonus()
697 ASSERT0(tx->tx_txg); in dmu_tx_hold_bonus_by_dnode()
709 ASSERT0(tx->tx_txg); in dmu_tx_hold_space()
711 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_space()
715 &txh->txh_space_towrite, space, FTAG); in dmu_tx_hold_space()
728 ASSERT(tx->tx_txg != 0); in dmu_tx_dirty_buf()
729 ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset); in dmu_tx_dirty_buf()
730 ASSERT3U(dn->dn_object, ==, db->db.db_object); in dmu_tx_dirty_buf()
732 if (tx->tx_anyobj) { in dmu_tx_dirty_buf()
738 if (db->db.db_object == DMU_META_DNODE_OBJECT) { in dmu_tx_dirty_buf()
743 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL; in dmu_tx_dirty_buf()
744 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_dirty_buf()
745 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_dirty_buf()
746 if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT) in dmu_tx_dirty_buf()
748 if (txh->txh_dnode == NULL || txh->txh_dnode == dn) { in dmu_tx_dirty_buf()
749 int datablkshift = dn->dn_datablkshift ? in dmu_tx_dirty_buf()
750 dn->dn_datablkshift : SPA_MAXBLOCKSHIFT; in dmu_tx_dirty_buf()
751 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; in dmu_tx_dirty_buf()
752 int shift = datablkshift + epbs * db->db_level; in dmu_tx_dirty_buf()
754 (txh->txh_arg1 >> shift); in dmu_tx_dirty_buf()
756 ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift); in dmu_tx_dirty_buf()
757 uint64_t blkid = db->db_blkid; in dmu_tx_dirty_buf()
762 txh->txh_type, (u_longlong_t)beginblk, in dmu_tx_dirty_buf()
765 switch (txh->txh_type) { in dmu_tx_dirty_buf()
788 txh->txh_arg2 == DMU_OBJECT_END)) in dmu_tx_dirty_buf()
813 txh->txh_arg2 == DMU_OBJECT_END)) in dmu_tx_dirty_buf()
844 txh->txh_type); in dmu_tx_dirty_buf()
854 (u_longlong_t)db->db.db_object, db->db_level, in dmu_tx_dirty_buf()
855 (u_longlong_t)db->db_blkid); in dmu_tx_dirty_buf()
879 * min_time = scale * (dirty - min) / (max - dirty)
* [two ASCII graphs elided: transaction delay plotted against dirty data from
* 0% to 100% of zfs_dirty_data_max, once on a linear scale capped at 10ms and
* once on a logarithmic scale up to 100ms, with arrows marking the curve
* governed by zfs_delay_scale]
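As a sanity check on the min_time formula above, here is a tiny standalone calculation; the tunable values are illustrative assumptions, not defaults read from this listing. With scale = 500,000 ns, a dirty-data cap of 1 GiB, delay beginning at 60% dirty, and 80% of the cap currently dirty, the formula gives a 0.5 ms minimum transaction time, and the delay grows without bound as dirty approaches the cap.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t scale = 500000;		/* assumed zfs_delay_scale, in ns */
	uint64_t max = 1ULL << 30;		/* assumed zfs_dirty_data_max */
	uint64_t min = max * 60 / 100;		/* assumed delay_min_bytes (60%) */
	uint64_t dirty = max * 80 / 100;	/* currently dirty: 80% of max */

	/* Same shape as the formula above: scale * (dirty - min) / (max - dirty). */
	uint64_t min_time = scale * (dirty - min) / (max - dirty);
	printf("min_time = %llu ns (0.5 ms)\n", (unsigned long long)min_time);
	return (0);
}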
959 dsl_pool_t *dp = tx->tx_pool; in dmu_tx_delay()
971 * could cause a divide-by-zero if it's == the max. in dmu_tx_delay()
975 tx_time = zfs_delay_scale * (dirty - delay_min_bytes) / in dmu_tx_delay()
976 (zfs_dirty_data_max - dirty); in dmu_tx_delay()
980 wrlog = aggsum_upper_bound(&dp->dp_wrlog_total); in dmu_tx_delay()
986 tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) / in dmu_tx_delay()
987 (zfs_wrlog_data_max - wrlog), tx_time); in dmu_tx_delay()
995 if (now > tx->tx_start + tx_time) in dmu_tx_delay()
1001 mutex_enter(&dp->dp_lock); in dmu_tx_delay()
1002 wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time); in dmu_tx_delay()
1003 dp->dp_last_wakeup = wakeup; in dmu_tx_delay()
1004 mutex_exit(&dp->dp_lock); in dmu_tx_delay()
1028 * - this transaction's holds' txh_space_towrite
1030 * - dd_tempreserved[], which is the sum of in-flight transactions'
1034 * - dd_space_towrite[], which is the amount of dirtied dbufs.
1043 * Note that due to this algorithm, it is possible to exceed the allowed
1051 spa_t *spa = tx->tx_pool->dp_spa; in dmu_tx_try_assign()
1053 ASSERT0(tx->tx_txg); in dmu_tx_try_assign()
1055 if (tx->tx_err) { in dmu_tx_try_assign()
1070 if (!tx->tx_dirty_delayed && in dmu_tx_try_assign()
1071 dsl_pool_need_wrlog_delay(tx->tx_pool)) { in dmu_tx_try_assign()
1072 tx->tx_wait_dirty = B_TRUE; in dmu_tx_try_assign()
1077 if (!tx->tx_dirty_delayed && in dmu_tx_try_assign()
1078 dsl_pool_need_dirty_delay(tx->tx_pool)) { in dmu_tx_try_assign()
1079 tx->tx_wait_dirty = B_TRUE; in dmu_tx_try_assign()
1084 tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh); in dmu_tx_try_assign()
1085 tx->tx_needassign_txh = NULL; in dmu_tx_try_assign()
1095 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL; in dmu_tx_try_assign()
1096 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_try_assign()
1097 dnode_t *dn = txh->txh_dnode; in dmu_tx_try_assign()
1116 ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock)); in dmu_tx_try_assign()
1118 mutex_enter(&dn->dn_mtx); in dmu_tx_try_assign()
1119 if (dn->dn_assigned_txg == tx->tx_txg - 1) { in dmu_tx_try_assign()
1120 mutex_exit(&dn->dn_mtx); in dmu_tx_try_assign()
1121 tx->tx_needassign_txh = txh; in dmu_tx_try_assign()
1125 if (dn->dn_assigned_txg == 0) in dmu_tx_try_assign()
1126 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_try_assign()
1127 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_try_assign()
1128 (void) zfs_refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_try_assign()
1129 mutex_exit(&dn->dn_mtx); in dmu_tx_try_assign()
1131 towrite += zfs_refcount_count(&txh->txh_space_towrite); in dmu_tx_try_assign()
1132 tohold += zfs_refcount_count(&txh->txh_memory_tohold); in dmu_tx_try_assign()
1135 /* needed allocation: worst-case estimate of write space */ in dmu_tx_try_assign()
1136 uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite); in dmu_tx_try_assign()
1140 if (tx->tx_dir != NULL && asize != 0) { in dmu_tx_try_assign()
1141 int err = dsl_dir_tempreserve_space(tx->tx_dir, memory, in dmu_tx_try_assign()
1142 asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx); in dmu_tx_try_assign()
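The list in the comment above dmu_tx_try_assign() names the three quantities that feed the dsl_dir_tempreserve_space() call shown above: this transaction's write space (towrite, inflated to a worst-case asize by spa_get_worst_case_asize()), other in-flight transactions' dd_tempreserved[], and already-dirtied dd_space_towrite[]. A loose, hypothetical restatement of that check follows; the function and parameter names are invented for illustration, and the real logic (including the modest overshoot the comment says is possible) lives in dsl_dir_tempreserve_space().

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical sketch only; not the OpenZFS implementation. */
static bool
would_exceed_quota(uint64_t tx_towrite_asize,	/* this tx, worst-case asize */
    uint64_t dd_tempreserved_total,		/* other in-flight transactions */
    uint64_t dd_space_towrite_total,		/* already-dirtied dbufs */
    uint64_t quota_remaining)
{
	return (tx_towrite_asize + dd_tempreserved_total +
	    dd_space_towrite_total > quota_remaining);
}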
1155 if (tx->tx_txg == 0) in dmu_tx_unassign()
1158 txg_rele_to_quiesce(&tx->tx_txgh); in dmu_tx_unassign()
1164 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); in dmu_tx_unassign()
1165 txh && txh != tx->tx_needassign_txh; in dmu_tx_unassign()
1166 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_unassign()
1167 dnode_t *dn = txh->txh_dnode; in dmu_tx_unassign()
1171 mutex_enter(&dn->dn_mtx); in dmu_tx_unassign()
1172 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_unassign()
1174 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_unassign()
1175 dn->dn_assigned_txg = 0; in dmu_tx_unassign()
1176 cv_broadcast(&dn->dn_notxholds); in dmu_tx_unassign()
1178 mutex_exit(&dn->dn_mtx); in dmu_tx_unassign()
1181 txg_rele_to_sync(&tx->tx_txgh); in dmu_tx_unassign()
1183 tx->tx_lasttried_txg = tx->tx_txg; in dmu_tx_unassign()
1184 tx->tx_txg = 0; in dmu_tx_unassign()
1224 * 1 <- dmu_tx_get_txg(T1)
1226 * 2 <- dmu_tx_get_txg(T2)
1228 * 1 <- dmu_tx_get_txg(T3)
1235 ASSERT0(tx->tx_txg); in dmu_tx_assign()
1238 ASSERT(!dsl_pool_sync_context(tx->tx_pool)); in dmu_tx_assign()
1241 IMPLY((flags & DMU_TX_WAIT), !dsl_pool_config_held(tx->tx_pool)); in dmu_tx_assign()
1244 tx->tx_dirty_delayed = B_TRUE; in dmu_tx_assign()
1247 tx->tx_break_on_suspend = B_TRUE; in dmu_tx_assign()
1266 spa_get_failmode(tx->tx_pool->dp_spa) == in dmu_tx_assign()
1306 tx->tx_break_on_suspend = B_FALSE; in dmu_tx_assign()
1316 tx->tx_break_on_suspend = B_TRUE; in dmu_tx_assign()
1319 txg_rele_to_quiesce(&tx->tx_txgh); in dmu_tx_assign()
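Putting the fragments above together, a typical DMU consumer drives the hold/assign/commit cycle roughly as sketched below. This is a minimal illustration, not code from this file: example_write() is hypothetical, the DMU_TX_WAIT flag follows the dmu_tx_assign() fragments above (older releases spelled it TXG_WAIT), and with DMU_TX_WAIT the assign may block in dmu_tx_wait(), whose fragments follow.

#include <sys/dmu.h>

static int
example_write(objset_t *os, uint64_t object, uint64_t off, int len,
    const void *buf)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	/* Declare intent; the hold may read blocks for i/o error checking. */
	dmu_tx_hold_write(tx, object, off, len);

	/* Wait, if necessary, for a txg that can accept this transaction. */
	err = dmu_tx_assign(tx, DMU_TX_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);	/* never assigned: abort, not commit */
		return (err);
	}

	dmu_write(os, object, off, len, buf, tx);
	dmu_tx_commit(tx);
	return (0);
}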
1327 spa_t *spa = tx->tx_pool->dp_spa; in dmu_tx_wait()
1328 dsl_pool_t *dp = tx->tx_pool; in dmu_tx_wait()
1331 ASSERT0(tx->tx_txg); in dmu_tx_wait()
1332 ASSERT(!dsl_pool_config_held(tx->tx_pool)); in dmu_tx_wait()
1342 (tx->tx_break_on_suspend ? TXG_WAIT_SUSPEND : TXG_WAIT_NONE); in dmu_tx_wait()
1346 if (tx->tx_wait_dirty) { in dmu_tx_wait()
1354 mutex_enter(&dp->dp_lock); in dmu_tx_wait()
1355 if (dp->dp_dirty_total >= zfs_dirty_data_max) in dmu_tx_wait()
1357 while (dp->dp_dirty_total >= zfs_dirty_data_max) in dmu_tx_wait()
1358 cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock); in dmu_tx_wait()
1359 dirty = dp->dp_dirty_total; in dmu_tx_wait()
1360 mutex_exit(&dp->dp_lock); in dmu_tx_wait()
1364 tx->tx_wait_dirty = B_FALSE; in dmu_tx_wait()
1372 tx->tx_dirty_delayed = B_TRUE; in dmu_tx_wait()
1373 } else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) { in dmu_tx_wait()
1382 } else if (tx->tx_needassign_txh) { in dmu_tx_wait()
1383 dnode_t *dn = tx->tx_needassign_txh->txh_dnode; in dmu_tx_wait()
1385 mutex_enter(&dn->dn_mtx); in dmu_tx_wait()
1386 while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1) in dmu_tx_wait()
1387 cv_wait(&dn->dn_notxholds, &dn->dn_mtx); in dmu_tx_wait()
1388 mutex_exit(&dn->dn_mtx); in dmu_tx_wait()
1389 tx->tx_needassign_txh = NULL; in dmu_tx_wait()
1399 spa_tx_assign_add_nsecs(spa, gethrtime() - before); in dmu_tx_wait()
1407 while ((txh = list_head(&tx->tx_holds)) != NULL) { in dmu_tx_destroy()
1408 dnode_t *dn = txh->txh_dnode; in dmu_tx_destroy()
1410 list_remove(&tx->tx_holds, txh); in dmu_tx_destroy()
1411 zfs_refcount_destroy_many(&txh->txh_space_towrite, in dmu_tx_destroy()
1412 zfs_refcount_count(&txh->txh_space_towrite)); in dmu_tx_destroy()
1413 zfs_refcount_destroy_many(&txh->txh_memory_tohold, in dmu_tx_destroy()
1414 zfs_refcount_count(&txh->txh_memory_tohold)); in dmu_tx_destroy()
1420 list_destroy(&tx->tx_callbacks); in dmu_tx_destroy()
1421 list_destroy(&tx->tx_holds); in dmu_tx_destroy()
1429 ASSERT(tx->tx_txg != 0); in dmu_tx_commit()
1435 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL; in dmu_tx_commit()
1436 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_commit()
1437 dnode_t *dn = txh->txh_dnode; in dmu_tx_commit()
1442 mutex_enter(&dn->dn_mtx); in dmu_tx_commit()
1443 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_commit()
1445 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_commit()
1446 dn->dn_assigned_txg = 0; in dmu_tx_commit()
1447 cv_broadcast(&dn->dn_notxholds); in dmu_tx_commit()
1449 mutex_exit(&dn->dn_mtx); in dmu_tx_commit()
1452 if (tx->tx_tempreserve_cookie) in dmu_tx_commit()
1453 dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx); in dmu_tx_commit()
1455 if (!list_is_empty(&tx->tx_callbacks)) in dmu_tx_commit()
1456 txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks); in dmu_tx_commit()
1458 if (tx->tx_anyobj == FALSE) in dmu_tx_commit()
1459 txg_rele_to_sync(&tx->tx_txgh); in dmu_tx_commit()
1468 ASSERT0(tx->tx_txg); in dmu_tx_abort()
1471 if (tx->tx_tempreserve_cookie) in dmu_tx_abort()
1472 dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx); in dmu_tx_abort()
1477 if (!list_is_empty(&tx->tx_callbacks)) in dmu_tx_abort()
1478 dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED)); in dmu_tx_abort()
1489 ASSERT(tx->tx_txg != 0); in dmu_tx_get_txg()
1490 return (tx->tx_txg); in dmu_tx_get_txg()
1496 ASSERT(tx->tx_pool != NULL); in dmu_tx_pool()
1497 return (tx->tx_pool); in dmu_tx_pool()
1510 dmu_tx_callback_t *dcb; in dmu_tx_callback_register() local
1512 dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP); in dmu_tx_callback_register()
1514 dcb->dcb_func = func; in dmu_tx_callback_register()
1515 dcb->dcb_data = data; in dmu_tx_callback_register()
1517 list_insert_tail(&tx->tx_callbacks, dcb); in dmu_tx_callback_register()
1526 dmu_tx_callback_t *dcb; in dmu_tx_do_callbacks() local
1528 while ((dcb = list_remove_tail(cb_list)) != NULL) { in dmu_tx_do_callbacks()
1529 dcb->dcb_func(dcb->dcb_data, error); in dmu_tx_do_callbacks()
1530 kmem_free(dcb, sizeof (dmu_tx_callback_t)); in dmu_tx_do_callbacks()
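dmu_tx_callback_register() above queues a callback that dmu_tx_do_callbacks() later invokes: normally with error 0 after the transaction's txg has synced, or with an error such as the ECANCELED passed by the dmu_tx_abort() fragment above if the transaction never commits. A minimal, hypothetical usage sketch; example_done() and its context type are invented for illustration.

#include <sys/dmu.h>

/* Hypothetical completion context, for illustration only. */
typedef struct example_done_ctx {
	uint64_t edc_offset;
} example_done_ctx_t;

static void
example_done(void *arg, int error)
{
	example_done_ctx_t *edc = arg;

	/* error == 0 on commit; nonzero (e.g. ECANCELED) if the tx was aborted. */
	(void) edc;
	(void) error;
}

/* Register before dmu_tx_commit(); the callback fires after the txg syncs. */
static void
example_register(dmu_tx_t *tx, example_done_ctx_t *edc)
{
	dmu_tx_callback_register(tx, example_done, edc);
}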
1551 if (!sa->sa_need_attr_registration) in dmu_tx_sa_registration_hold()
1554 for (int i = 0; i != sa->sa_num_attrs; i++) { in dmu_tx_sa_registration_hold()
1555 if (!sa->sa_attr_table[i].sa_registered) { in dmu_tx_sa_registration_hold()
1556 if (sa->sa_reg_attr_obj) in dmu_tx_sa_registration_hold()
1557 dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj, in dmu_tx_sa_registration_hold()
1558 B_TRUE, sa->sa_attr_table[i].sa_name); in dmu_tx_sa_registration_hold()
1561 B_TRUE, sa->sa_attr_table[i].sa_name); in dmu_tx_sa_registration_hold()
1571 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object, in dmu_tx_hold_spill()
1574 (void) zfs_refcount_add_many(&txh->txh_space_towrite, in dmu_tx_hold_spill()
1581 sa_os_t *sa = tx->tx_objset->os_sa; in dmu_tx_hold_sa_create()
1585 if (tx->tx_objset->os_sa->sa_master_obj == 0) in dmu_tx_hold_sa_create()
1588 if (tx->tx_objset->os_sa->sa_layout_attr_obj) { in dmu_tx_hold_sa_create()
1589 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); in dmu_tx_hold_sa_create()
1591 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); in dmu_tx_hold_sa_create()
1592 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY); in dmu_tx_hold_sa_create()
1599 if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill) in dmu_tx_hold_sa_create()
1602 (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT, in dmu_tx_hold_sa_create()
1619 sa_os_t *sa = tx->tx_objset->os_sa; in dmu_tx_hold_sa()
1625 dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus; in dmu_tx_hold_sa()
1630 if (tx->tx_objset->os_sa->sa_master_obj == 0) in dmu_tx_hold_sa()
1633 if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 || in dmu_tx_hold_sa()
1634 tx->tx_objset->os_sa->sa_layout_attr_obj == 0) { in dmu_tx_hold_sa()
1635 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); in dmu_tx_hold_sa()
1636 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY); in dmu_tx_hold_sa()
1643 if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj) in dmu_tx_hold_sa()
1644 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); in dmu_tx_hold_sa()
1646 if (sa->sa_force_spill || may_grow || hdl->sa_spill) { in dmu_tx_hold_sa()
1647 ASSERT0(tx->tx_txg); in dmu_tx_hold_sa()
1651 if (DB_DNODE(db)->dn_have_spill) { in dmu_tx_hold_sa()
1652 ASSERT0(tx->tx_txg); in dmu_tx_hold_sa()
1667 dmu_tx_ksp->ks_data = &dmu_tx_stats; in dmu_tx_init()