Lines matching refs: tx — cross-reference hits for the identifier tx in the OpenZFS DMU transaction code (dmu_tx.c). Each entry shows the source line number, the matching text, and the enclosing function, with tx flagged as a local or an argument.

45 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
69 dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP); in dmu_tx_create_dd() local
70 tx->tx_dir = dd; in dmu_tx_create_dd()
72 tx->tx_pool = dd->dd_pool; in dmu_tx_create_dd()
73 list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t), in dmu_tx_create_dd()
75 list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t), in dmu_tx_create_dd()
77 tx->tx_start = gethrtime(); in dmu_tx_create_dd()
78 return (tx); in dmu_tx_create_dd()
84 dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir); in dmu_tx_create() local
85 tx->tx_objset = os; in dmu_tx_create()
86 return (tx); in dmu_tx_create()
92 dmu_tx_t *tx = dmu_tx_create_dd(NULL); in dmu_tx_create_assigned() local
95 tx->tx_pool = dp; in dmu_tx_create_assigned()
96 tx->tx_txg = txg; in dmu_tx_create_assigned()
97 tx->tx_anyobj = TRUE; in dmu_tx_create_assigned()
99 return (tx); in dmu_tx_create_assigned()
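
Three constructors, three contexts: dmu_tx_create_dd() is the common core, dmu_tx_create() wraps it for ordinary open-context callers and records the objset, and dmu_tx_create_assigned() is reserved for syncing context, where the txg is already fixed and tx_anyobj is set so per-dnode hold bookkeeping is skipped. A minimal sketch of the distinction (os, dp, and txg are assumed to be in scope):

	/* Open context: no txg chosen yet; holds and assignment follow. */
	dmu_tx_t *otx = dmu_tx_create(os);

	/* Syncing context only: the txg is already open. */
	dmu_tx_t *stx = dmu_tx_create_assigned(dp, txg);
	ASSERT(dmu_tx_is_syncing(stx));
	ASSERT(!dmu_tx_is_syncing(otx));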
103 dmu_tx_is_syncing(dmu_tx_t *tx) in dmu_tx_is_syncing() argument
105 return (tx->tx_anyobj); in dmu_tx_is_syncing()
109 dmu_tx_private_ok(dmu_tx_t *tx) in dmu_tx_private_ok() argument
111 return (tx->tx_anyobj); in dmu_tx_private_ok()
115 dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type, in dmu_tx_hold_dnode_impl() argument
121 (void) zfs_refcount_add(&dn->dn_holds, tx); in dmu_tx_hold_dnode_impl()
122 if (tx->tx_txg != 0) { in dmu_tx_hold_dnode_impl()
130 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_hold_dnode_impl()
131 (void) zfs_refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_hold_dnode_impl()
137 txh->txh_tx = tx; in dmu_tx_hold_dnode_impl()
144 list_insert_tail(&tx->tx_holds, txh); in dmu_tx_hold_dnode_impl()
150 dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object, in dmu_tx_hold_object_impl() argument
160 tx->tx_err = err; in dmu_tx_hold_object_impl()
164 txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2); in dmu_tx_hold_object_impl()
171 dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn) in dmu_tx_add_new_object() argument
177 if (!dmu_tx_is_syncing(tx)) in dmu_tx_add_new_object()
178 (void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0); in dmu_tx_add_new_object()
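
dmu_tx_add_new_object() records a hold for an object allocated inside an already-assigned transaction (skipping the bookkeeping in syncing context, per the dmu_tx_is_syncing() check above). Callers reach it indirectly: they declare intent with DMU_NEW_OBJECT before assignment, then allocate. A hedged sketch with illustrative object and bonus types:

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);	/* room for a future object */
	if (dmu_tx_assign(tx, DMU_TX_WAIT) == 0) {
		/* allocation registers the new dnode with the tx */
		uint64_t obj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
		    DMU_OT_NONE, 0, tx);
		/* ... initialize the object under obj ... */
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}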
357 dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len) in dmu_tx_hold_write() argument
361 ASSERT0(tx->tx_txg); in dmu_tx_hold_write()
365 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_write()
374 dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len) in dmu_tx_hold_write_by_dnode() argument
378 ASSERT0(tx->tx_txg); in dmu_tx_hold_write_by_dnode()
382 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len); in dmu_tx_hold_write_by_dnode()
395 dmu_tx_hold_append(dmu_tx_t *tx, uint64_t object, uint64_t off, int len) in dmu_tx_hold_append() argument
399 ASSERT0(tx->tx_txg); in dmu_tx_hold_append()
402 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_append()
411 dmu_tx_hold_append_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len) in dmu_tx_hold_append_by_dnode() argument
415 ASSERT0(tx->tx_txg); in dmu_tx_hold_append_by_dnode()
418 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_APPEND, off, DMU_OBJECT_END); in dmu_tx_hold_append_by_dnode()
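
The *_by_dnode variants skip the object-number lookup that dmu_tx_hold_object_impl() performs; note also that an append hold always extends to DMU_OBJECT_END (line 418), because the true end offset is unknown until the txg is assigned. The two write-hold forms are otherwise equivalent:

	dmu_tx_hold_write(tx, object, off, len);	/* resolves object -> dnode */
	dmu_tx_hold_write_by_dnode(tx, dn, off, len);	/* caller already holds dn */

The full hold/assign/commit cycle is sketched after dmu_tx_assign() below.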
434 dmu_tx_mark_netfree(dmu_tx_t *tx) in dmu_tx_mark_netfree() argument
436 tx->tx_netfree = B_TRUE; in dmu_tx_mark_netfree()
442 dmu_tx_t *tx = txh->txh_tx; in dmu_tx_count_free() local
446 ASSERT(tx->tx_txg == 0); in dmu_tx_count_free()
493 zio_t *zio = zio_root(tx->tx_pool->dp_spa, in dmu_tx_count_free()
502 tx->tx_err = err; in dmu_tx_count_free()
512 tx->tx_err = err; in dmu_tx_count_free()
519 tx->tx_err = err; in dmu_tx_count_free()
526 dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len) in dmu_tx_hold_free() argument
530 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_free()
539 dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len) in dmu_tx_hold_free_by_dnode() argument
543 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len); in dmu_tx_hold_free_by_dnode()
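
dmu_tx_count_free() walks the to-be-freed range (issuing prefetch reads under a zio_root, line 493) so the worst-case bookkeeping cost of the free can be charged, while dmu_tx_mark_netfree() asserts that the transaction frees at least as much space as it consumes, letting it proceed against an otherwise full quota. A hedged hole-punching sketch:

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_free(tx, object, off, len);
	dmu_tx_mark_netfree(tx);		/* net effect releases space */
	if (dmu_tx_assign(tx, DMU_TX_WAIT) == 0) {
		(void) dmu_free_range(os, object, off, len, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}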
554 dmu_tx_t *tx = txh->txh_tx; in dmu_tx_count_clone() local
558 ASSERT0(tx->tx_txg); in dmu_tx_count_clone()
573 zio_t *zio = zio_root(tx->tx_pool->dp_spa, in dmu_tx_count_clone()
578 tx->tx_err = err; in dmu_tx_count_clone()
584 tx->tx_err = err; in dmu_tx_count_clone()
588 dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, in dmu_tx_hold_clone_by_dnode() argument
593 ASSERT0(tx->tx_txg); in dmu_tx_hold_clone_by_dnode()
596 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_CLONE, off, len); in dmu_tx_hold_clone_by_dnode()
606 dmu_tx_t *tx = txh->txh_tx; in dmu_tx_hold_zap_impl() local
610 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_zap_impl()
624 zap_get_micro_max_size(tx->tx_pool->dp_spa), FTAG); in dmu_tx_hold_zap_impl()
638 tx->tx_err = err; in dmu_tx_hold_zap_impl()
648 tx->tx_err = err; in dmu_tx_hold_zap_impl()
654 dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name) in dmu_tx_hold_zap() argument
658 ASSERT0(tx->tx_txg); in dmu_tx_hold_zap()
660 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_zap()
667 dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name) in dmu_tx_hold_zap_by_dnode() argument
671 ASSERT0(tx->tx_txg); in dmu_tx_hold_zap_by_dnode()
674 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name); in dmu_tx_hold_zap_by_dnode()
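
For ZAP holds, the add flag (arg1) tells the hold whether the operation may insert entries and thus grow the ZAP; the micro-ZAP upgrade cost is bounded by zap_get_micro_max_size() at line 624. A sketch of adding one entry (zapobj and name are assumed valid):

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, zapobj, B_TRUE, name);	/* B_TRUE: may add */
	if (dmu_tx_assign(tx, DMU_TX_WAIT) == 0) {
		uint64_t value = 1;
		(void) zap_add(os, zapobj, name, sizeof (uint64_t), 1,
		    &value, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}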
680 dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object) in dmu_tx_hold_bonus() argument
684 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_bonus()
686 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_bonus()
693 dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn) in dmu_tx_hold_bonus_by_dnode() argument
697 ASSERT0(tx->tx_txg); in dmu_tx_hold_bonus_by_dnode()
699 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0); in dmu_tx_hold_bonus_by_dnode()
705 dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space) in dmu_tx_hold_space() argument
709 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_space()
711 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, in dmu_tx_hold_space()
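
A bonus hold dirties only the dnode and its bonus buffer, while dmu_tx_hold_space() charges raw space against no particular object (it is taken as a hold on DMU_NEW_OBJECT, line 711). Sketch:

	dmu_tx_hold_bonus(tx, object);		/* bonus buffer only */
	dmu_tx_hold_space(tx, 1ULL << 20);	/* ~1 MiB, unattributed */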
721 dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db) in dmu_tx_dirty_buf() argument
728 ASSERT(tx->tx_txg != 0); in dmu_tx_dirty_buf()
729 ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset); in dmu_tx_dirty_buf()
732 if (tx->tx_anyobj) { in dmu_tx_dirty_buf()
743 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL; in dmu_tx_dirty_buf()
744 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_dirty_buf()
745 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_dirty_buf()
957 dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty) in dmu_tx_delay() argument
959 dsl_pool_t *dp = tx->tx_pool; in dmu_tx_delay()
995 if (now > tx->tx_start + tx_time) in dmu_tx_delay()
998 DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty, in dmu_tx_delay()
1002 wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time); in dmu_tx_delay()
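
dmu_tx_delay() implements the write throttle: once the pool's dirty data passes the delay threshold, each assignment sleeps along a curve that grows without bound as dirty data approaches zfs_dirty_data_max, and dp_last_wakeup (line 1002) serializes the wakeups so concurrent transactions see cumulative delay. A hedged sketch of the curve's shape (parameter names mirror the tunables; the clamping and nanosecond bookkeeping of the real function are elided):

	static uint64_t
	example_delay_ns(uint64_t dirty, uint64_t dirty_max,
	    uint64_t delay_min_bytes, uint64_t delay_scale)
	{
		if (dirty <= delay_min_bytes)
			return (0);
		/* grows toward infinity as dirty -> dirty_max */
		return (delay_scale * (dirty - delay_min_bytes) /
		    (dirty_max - dirty));
	}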
1049 dmu_tx_try_assign(dmu_tx_t *tx) in dmu_tx_try_assign() argument
1051 spa_t *spa = tx->tx_pool->dp_spa; in dmu_tx_try_assign()
1053 ASSERT0(tx->tx_txg); in dmu_tx_try_assign()
1055 if (tx->tx_err) { in dmu_tx_try_assign()
1070 if (!tx->tx_dirty_delayed && in dmu_tx_try_assign()
1071 dsl_pool_need_wrlog_delay(tx->tx_pool)) { in dmu_tx_try_assign()
1072 tx->tx_wait_dirty = B_TRUE; in dmu_tx_try_assign()
1077 if (!tx->tx_dirty_delayed && in dmu_tx_try_assign()
1078 dsl_pool_need_dirty_delay(tx->tx_pool)) { in dmu_tx_try_assign()
1079 tx->tx_wait_dirty = B_TRUE; in dmu_tx_try_assign()
1084 tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh); in dmu_tx_try_assign()
1085 tx->tx_needassign_txh = NULL; in dmu_tx_try_assign()
1095 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL; in dmu_tx_try_assign()
1096 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_try_assign()
1119 if (dn->dn_assigned_txg == tx->tx_txg - 1) { in dmu_tx_try_assign()
1121 tx->tx_needassign_txh = txh; in dmu_tx_try_assign()
1126 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_try_assign()
1127 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_try_assign()
1128 (void) zfs_refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_try_assign()
1136 uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite); in dmu_tx_try_assign()
1140 if (tx->tx_dir != NULL && asize != 0) { in dmu_tx_try_assign()
1141 int err = dsl_dir_tempreserve_space(tx->tx_dir, memory, in dmu_tx_try_assign()
1142 asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx); in dmu_tx_try_assign()
1153 dmu_tx_unassign(dmu_tx_t *tx) in dmu_tx_unassign() argument
1155 if (tx->tx_txg == 0) in dmu_tx_unassign()
1158 txg_rele_to_quiesce(&tx->tx_txgh); in dmu_tx_unassign()
1164 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); in dmu_tx_unassign()
1165 txh && txh != tx->tx_needassign_txh; in dmu_tx_unassign()
1166 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_unassign()
1172 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_unassign()
1174 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_unassign()
1181 txg_rele_to_sync(&tx->tx_txgh); in dmu_tx_unassign()
1183 tx->tx_lasttried_txg = tx->tx_txg; in dmu_tx_unassign()
1184 tx->tx_txg = 0; in dmu_tx_unassign()
1231 dmu_tx_assign(dmu_tx_t *tx, dmu_tx_flag_t flags) in dmu_tx_assign() argument
1235 ASSERT(tx->tx_txg == 0); in dmu_tx_assign()
1238 ASSERT(!dsl_pool_sync_context(tx->tx_pool)); in dmu_tx_assign()
1241 IMPLY((flags & DMU_TX_WAIT), !dsl_pool_config_held(tx->tx_pool)); in dmu_tx_assign()
1244 tx->tx_dirty_delayed = B_TRUE; in dmu_tx_assign()
1247 tx->tx_break_on_suspend = B_TRUE; in dmu_tx_assign()
1249 while ((err = dmu_tx_try_assign(tx)) != 0) { in dmu_tx_assign()
1250 dmu_tx_unassign(tx); in dmu_tx_assign()
1266 spa_get_failmode(tx->tx_pool->dp_spa) == in dmu_tx_assign()
1306 tx->tx_break_on_suspend = B_FALSE; in dmu_tx_assign()
1308 dmu_tx_wait(tx); in dmu_tx_assign()
1316 tx->tx_break_on_suspend = B_TRUE; in dmu_tx_assign()
1319 txg_rele_to_quiesce(&tx->tx_txgh); in dmu_tx_assign()
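
dmu_tx_assign() ties the pieces together: it retries dmu_tx_try_assign() (unassigning between attempts), honors DMU_TX_WAIT by sleeping in dmu_tx_wait() rather than returning ERESTART, and toggles tx_break_on_suspend around the wait so a suspended pool can still interrupt it. The canonical consumer sequence, as a sketch with a single write hold:

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, off, len);
	int err = dmu_tx_assign(tx, DMU_TX_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);	/* e.g. EDQUOT/ENOSPC, or a hold error */
		return (err);
	}
	dmu_write(os, object, off, len, buf, tx);	/* covered by the hold */
	dmu_tx_commit(tx);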
1325 dmu_tx_wait(dmu_tx_t *tx) in dmu_tx_wait() argument
1327 spa_t *spa = tx->tx_pool->dp_spa; in dmu_tx_wait()
1328 dsl_pool_t *dp = tx->tx_pool; in dmu_tx_wait()
1331 ASSERT(tx->tx_txg == 0); in dmu_tx_wait()
1332 ASSERT(!dsl_pool_config_held(tx->tx_pool)); in dmu_tx_wait()
1342 (tx->tx_break_on_suspend ? TXG_WAIT_SUSPEND : TXG_WAIT_NONE); in dmu_tx_wait()
1346 if (tx->tx_wait_dirty) { in dmu_tx_wait()
1362 dmu_tx_delay(tx, dirty); in dmu_tx_wait()
1364 tx->tx_wait_dirty = B_FALSE; in dmu_tx_wait()
1372 tx->tx_dirty_delayed = B_TRUE; in dmu_tx_wait()
1373 } else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) { in dmu_tx_wait()
1382 } else if (tx->tx_needassign_txh) { in dmu_tx_wait()
1383 dnode_t *dn = tx->tx_needassign_txh->txh_dnode; in dmu_tx_wait()
1386 while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1) in dmu_tx_wait()
1389 tx->tx_needassign_txh = NULL; in dmu_tx_wait()
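
dmu_tx_wait() must be called on an unassigned transaction (assert at line 1331); depending on why assignment failed, it throttles on dirty data via dmu_tx_delay(), waits for a txg to quiesce, or spins until the conflicting dnode (tx_needassign_txh) moves past the contended txg. Callers that pass DMU_TX_NOWAIT use it in the classic retry idiom, sketched here:

	top:
		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, object, off, len);
		err = dmu_tx_assign(tx, DMU_TX_NOWAIT);
		if (err == ERESTART) {
			dmu_tx_wait(tx);	/* tx is still unassigned */
			dmu_tx_abort(tx);
			goto top;		/* rebuild holds, retry */
		} else if (err != 0) {
			dmu_tx_abort(tx);
			return (err);
		}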
1403 dmu_tx_destroy(dmu_tx_t *tx) in dmu_tx_destroy() argument
1407 while ((txh = list_head(&tx->tx_holds)) != NULL) { in dmu_tx_destroy()
1410 list_remove(&tx->tx_holds, txh); in dmu_tx_destroy()
1417 dnode_rele(dn, tx); in dmu_tx_destroy()
1420 list_destroy(&tx->tx_callbacks); in dmu_tx_destroy()
1421 list_destroy(&tx->tx_holds); in dmu_tx_destroy()
1422 kmem_free(tx, sizeof (dmu_tx_t)); in dmu_tx_destroy()
1426 dmu_tx_commit(dmu_tx_t *tx) in dmu_tx_commit() argument
1429 ASSERT(tx->tx_txg != 0); in dmu_tx_commit()
1435 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL; in dmu_tx_commit()
1436 txh = list_next(&tx->tx_holds, txh)) { in dmu_tx_commit()
1443 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_commit()
1445 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_commit()
1452 if (tx->tx_tempreserve_cookie) in dmu_tx_commit()
1453 dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx); in dmu_tx_commit()
1455 if (!list_is_empty(&tx->tx_callbacks)) in dmu_tx_commit()
1456 txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks); in dmu_tx_commit()
1458 if (tx->tx_anyobj == FALSE) in dmu_tx_commit()
1459 txg_rele_to_sync(&tx->tx_txgh); in dmu_tx_commit()
1461 dmu_tx_destroy(tx); in dmu_tx_commit()
1465 dmu_tx_abort(dmu_tx_t *tx) in dmu_tx_abort() argument
1468 ASSERT0(tx->tx_txg); in dmu_tx_abort()
1471 if (tx->tx_tempreserve_cookie) in dmu_tx_abort()
1472 dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx); in dmu_tx_abort()
1477 if (!list_is_empty(&tx->tx_callbacks)) in dmu_tx_abort()
1478 dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED)); in dmu_tx_abort()
1481 dmu_tx_unassign(tx); in dmu_tx_abort()
1483 dmu_tx_destroy(tx); in dmu_tx_abort()
1487 dmu_tx_get_txg(dmu_tx_t *tx) in dmu_tx_get_txg() argument
1489 ASSERT(tx->tx_txg != 0); in dmu_tx_get_txg()
1490 return (tx->tx_txg); in dmu_tx_get_txg()
1494 dmu_tx_pool(dmu_tx_t *tx) in dmu_tx_pool() argument
1496 ASSERT(tx->tx_pool != NULL); in dmu_tx_pool()
1497 return (tx->tx_pool); in dmu_tx_pool()
1508 dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data) in dmu_tx_callback_register() argument
1517 list_insert_tail(&tx->tx_callbacks, dcb); in dmu_tx_callback_register()
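
Commit callbacks registered here are handed to txg_register_callbacks() at commit (line 1456) and fire once the txg reaches stable storage, or with ECANCELED if the transaction is aborted instead (line 1478). A sketch (example_done and its argument are illustrative):

	static void
	example_done(void *arg, int error)
	{
		/* error == 0 after sync; ECANCELED if the tx was aborted */
		kmem_free(arg, sizeof (uint64_t));
	}

	...
	dmu_tx_callback_register(tx, example_done, arg);
	dmu_tx_commit(tx);	/* callback runs when the txg syncs */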
1549 dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx) in dmu_tx_sa_registration_hold() argument
1557 dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj, in dmu_tx_sa_registration_hold()
1560 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, in dmu_tx_sa_registration_hold()
1567 dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object) in dmu_tx_hold_spill() argument
1571 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object, in dmu_tx_hold_spill()
1579 dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize) in dmu_tx_hold_sa_create() argument
1581 sa_os_t *sa = tx->tx_objset->os_sa; in dmu_tx_hold_sa_create()
1583 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); in dmu_tx_hold_sa_create()
1585 if (tx->tx_objset->os_sa->sa_master_obj == 0) in dmu_tx_hold_sa_create()
1588 if (tx->tx_objset->os_sa->sa_layout_attr_obj) { in dmu_tx_hold_sa_create()
1589 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); in dmu_tx_hold_sa_create()
1591 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); in dmu_tx_hold_sa_create()
1592 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY); in dmu_tx_hold_sa_create()
1593 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa_create()
1594 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa_create()
1597 dmu_tx_sa_registration_hold(sa, tx); in dmu_tx_hold_sa_create()
1602 (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT, in dmu_tx_hold_sa_create()
1616 dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow) in dmu_tx_hold_sa() argument
1619 sa_os_t *sa = tx->tx_objset->os_sa; in dmu_tx_hold_sa()
1627 dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db)); in dmu_tx_hold_sa()
1630 if (tx->tx_objset->os_sa->sa_master_obj == 0) in dmu_tx_hold_sa()
1633 if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 || in dmu_tx_hold_sa()
1634 tx->tx_objset->os_sa->sa_layout_attr_obj == 0) { in dmu_tx_hold_sa()
1635 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS); in dmu_tx_hold_sa()
1636 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY); in dmu_tx_hold_sa()
1637 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa()
1638 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL); in dmu_tx_hold_sa()
1641 dmu_tx_sa_registration_hold(sa, tx); in dmu_tx_hold_sa()
1643 if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj) in dmu_tx_hold_sa()
1644 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL); in dmu_tx_hold_sa()
1647 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_sa()
1648 dmu_tx_hold_spill(tx, object); in dmu_tx_hold_sa()
1652 ASSERT(tx->tx_txg == 0); in dmu_tx_hold_sa()
1653 dmu_tx_hold_spill(tx, object); in dmu_tx_hold_sa()
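
The system-attribute holds bundle everything an SA update may touch: the bonus buffer, the layout and registry ZAPs when they may change, and a spill hold in both the may_grow and already-spilled cases (lines 1648 and 1653). A hedged sketch of covering an update through an existing handle (hdl, attr_id, and val are placeholders):

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_sa(tx, hdl, B_FALSE);	/* B_FALSE: no growth/relayout */
	if (dmu_tx_assign(tx, DMU_TX_WAIT) == 0) {
		(void) sa_update(hdl, attr_id, &val, sizeof (val), tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}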