Lines matching full:dn (cross-reference hits; every match is inside OpenZFS's dmu_tx.c)

43 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
113 dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type, in dmu_tx_hold_dnode_impl() argument
118 if (dn != NULL) { in dmu_tx_hold_dnode_impl()
119 (void) zfs_refcount_add(&dn->dn_holds, tx); in dmu_tx_hold_dnode_impl()
121 mutex_enter(&dn->dn_mtx); in dmu_tx_hold_dnode_impl()
123 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a in dmu_tx_hold_dnode_impl()
127 ASSERT(dn->dn_assigned_txg == 0); in dmu_tx_hold_dnode_impl()
128 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_hold_dnode_impl()
129 (void) zfs_refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_hold_dnode_impl()
130 mutex_exit(&dn->dn_mtx); in dmu_tx_hold_dnode_impl()
136 txh->txh_dnode = dn; in dmu_tx_hold_dnode_impl()
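
The dmu_tx_hold_dnode_impl() hits above show the core hold-taking path: the dnode gains a long-lived reference (dn_holds), and when the transaction already has a txg, the dnode is claimed for that txg and a transaction hold (dn_tx_holds) is added under dn_mtx before txh_dnode links the hold back to the dnode. A minimal user-space sketch of the same "claim under a mutex" pattern, with hypothetical demo_* names standing in for the kernel types:

	#include <assert.h>
	#include <pthread.h>
	#include <stdint.h>

	typedef struct demo_dnode {
		pthread_mutex_t	dn_mtx;
		uint64_t	dn_holds;	/* long-lived references */
		uint64_t	dn_tx_holds;	/* holds from open transactions */
		uint64_t	dn_assigned_txg;
	} demo_dnode_t;

	static void
	demo_hold(demo_dnode_t *dn, uint64_t txg)
	{
		/* Long-lived reference; the kernel uses zfs_refcount_add(). */
		__atomic_fetch_add(&dn->dn_holds, 1, __ATOMIC_SEQ_CST);

		pthread_mutex_lock(&dn->dn_mtx);
		assert(dn->dn_assigned_txg == 0);	/* not yet claimed */
		dn->dn_assigned_txg = txg;
		dn->dn_tx_holds++;
		pthread_mutex_unlock(&dn->dn_mtx);
	}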
151 dnode_t *dn = NULL; in dmu_tx_hold_object_impl() local
156 err = dnode_hold(os, object, FTAG, &dn); in dmu_tx_hold_object_impl()
162 txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2); in dmu_tx_hold_object_impl()
163 if (dn != NULL) in dmu_tx_hold_object_impl()
164 dnode_rele(dn, FTAG); in dmu_tx_hold_object_impl()
169 dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn) in dmu_tx_add_new_object() argument
176 (void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0); in dmu_tx_add_new_object()
208 dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid) in dmu_tx_check_ioerr() argument
213 rw_enter(&dn->dn_struct_rwlock, RW_READER); in dmu_tx_check_ioerr()
214 err = dbuf_hold_impl(dn, level, blkid, TRUE, FALSE, FTAG, &db); in dmu_tx_check_ioerr()
215 rw_exit(&dn->dn_struct_rwlock); in dmu_tx_check_ioerr()
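
dmu_tx_check_ioerr() reads the single block at (level, blkid) under dn_struct_rwlock so that media errors surface at hold time, where they can still fail the transaction cleanly, rather than inside an open txg. A NULL zio makes the check synchronous; otherwise the read is issued under a parent zio and the caller reaps all failures with one zio_wait(). A trimmed reconstruction of that batching shape (not the verbatim source):

	zio_t *zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL);

	/* A partial first block implies read-modify-write: check it now. */
	if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
		err = dmu_tx_check_ioerr(zio, dn, 0,
		    off >> dn->dn_datablkshift);
		if (err != 0)
			tx->tx_err = err;	/* surfaced at assign time */
	}

	/* Reap the parent zio; any child read error is reported here. */
	err = zio_wait(zio);
	if (err != 0)
		tx->tx_err = err;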
233 dnode_t *dn = txh->txh_dnode; in dmu_tx_count_write() local
241 if (dn == NULL) in dmu_tx_count_write()
250 if (dn->dn_maxblkid == 0) { in dmu_tx_count_write()
251 if (off < dn->dn_datablksz && in dmu_tx_count_write()
252 (off > 0 || len < dn->dn_datablksz)) { in dmu_tx_count_write()
253 err = dmu_tx_check_ioerr(NULL, dn, 0, 0); in dmu_tx_count_write()
259 zio_t *zio = zio_root(dn->dn_objset->os_spa, in dmu_tx_count_write()
263 uint64_t start = off >> dn->dn_datablkshift; in dmu_tx_count_write()
264 if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) { in dmu_tx_count_write()
265 err = dmu_tx_check_ioerr(zio, dn, 0, start); in dmu_tx_count_write()
272 uint64_t end = (off + len - 1) >> dn->dn_datablkshift; in dmu_tx_count_write()
273 if (end != start && end <= dn->dn_maxblkid && in dmu_tx_count_write()
274 P2PHASE(off + len, dn->dn_datablksz)) { in dmu_tx_count_write()
275 err = dmu_tx_check_ioerr(zio, dn, 0, end); in dmu_tx_count_write()
282 if (dn->dn_nlevels > 1) { in dmu_tx_count_write()
283 int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT; in dmu_tx_count_write()
286 err = dmu_tx_check_ioerr(zio, dn, 1, i); in dmu_tx_count_write()
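
The arithmetic in dmu_tx_count_write() only forces reads at the edges of the range: an aligned, block-sized overwrite needs no preexisting data, while a misaligned first or last block must be readable for read-modify-write. A self-contained demo of the same block-id math, with P2PHASE defined inline and hypothetical example values:

	#include <stdio.h>
	#include <stdint.h>

	#define	P2PHASE(x, align)	((x) & ((align) - 1))

	int
	main(void)
	{
		uint64_t off = 100000, len = 300000;
		uint64_t blksz = 131072;	/* 128K, so shift == 17 */
		int shift = 17;

		uint64_t start = off >> shift;
		uint64_t end = (off + len - 1) >> shift;

		printf("start blkid %llu (partial: %s)\n",
		    (unsigned long long)start,
		    P2PHASE(off, blksz) ? "yes" : "no");
		printf("end blkid %llu (partial: %s)\n",
		    (unsigned long long)end,
		    P2PHASE(off + len, blksz) ? "yes" : "no");
		return (0);
	}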
303 dnode_t *dn = txh->txh_dnode; in dmu_tx_count_append() local
311 if (dn == NULL) in dmu_tx_count_append()
319 if (dn->dn_maxblkid == 0) { in dmu_tx_count_append()
320 if (off < dn->dn_datablksz && in dmu_tx_count_append()
321 (off > 0 || len < dn->dn_datablksz)) { in dmu_tx_count_append()
322 err = dmu_tx_check_ioerr(NULL, dn, 0, 0); in dmu_tx_count_append()
328 zio_t *zio = zio_root(dn->dn_objset->os_spa, in dmu_tx_count_append()
332 uint64_t start = off >> dn->dn_datablkshift; in dmu_tx_count_append()
333 if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) { in dmu_tx_count_append()
334 err = dmu_tx_check_ioerr(zio, dn, 0, start); in dmu_tx_count_append()
372 dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len) in dmu_tx_hold_write_by_dnode() argument
380 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len); in dmu_tx_hold_write_by_dnode()
409 dmu_tx_hold_append_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len) in dmu_tx_hold_append_by_dnode() argument
416 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_APPEND, off, DMU_OBJECT_END); in dmu_tx_hold_append_by_dnode()
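
Both public entry points above are thin wrappers that funnel into dmu_tx_hold_dnode_impl() with a hold type. Note that the append hold records DMU_OBJECT_END rather than the caller's length: an append's final extent is unknown at hold time, so the hold conservatively covers through the end of the object, and dmu_tx_count_append() accordingly only ioerr-checks the first (possibly partial) block. Usage sketch, assuming the caller already holds dn:

	/* Declare an overwrite of [off, off + len)... */
	dmu_tx_hold_write_by_dnode(tx, dn, off, len);

	/* ...or an append at off; the hold itself spans to DMU_OBJECT_END. */
	dmu_tx_hold_append_by_dnode(tx, dn, off, len);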
441 dnode_t *dn = txh->txh_dnode; in dmu_tx_count_free() local
446 if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz) in dmu_tx_count_free()
449 len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off; in dmu_tx_count_free()
460 if (dn->dn_datablkshift == 0) { in dmu_tx_count_free()
461 if (off != 0 || len < dn->dn_datablksz) in dmu_tx_count_free()
462 dmu_tx_count_write(txh, 0, dn->dn_datablksz); in dmu_tx_count_free()
465 if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift)) in dmu_tx_count_free()
468 if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift)) in dmu_tx_count_free()
475 if (dn->dn_nlevels > 1) { in dmu_tx_count_free()
476 int shift = dn->dn_datablkshift + dn->dn_indblkshift - in dmu_tx_count_free()
481 ASSERT(dn->dn_indblkshift != 0); in dmu_tx_count_free()
488 if (dn->dn_datablkshift == 0) in dmu_tx_count_free()
495 err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0); in dmu_tx_count_free()
506 1 << dn->dn_indblkshift, FTAG); in dmu_tx_count_free()
508 err = dmu_tx_check_ioerr(zio, dn, 1, i); in dmu_tx_count_free()
537 dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len) in dmu_tx_hold_free_by_dnode() argument
541 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len); in dmu_tx_hold_free_by_dnode()
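
A free hold does extra accounting: unaligned range edges leave partial blocks behind, which dmu_tx_count_free() charges as writes via dmu_tx_count_write() (they are read-modify-write), and when there are multiple indirection levels it walks the level-1 indirect blocks with dnode_next_offset() so each one gets an ioerr check before the free can be assigned. Typical call, using DMU_OBJECT_END to free through the end of the object:

	/* Punch everything from off onward; DMU_OBJECT_END means "to EOF". */
	dmu_tx_hold_free_by_dnode(tx, dn, off, DMU_OBJECT_END);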
559 dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len) in dmu_tx_hold_clone_by_dnode() argument
566 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_CLONE, off, len); in dmu_tx_hold_clone_by_dnode()
577 dnode_t *dn = txh->txh_dnode; in dmu_tx_hold_zap_impl() local
596 if (dn == NULL) in dmu_tx_hold_zap_impl()
599 ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP); in dmu_tx_hold_zap_impl()
601 if (dn->dn_maxblkid == 0 || name == NULL) { in dmu_tx_hold_zap_impl()
606 err = dmu_tx_check_ioerr(NULL, dn, 0, 0); in dmu_tx_hold_zap_impl()
616 err = zap_lookup_by_dnode(dn, name, 8, 0, NULL); in dmu_tx_hold_zap_impl()
637 dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name) in dmu_tx_hold_zap_by_dnode() argument
642 ASSERT(dn != NULL); in dmu_tx_hold_zap_by_dnode()
644 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name); in dmu_tx_hold_zap_by_dnode()
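
ZAP holds are name-aware: for a fat ZAP (dn_maxblkid != 0) with a known name, zap_lookup_by_dnode() bounds which leaf blocks the entry can touch, while a micro-ZAP or a NULL name needs only an ioerr check of block 0. The DMU_OT_BYTESWAP assertion guards against taking a ZAP hold on a non-ZAP object. A hypothetical call site, as a directory-entry add might issue it:

	/* add == B_TRUE: the ZAP may grow; name bounds the blocks touched. */
	dmu_tx_hold_zap_by_dnode(tx, dn, B_TRUE, name);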
663 dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn) in dmu_tx_hold_bonus_by_dnode() argument
669 txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0); in dmu_tx_hold_bonus_by_dnode()
697 dnode_t *dn = DB_DNODE(db); in dmu_tx_dirty_buf() local
699 ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset); in dmu_tx_dirty_buf()
700 ASSERT3U(dn->dn_object, ==, db->db.db_object); in dmu_tx_dirty_buf()
715 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_dirty_buf()
716 if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT) in dmu_tx_dirty_buf()
718 if (txh->txh_dnode == NULL || txh->txh_dnode == dn) { in dmu_tx_dirty_buf()
719 int datablkshift = dn->dn_datablkshift ? in dmu_tx_dirty_buf()
720 dn->dn_datablkshift : SPA_MAXBLOCKSHIFT; in dmu_tx_dirty_buf()
721 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; in dmu_tx_dirty_buf()
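
dmu_tx_dirty_buf() exists only in debug builds: it asserts that every dbuf dirtied under the tx is covered by a matching hold on the same dnode, deriving the covered block-id range at each level from the hold's offset and length (datablkshift for level 0, plus epbs more bits per indirect level). A standalone demo of that containment math, with hypothetical shift values:

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		int datablkshift = 17;	/* 128K data blocks */
		int epbs = 10;		/* 1024 block pointers per indirect */
		int level = 1;
		uint64_t off = 8ULL << 30, len = 1ULL << 30;

		/* Each level up covers epbs more bits of the byte offset. */
		int shift = datablkshift + epbs * level;
		uint64_t beginblk = off >> shift;
		uint64_t endblk = (off + len - 1) >> shift;

		printf("level %d covers blkids %llu..%llu\n", level,
		    (unsigned long long)beginblk, (unsigned long long)endblk);
		return (0);
	}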
1076 dnode_t *dn = txh->txh_dnode; in dmu_tx_try_assign() local
1077 if (dn != NULL) { in dmu_tx_try_assign()
1095 ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock)); in dmu_tx_try_assign()
1097 mutex_enter(&dn->dn_mtx); in dmu_tx_try_assign()
1098 if (dn->dn_assigned_txg == tx->tx_txg - 1) { in dmu_tx_try_assign()
1099 mutex_exit(&dn->dn_mtx); in dmu_tx_try_assign()
1104 if (dn->dn_assigned_txg == 0) in dmu_tx_try_assign()
1105 dn->dn_assigned_txg = tx->tx_txg; in dmu_tx_try_assign()
1106 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_try_assign()
1107 (void) zfs_refcount_add(&dn->dn_tx_holds, tx); in dmu_tx_try_assign()
1108 mutex_exit(&dn->dn_mtx); in dmu_tx_try_assign()
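
At assign time, every hold's dnode is re-examined under dn_mtx. A dnode still assigned to the previous txg means that txg has not finished quiescing, so the assign bails with ERESTART and records the blocking hold in tx_needassign_txh so dmu_tx_wait() knows what to sleep on; otherwise the dnode is claimed for this txg and a tx hold is added. Reconstructed shape of that step (not the verbatim source):

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_assigned_txg == tx->tx_txg - 1) {
		/* Previous txg still draining: caller must wait and retry. */
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = txh;
		return (SET_ERROR(ERESTART));
	}
	if (dn->dn_assigned_txg == 0)
		dn->dn_assigned_txg = tx->tx_txg;
	ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
	(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
	mutex_exit(&dn->dn_mtx);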
1146 dnode_t *dn = txh->txh_dnode; in dmu_tx_unassign() local
1148 if (dn == NULL) in dmu_tx_unassign()
1150 mutex_enter(&dn->dn_mtx); in dmu_tx_unassign()
1151 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_unassign()
1153 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_unassign()
1154 dn->dn_assigned_txg = 0; in dmu_tx_unassign()
1155 cv_broadcast(&dn->dn_notxholds); in dmu_tx_unassign()
1157 mutex_exit(&dn->dn_mtx); in dmu_tx_unassign()
1280 dnode_t *dn = tx->tx_needassign_txh->txh_dnode; in dmu_tx_wait() local
1282 mutex_enter(&dn->dn_mtx); in dmu_tx_wait()
1283 while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1) in dmu_tx_wait()
1284 cv_wait(&dn->dn_notxholds, &dn->dn_mtx); in dmu_tx_wait()
1285 mutex_exit(&dn->dn_mtx); in dmu_tx_wait()
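
dmu_tx_wait() parks on dn_notxholds until the blocking txg's last hold is dropped; dmu_tx_unassign() and dmu_tx_commit() are the release side, clearing dn_assigned_txg and broadcasting when zfs_refcount_remove() returns zero. The same handshake in portable user-space C (demo_* names hypothetical):

	#include <pthread.h>
	#include <stdint.h>

	typedef struct demo_dnode {
		pthread_mutex_t	dn_mtx;
		pthread_cond_t	dn_notxholds;
		uint64_t	dn_tx_holds;
		uint64_t	dn_assigned_txg;
	} demo_dnode_t;

	/* Waiter: sleep until the old txg's holds drain (cf. dmu_tx_wait). */
	static void
	demo_wait(demo_dnode_t *dn, uint64_t old_txg)
	{
		pthread_mutex_lock(&dn->dn_mtx);
		while (dn->dn_assigned_txg == old_txg)
			pthread_cond_wait(&dn->dn_notxholds, &dn->dn_mtx);
		pthread_mutex_unlock(&dn->dn_mtx);
	}

	/* Releaser: last hold clears the txg and wakes waiters (cf. commit). */
	static void
	demo_release(demo_dnode_t *dn)
	{
		pthread_mutex_lock(&dn->dn_mtx);
		if (--dn->dn_tx_holds == 0) {
			dn->dn_assigned_txg = 0;
			pthread_cond_broadcast(&dn->dn_notxholds);
		}
		pthread_mutex_unlock(&dn->dn_mtx);
	}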
1305 dnode_t *dn = txh->txh_dnode; in dmu_tx_destroy() local
1313 if (dn != NULL) in dmu_tx_destroy()
1314 dnode_rele(dn, tx); in dmu_tx_destroy()
1333 dnode_t *dn = txh->txh_dnode; in dmu_tx_commit() local
1335 if (dn == NULL) in dmu_tx_commit()
1338 mutex_enter(&dn->dn_mtx); in dmu_tx_commit()
1339 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); in dmu_tx_commit()
1341 if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) { in dmu_tx_commit()
1342 dn->dn_assigned_txg = 0; in dmu_tx_commit()
1343 cv_broadcast(&dn->dn_notxholds); in dmu_tx_commit()
1345 mutex_exit(&dn->dn_mtx); in dmu_tx_commit()
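
Read together, these hits trace a transaction's whole life: the hold functions pin dnodes (dn_holds) and declare intent, dmu_tx_try_assign() claims a txg per dnode (dn_assigned_txg, dn_tx_holds), dmu_tx_commit() drops the tx holds and wakes waiters on dn_notxholds, and dmu_tx_destroy() releases the dnode references. The canonical consumer sequence looks like this (a sketch; TXG_WAIT is the long-standing spelling of the assign flag):

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, off, len);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);	/* drops holds; tx never assigned */
		return (error);
	}
	dmu_write(os, object, off, len, buf, tx);
	dmu_tx_commit(tx);		/* releases tx holds, may wake waiters */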