/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}
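
/*
 * Illustrative sketch (not part of this file's interface contract): a
 * typical consumer drives a tx through create, hold, assign, and then
 * commit or abort.  "os", "object", "off", and "len" are placeholders
 * assumed to be valid:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
 *		dmu_tx_abort(tx);	(truly out of space, or over quota)
 *	} else {
 *		... dirty the held buffers against tx ...
 *		dmu_tx_commit(tx);
 *	}
 */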

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(db->db_dnode == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}
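
/*
 * Worked example of the shifts above (illustrative numbers): a blkptr_t
 * is 1 << SPA_BLKPTRSHIFT == 128 bytes, so a 16K indirect block
 * (dn_indblkshift == 14) holds 2^(14 - 7) == 128 block pointers, giving
 * epbs == 7.  Each step of the recursion therefore shifts blkid right
 * by 7 to find the block that references it at the next level up.
 */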

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			db = dbuf_hold_level(dn, 0, start, FTAG);
			rw_exit(&dn->dn_struct_rwlock);
			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

	if (err)
		txh->txh_tx->tx_err = err;
}
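
/*
 * Illustrative arithmetic for the tail of dmu_tx_count_write() above,
 * assuming an object of unknown block size: with min_bs ==
 * SPA_MINBLOCKSHIFT (9) and min_ibs == DN_MIN_INDBLKSHIFT (10), epbs ==
 * 10 - 7 == 3, so the final loop charges at least one max-size indirect
 * block per level for roughly (64 - 9) / 3, i.e. about 19 levels.  Once
 * dn_maxblkid > 0 the block size is fixed, min_bs == max_bs, and the
 * estimate tightens accordingly.
 */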

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = txh->txh_tx->tx_objset->os_meta_dnode;
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}
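
/*
 * Example of the estimate above (illustrative numbers): with a 16K
 * meta-dnode data block and mdn->dn_nlevels == 3 with 16K indirect
 * blocks, space == 16K + 2 * 16K == 48K, charged to tooverwrite or
 * towrite depending on whether the dnode's current block is freeable.
 */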

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}
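
/*
 * Sketch (hypothetical caller): several holds may be stacked on one tx
 * before it is assigned, e.g. to update two objects in the same txg:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, obj_a, 0, size_a);
 *	dmu_tx_hold_write(tx, obj_b, 0, size_b);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 */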

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_level() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = (dn->dn_nlevels > 1) ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		dbuf = dbuf_hold_level(dn, 1, blkid >> epbs, FTAG);

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}
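
/*
 * Sketch (hypothetical caller): truncating an object from offset "off"
 * through the end declares the whole tail in one hold:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, off, DMU_OBJECT_END);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 */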

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);

	if (dn->dn_maxblkid == 0 && !add) {
		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    dn->dn_phys->dn_blkptr[0].blk_birth)) {
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		} else {
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		}
		if (dn->dn_phys->dn_blkptr[0].blk_birth)
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * Access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}
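
/*
 * Sketch (hypothetical caller): adding a directory entry holds the
 * directory's ZAP by name, plus the bonus for the attribute update:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_zap(tx, dir_object, TRUE, name);
 *	dmu_tx_hold_bonus(tx, dir_object);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 */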

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn = db->db_dnode;

	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj)
		return;

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT)
		return;

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset)
			return;
	}
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
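
/*
 * The TXG_NOWAIT protocol described above, as a caller might write it
 * (sketch; lock management and error paths elided):
 *
 * top:
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err == ERESTART) {
 *		(drop locks here)
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		goto top;
 *	}
 */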

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func,
    void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
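
/*
 * Sketch of commit-callback usage (hypothetical callback): "func" runs
 * once the assigned txg reaches stable storage, or with ECANCELED if
 * the tx is aborted instead:
 *
 *	static void my_done_cb(void *arg, int error) { ... }
 *
 *	dmu_tx_callback_register(tx, my_done_cb, arg);
 *	dmu_tx_commit(tx);
 */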

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_head(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

/*
 * Interface to hold a bunch of attributes.  Used for creating new
 * files.  attrsize is the total size of all attributes to be added
 * during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute names for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}


void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;
	blkptr_t *bp;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	bp = &dn->dn_phys->dn_spill;
	if (BP_IS_HOLE(bp)) {
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		txh->txh_space_tounref = 0;
	} else {
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (bp->blk_birth)
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow should be set when the update may add attributes or grow
 * the size of existing ones, since that can change the attribute
 * layout and force the data out to (or grow) the spill block.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill ||
	    ((dmu_buf_impl_t *)hdl->sa_bonus)->db_dnode->dn_have_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	}
}
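
/*
 * Sketch (hypothetical caller): updating an existing fixed-size SA
 * attribute in place, so may_grow is B_FALSE:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_sa(tx, hdl, B_FALSE);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		(void) sa_update(hdl, attr, &val, sizeof (val), tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */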
1353