xref: /titanic_52/usr/src/uts/common/fs/zfs/dmu_tx.c (revision 0605fe789584720c74945c982c61a3f934642c02)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

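/*
 * Allocate and initialize a transaction against the given dsl_dir.
 * Most consumers should use dmu_tx_create() instead; this variant is
 * for callers that have a dsl_dir but no objset.
 */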
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

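/*
 * Create a transaction against the given objset.  This is the normal
 * entry point for DMU consumers.
 */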
dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

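/*
 * Create a transaction that is already assigned to the given txg and
 * may touch any object (tx_anyobj).  Used only from syncing context.
 */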
dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

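/*
 * Record a hold on <os, object> in the tx's hold list.  If the object
 * already exists, take a dnode hold (and, if the tx is already
 * assigned, a tx hold on the dnode as well).  On failure, set
 * tx->tx_err and return NULL.
 */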
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

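/*
 * Read the block at <level, blkid> now so that any i/o error is
 * discovered before the tx is assigned, rather than at sync time when
 * it can no longer be returned to the caller.
 */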
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

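/*
 * Charge the space for one "twig" (a block and the indirect blocks
 * above it) to the hold, walking up the indirect tree and using
 * history[] to avoid double-counting an indirect block that was
 * already visited.
 */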
static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

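/*
 * Estimate the space consumed by writing [off, off+len), charging it
 * to txh_space_towrite or txh_space_tooverwrite as appropriate, and
 * pre-read the affected blocks so that i/o errors surface now instead
 * of at sync time.
 */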
/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

	if (err)
		txh->txh_tx->tx_err = err;
}

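/*
 * Account for dirtying the dnode itself: one meta-dnode data block
 * plus one indirect block per meta-dnode level above it.
 */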
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

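/*
 * Hold the object so that the byte range [off, off+len) may be written.
 */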
void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

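/*
 * Estimate the space freed (and the memory needed to do the freeing)
 * by deleting [off, off+len) from the object, scanning the existing
 * level-1 indirect blocks to see which blocks are actually freeable.
 */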
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = (dn->dn_nlevels > 1) ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

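/*
 * Hold the object so that [off, off+len) may be freed; len may be
 * DMU_OBJECT_END to free everything from off to the end of the object.
 */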
void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

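/*
 * Hold a ZAP object for the addition (add != 0) or removal of an
 * entry; if name is non-NULL, it names the entry to be modified.
 */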
void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

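/*
 * Hold an object so that its bonus buffer may be dirtied (object may
 * be DMU_NEW_OBJECT).
 */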
void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

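/*
 * Reserve an arbitrary amount of write space without holding any
 * particular object.
 */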
void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

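/*
 * Return the number of holds this (assigned) tx has on the given object.
 */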
int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
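/*
 * Debug check, called when a dbuf is dirtied: verify that some hold on
 * this tx covers the dbuf, matching both the object and (for
 * offset-based hold types) the block being dirtied.  Panics if no
 * matching hold is found.
 */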
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

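/*
 * Attempt to assign the tx to the currently open txg: take tx holds on
 * all held dnodes, total up the per-hold space estimates, and
 * temporarily reserve that space in the dsl_dir.  Returns ERESTART if
 * the caller needs to wait and retry.
 */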
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly.
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

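/*
 * Undo a failed dmu_tx_try_assign(): release the txg hold and drop the
 * tx holds on every dnode that was assigned (i.e. everything before
 * tx_needassign_txh in the hold list).
 */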
static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
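/*
 * A typical non-syncing consumer follows this pattern (a sketch; os,
 * object, off, and len stand for whatever the caller is modifying):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err == ERESTART) {
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		... drop locks and retry from the top ...
 *	} else if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	... make the changes covered by the holds above ...
 *	dmu_tx_commit(tx);
 */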
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

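/*
 * Wait for the condition that caused the most recent assignment
 * failure to clear: a suspended pool, a dnode still assigned to the
 * previous txg, or a full open txg.
 */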
void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

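/*
 * Debug accounting of the space actually written or freed, checked
 * against the estimates made at hold time (ZFS_DEBUG builds only).
 */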
void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

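/*
 * Commit the transaction: drop all dnode tx holds, clear the temporary
 * space reservation, and hand any registered callbacks to the txg so
 * they run once it has synced.
 */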
1125 dmu_tx_commit(dmu_tx_t *tx)
1126 {
1127 	dmu_tx_hold_t *txh;
1128 
1129 	ASSERT(tx->tx_txg != 0);
1130 
1131 	while (txh = list_head(&tx->tx_holds)) {
1132 		dnode_t *dn = txh->txh_dnode;
1133 
1134 		list_remove(&tx->tx_holds, txh);
1135 		kmem_free(txh, sizeof (dmu_tx_hold_t));
1136 		if (dn == NULL)
1137 			continue;
1138 		mutex_enter(&dn->dn_mtx);
1139 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1140 
1141 		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1142 			dn->dn_assigned_txg = 0;
1143 			cv_broadcast(&dn->dn_notxholds);
1144 		}
1145 		mutex_exit(&dn->dn_mtx);
1146 		dnode_rele(dn, tx);
1147 	}
1148 
1149 	if (tx->tx_tempreserve_cookie)
1150 		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
1151 
1152 	if (!list_is_empty(&tx->tx_callbacks))
1153 		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);
1154 
1155 	if (tx->tx_anyobj == FALSE)
1156 		txg_rele_to_sync(&tx->tx_txgh);
1157 
1158 	list_destroy(&tx->tx_callbacks);
1159 	list_destroy(&tx->tx_holds);
1160 #ifdef ZFS_DEBUG
1161 	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
1162 	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
1163 	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
1164 	refcount_destroy_many(&tx->tx_space_written,
1165 	    refcount_count(&tx->tx_space_written));
1166 	refcount_destroy_many(&tx->tx_space_freed,
1167 	    refcount_count(&tx->tx_space_freed));
1168 #endif
1169 	kmem_free(tx, sizeof (dmu_tx_t));
1170 }
1171 
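/*
 * Abort an unassigned transaction, releasing all holds and invoking
 * any registered callbacks with ECANCELED.
 */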
void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

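/*
 * Register a commit callback: func(data, error) will be invoked after
 * the txg this tx is assigned to has synced, or with an error if the
 * tx is aborted.
 */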
void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

/*
 * Interface to hold a bunch of attributes, used for creating new files.
 * attrsize is the total size of all attributes to be added during
 * object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * This should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

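/*
 * Hold an object's spill block so that it may be created or modified.
 */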
void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
	}
}

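/*
 * Hold everything needed to create a new SA-based object: the bonus
 * buffer, the SA layout and registry ZAPs, and, if attrsize won't fit
 * in the bonus buffer, a spill block.
 */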
void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates whether the attributes being updated may grow in
 * size; if they may, the spill block must also be held, since the data
 * may no longer fit in the bonus buffer.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
1387