xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu_tx.c (revision 24b9abbad58fdd63dad716fd35a99a7944c4e3eb)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/zfs_context.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os->os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}
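/*
 * Note: the point of reading blocks here is to discover i/o errors at
 * hold time and stash them in tx->tx_err, which dmu_tx_try_assign()
 * hands back to the caller; otherwise a damaged block would first be
 * noticed in syncing context, where the transaction can no longer fail.
 */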

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks (if they are not aligned), and all the level-1 blocks.
	 */

	if (dn) {
		if (dn->dn_maxblkid == 0) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err)
				goto out;
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (dn->dn_nlevels > 1) {
				start >>= dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				end >>= dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = start+1; i < end; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
		}
	}

	/*
	 * If there's more than one block, the blocksize can't change,
	 * so we can make a more precise estimate.  Alternatively,
	 * if the dnode's ibs is larger than max_ibs, always use that.
	 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
	 * the code will still work correctly on existing pools.
	 */
	if (dn && (dn->dn_maxblkid != 0 || dn->dn_indblkshift > max_ibs)) {
		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_datablkshift != 0)
			min_bs = max_bs = dn->dn_datablkshift;
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		/*
		 * If we increase the number of levels of indirection,
		 * we'll need new blkid=0 indirect blocks.  If start == 0,
		 * we're already accounting for those blocks; and if end == 0,
		 * we can't increase the number of levels beyond that.
		 */
		if (start != 0 && end != 0)
			txh->txh_space_towrite += 1ULL << max_ibs;
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
	}

	ASSERT(txh->txh_space_towrite < 2 * DMU_MAX_ACCESS);

out:
	if (err)
		txh->txh_tx->tx_err = err;
}
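/*
 * Worked example (illustrative commentary; assumes the usual constants
 * SPA_MINBLOCKSHIFT = 9 and SPA_MAXBLOCKSHIFT = 17): when the block
 * size is not yet pinned down, a write of len = 8192 at off = 4096 is
 * charged at worst-case alignment:
 *
 *	start = P2ALIGN(4096, 128K) = 0
 *	end = P2ROUNDUP(4096 + 8192, 128K) - 1 = 131071
 *
 * so txh_space_towrite grows by 128K even though only 8K of data is
 * written; the loop above then adds one worst-case indirect block for
 * each level of indirection the write could dirty.
 */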

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = txh->txh_tx->tx_objset->os->os_meta_dnode;
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_level() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dasize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = dn->dn_nlevels > 1 ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH)
			break;
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk)
			break;

		if (new_blkid > blkid) {
			skipped += new_blkid - blkid - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		dbuf = dbuf_hold_level(dn, 1, blkid >> epbs, FTAG);

		txh->txh_memory_tohold += dbuf->db.db_size;
		if (txh->txh_memory_tohold > DMU_MAX_ACCESS) {
			txh->txh_tx->tx_err = E2BIG;
			dbuf_rele(dbuf, FTAG);
			break;
		}
		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dasize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}
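/*
 * Commentary on the accounting above: txh_space_tofree accumulates the
 * on-disk size of blocks this free can actually deallocate,
 * txh_space_tounref accumulates everything the range stops referencing
 * (some of it may be kept alive by snapshots), and txh_memory_tohold
 * bounds the level-1 indirect blocks that must be held in memory while
 * the free is processed.
 */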

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_dnode(txh);
	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);

	if (dn->dn_maxblkid == 0 && !add) {
		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    dn->dn_phys->dn_blkptr[0].blk_birth)) {
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		} else {
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
			txh->txh_space_tounref +=
			    BP_GET_ASIZE(dn->dn_phys->dn_blkptr);
		}
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(&dn->dn_objset->os, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	/*
	 * 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * 3 new blocks written if adding: new split leaf, 2 grown ptrtbl blocks
	 */
	dmu_tx_count_write(txh, dn->dn_maxblkid * dn->dn_datablksz,
	    (3 + (add ? 3 : 0)) << dn->dn_datablkshift);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		txh->txh_space_towrite += 3 << dn->dn_indblkshift;
}
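/*
 * Illustrative usage (a sketch, not code from this file): a caller that
 * intends to add an entry to a ZAP object would hold it as
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_zap(tx, zapobj, TRUE, name);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	...
 *	err = zap_add(os, zapobj, name, 8, 1, &value, tx);
 *	dmu_tx_commit(tx);
 *
 * where zapobj, name, and value are the caller's own; passing the name
 * lets the hold prefault the fat-zap leaf and catch i/o errors early.
 */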

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn = db->db_dnode;

	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset->os);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj)
		return;

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT)
		return;

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * buffer so that we don't need to hold it
				 * when creating a new object.
				 */
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset)
			return;
	}
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_state(spa) == POOL_STATE_IO_FAILURE) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}
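/*
 * A note on the reservation above (added commentary): the memory, asize,
 * fsize, and usize figures are deliberately pessimistic, since
 * spa_get_asize() inflates the logical estimate to cover worst-case
 * allocation overhead.  dsl_dir_tempreserve_space() either reserves that
 * much against the dsl_dir's quota headroom or fails the assign with an
 * error (e.g. ENOSPC), in which case the holds are torn down by
 * dmu_tx_unassign() and the caller may retry.
 */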

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
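/*
 * Illustrative caller pattern for TXG_NOWAIT, following the comment
 * above (a sketch; os, object, off, len, and buf are the caller's own):
 *
 *	again:
 *		tx = dmu_tx_create(os);
 *		dmu_tx_hold_write(tx, object, off, len);
 *		err = dmu_tx_assign(tx, TXG_NOWAIT);
 *		if (err == ERESTART) {
 *			(drop the caller's locks)
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			(reacquire locks)
 *			goto again;
 *		} else if (err) {
 *			dmu_tx_abort(tx);
 *			return (err);
 *		}
 *		dmu_write(os, object, off, len, buf, tx);
 *		dmu_tx_commit(tx);
 */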

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_state(spa) == POOL_STATE_IO_FAILURE ||
	    tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}