xref: /freebsd/sys/contrib/openzfs/module/zfs/dmu_tx.c (revision b64c5a0ace59af62eff52bfe110a521dc73c937b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
25  * Copyright (c) 2024, Klara, Inc.
26  */
27 
28 #include <sys/dmu.h>
29 #include <sys/dmu_impl.h>
30 #include <sys/dbuf.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/dmu_objset.h>
33 #include <sys/dsl_dataset.h>
34 #include <sys/dsl_dir.h>
35 #include <sys/dsl_pool.h>
36 #include <sys/zap_impl.h>
37 #include <sys/spa.h>
38 #include <sys/sa.h>
39 #include <sys/sa_impl.h>
40 #include <sys/zfs_context.h>
41 #include <sys/trace_zfs.h>
42 
43 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
44     uint64_t arg1, uint64_t arg2);
45 
46 dmu_tx_stats_t dmu_tx_stats = {
47 	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
48 	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
49 	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
50 	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
51 	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
52 	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
53 	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
54 	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
55 	{ "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
56 	{ "dmu_tx_dirty_over_max",	KSTAT_DATA_UINT64 },
57 	{ "dmu_tx_dirty_frees_delay",	KSTAT_DATA_UINT64 },
58 	{ "dmu_tx_wrlog_delay",		KSTAT_DATA_UINT64 },
59 	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
60 };
61 
62 static kstat_t *dmu_tx_ksp;
63 
64 dmu_tx_t *
65 dmu_tx_create_dd(dsl_dir_t *dd)
66 {
67 	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
68 	tx->tx_dir = dd;
69 	if (dd != NULL)
70 		tx->tx_pool = dd->dd_pool;
71 	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
72 	    offsetof(dmu_tx_hold_t, txh_node));
73 	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
74 	    offsetof(dmu_tx_callback_t, dcb_node));
75 	tx->tx_start = gethrtime();
76 	return (tx);
77 }
78 
79 dmu_tx_t *
80 dmu_tx_create(objset_t *os)
81 {
82 	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
83 	tx->tx_objset = os;
84 	return (tx);
85 }
86 
87 dmu_tx_t *
88 dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
89 {
90 	dmu_tx_t *tx = dmu_tx_create_dd(NULL);
91 
92 	TXG_VERIFY(dp->dp_spa, txg);
93 	tx->tx_pool = dp;
94 	tx->tx_txg = txg;
95 	tx->tx_anyobj = TRUE;
96 
97 	return (tx);
98 }
99 
100 int
101 dmu_tx_is_syncing(dmu_tx_t *tx)
102 {
103 	return (tx->tx_anyobj);
104 }
105 
106 int
107 dmu_tx_private_ok(dmu_tx_t *tx)
108 {
109 	return (tx->tx_anyobj);
110 }
111 
112 static dmu_tx_hold_t *
113 dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
114     uint64_t arg1, uint64_t arg2)
115 {
116 	dmu_tx_hold_t *txh;
117 
118 	if (dn != NULL) {
119 		(void) zfs_refcount_add(&dn->dn_holds, tx);
120 		if (tx->tx_txg != 0) {
121 			mutex_enter(&dn->dn_mtx);
122 			/*
123 			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
124 			 * problem, but there's no way for it to happen (for
125 			 * now, at least).
126 			 */
127 			ASSERT(dn->dn_assigned_txg == 0);
128 			dn->dn_assigned_txg = tx->tx_txg;
129 			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
130 			mutex_exit(&dn->dn_mtx);
131 		}
132 	}
133 
134 	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
135 	txh->txh_tx = tx;
136 	txh->txh_dnode = dn;
137 	zfs_refcount_create(&txh->txh_space_towrite);
138 	zfs_refcount_create(&txh->txh_memory_tohold);
139 	txh->txh_type = type;
140 	txh->txh_arg1 = arg1;
141 	txh->txh_arg2 = arg2;
142 	list_insert_tail(&tx->tx_holds, txh);
143 
144 	return (txh);
145 }
146 
147 static dmu_tx_hold_t *
148 dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
149     enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
150 {
151 	dnode_t *dn = NULL;
152 	dmu_tx_hold_t *txh;
153 	int err;
154 
155 	if (object != DMU_NEW_OBJECT) {
156 		err = dnode_hold(os, object, FTAG, &dn);
157 		if (err != 0) {
158 			tx->tx_err = err;
159 			return (NULL);
160 		}
161 	}
162 	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
163 	if (dn != NULL)
164 		dnode_rele(dn, FTAG);
165 	return (txh);
166 }
167 
168 void
169 dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
170 {
171 	/*
172 	 * If we're syncing, the caller can manipulate any object anyhow, and
173 	 * the hold on the dnode_t can cause problems.
174 	 */
175 	if (!dmu_tx_is_syncing(tx))
176 		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
177 }
178 
179 /*
180  * This function reads specified data from disk.  The specified data will
181  * be needed to perform the transaction -- i.e., it will be read after
182  * we do dmu_tx_assign().  There are two reasons that we read the data now
183  * (before dmu_tx_assign()):
184  *
185  * 1. Reading it now has potentially better performance.  The transaction
186  * has not yet been assigned, so the TXG is not held open, and also the
187  * caller typically has less locks held when calling dmu_tx_hold_*() than
188  * after the transaction has been assigned.  This reduces the lock (and txg)
189  * hold times, thus reducing lock contention.
190  *
191  * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
192  * that are detected before they start making changes to the DMU state
193  * (i.e. now).  Once the transaction has been assigned, and some DMU
194  * state has been changed, it can be difficult to recover from an i/o
195  * error (e.g. to undo the changes already made in memory at the DMU
196  * layer).  Typically code to do so does not exist in the caller -- it
197  * assumes that the data has already been cached and thus i/o errors are
198  * not possible.
199  *
200  * It has been observed that the i/o initiated here can be a performance
201  * problem, and it appears to be optional, because we don't look at the
202  * data which is read.  However, removing this read would only serve to
203  * move the work elsewhere (after the dmu_tx_assign()), where it may
204  * have a greater impact on performance (in addition to the impact on
205  * fault tolerance noted above).
206  */
207 static int
208 dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
209 {
210 	int err;
211 	dmu_buf_impl_t *db;
212 
213 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
214 	err = dbuf_hold_impl(dn, level, blkid, TRUE, FALSE, FTAG, &db);
215 	rw_exit(&dn->dn_struct_rwlock);
216 	if (err == ENOENT)
217 		return (0);
218 	if (err != 0)
219 		return (err);
220 	/*
221 	 * PARTIAL_FIRST allows caching for uncacheable blocks.  It will
222 	 * be cleared after dmu_buf_will_dirty() calls dbuf_read() again.
223 	 */
224 	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH |
225 	    (level == 0 ? DB_RF_PARTIAL_FIRST : 0));
226 	dbuf_rele(db, FTAG);
227 	return (err);
228 }
229 
230 static void
231 dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
232 {
233 	dnode_t *dn = txh->txh_dnode;
234 	int err = 0;
235 
236 	if (len == 0)
237 		return;
238 
239 	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);
240 
241 	if (dn == NULL)
242 		return;
243 
244 	/*
245 	 * For i/o error checking, read the blocks that will be needed
246 	 * to perform the write: the first and last level-0 blocks (if
247 	 * they are not aligned, i.e. if they are partial-block writes),
248 	 * and all the level-1 blocks.
249 	 */
250 	if (dn->dn_maxblkid == 0) {
251 		if (off < dn->dn_datablksz &&
252 		    (off > 0 || len < dn->dn_datablksz)) {
253 			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
254 			if (err != 0) {
255 				txh->txh_tx->tx_err = err;
256 			}
257 		}
258 	} else {
259 		zio_t *zio = zio_root(dn->dn_objset->os_spa,
260 		    NULL, NULL, ZIO_FLAG_CANFAIL);
261 
262 		/* first level-0 block */
263 		uint64_t start = off >> dn->dn_datablkshift;
264 		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
265 			err = dmu_tx_check_ioerr(zio, dn, 0, start);
266 			if (err != 0) {
267 				txh->txh_tx->tx_err = err;
268 			}
269 		}
270 
271 		/* last level-0 block */
272 		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
273 		if (end != start && end <= dn->dn_maxblkid &&
274 		    P2PHASE(off + len, dn->dn_datablksz)) {
275 			err = dmu_tx_check_ioerr(zio, dn, 0, end);
276 			if (err != 0) {
277 				txh->txh_tx->tx_err = err;
278 			}
279 		}
280 
281 		/* level-1 blocks */
282 		if (dn->dn_nlevels > 1) {
283 			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
284 			for (uint64_t i = (start >> shft) + 1;
285 			    i < end >> shft; i++) {
286 				err = dmu_tx_check_ioerr(zio, dn, 1, i);
287 				if (err != 0) {
288 					txh->txh_tx->tx_err = err;
289 				}
290 			}
291 		}
292 
293 		err = zio_wait(zio);
294 		if (err != 0) {
295 			txh->txh_tx->tx_err = err;
296 		}
297 	}
298 }
299 
300 static void
301 dmu_tx_count_append(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
302 {
303 	dnode_t *dn = txh->txh_dnode;
304 	int err = 0;
305 
306 	if (len == 0)
307 		return;
308 
309 	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);
310 
311 	if (dn == NULL)
312 		return;
313 
314 	/*
315 	 * For i/o error checking, read the blocks that will be needed
316 	 * to perform the append: only the first level-0 block (and only if
317 	 * it is a partial-block write); no additional blocks are read.
318 	 */
319 	if (dn->dn_maxblkid == 0) {
320 		if (off < dn->dn_datablksz &&
321 		    (off > 0 || len < dn->dn_datablksz)) {
322 			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
323 			if (err != 0) {
324 				txh->txh_tx->tx_err = err;
325 			}
326 		}
327 	} else {
328 		zio_t *zio = zio_root(dn->dn_objset->os_spa,
329 		    NULL, NULL, ZIO_FLAG_CANFAIL);
330 
331 		/* first level-0 block */
332 		uint64_t start = off >> dn->dn_datablkshift;
333 		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
334 			err = dmu_tx_check_ioerr(zio, dn, 0, start);
335 			if (err != 0) {
336 				txh->txh_tx->tx_err = err;
337 			}
338 		}
339 
340 		err = zio_wait(zio);
341 		if (err != 0) {
342 			txh->txh_tx->tx_err = err;
343 		}
344 	}
345 }
346 
347 static void
348 dmu_tx_count_dnode(dmu_tx_hold_t *txh)
349 {
350 	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
351 	    DNODE_MIN_SIZE, FTAG);
352 }
353 
354 void
355 dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
356 {
357 	dmu_tx_hold_t *txh;
358 
359 	ASSERT0(tx->tx_txg);
360 	ASSERT3U(len, <=, DMU_MAX_ACCESS);
361 	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
362 
363 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
364 	    object, THT_WRITE, off, len);
365 	if (txh != NULL) {
366 		dmu_tx_count_write(txh, off, len);
367 		dmu_tx_count_dnode(txh);
368 	}
369 }
370 
371 void
372 dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
373 {
374 	dmu_tx_hold_t *txh;
375 
376 	ASSERT0(tx->tx_txg);
377 	ASSERT3U(len, <=, DMU_MAX_ACCESS);
378 	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
379 
380 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
381 	if (txh != NULL) {
382 		dmu_tx_count_write(txh, off, len);
383 		dmu_tx_count_dnode(txh);
384 	}
385 }
386 
387 /*
388  * Should be used when appending to an object and the exact offset is unknown.
389  * The write must occur at or beyond the specified offset.  Only the L0 block
390  * at the provided offset will be prefetched.
391  */
392 void
393 dmu_tx_hold_append(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
394 {
395 	dmu_tx_hold_t *txh;
396 
397 	ASSERT0(tx->tx_txg);
398 	ASSERT3U(len, <=, DMU_MAX_ACCESS);
399 
400 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
401 	    object, THT_APPEND, off, DMU_OBJECT_END);
402 	if (txh != NULL) {
403 		dmu_tx_count_append(txh, off, len);
404 		dmu_tx_count_dnode(txh);
405 	}
406 }
407 
408 void
409 dmu_tx_hold_append_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
410 {
411 	dmu_tx_hold_t *txh;
412 
413 	ASSERT0(tx->tx_txg);
414 	ASSERT3U(len, <=, DMU_MAX_ACCESS);
415 
416 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_APPEND, off, DMU_OBJECT_END);
417 	if (txh != NULL) {
418 		dmu_tx_count_append(txh, off, len);
419 		dmu_tx_count_dnode(txh);
420 	}
421 }
422 
423 /*
424  * This function marks the transaction as being a "net free".  The end
425  * result is that refquotas will be disabled for this transaction, and
426  * this transaction will be able to use half of the pool space overhead
427  * (see dsl_pool_adjustedsize()).  Therefore this function should only
428  * be called for transactions that we expect will not cause a net increase
429  * in the amount of space used (but it's OK if that is occasionally not true).
430  */
431 void
432 dmu_tx_mark_netfree(dmu_tx_t *tx)
433 {
434 	tx->tx_netfree = B_TRUE;
435 }
436 
437 static void
438 dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
439 {
440 	dmu_tx_t *tx = txh->txh_tx;
441 	dnode_t *dn = txh->txh_dnode;
442 	int err;
443 
444 	ASSERT(tx->tx_txg == 0);
445 
446 	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
447 		return;
448 	if (len == DMU_OBJECT_END)
449 		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;
450 
451 	/*
452 	 * For i/o error checking, we read the first and last level-0
453 	 * blocks if they are not aligned, and all the level-1 blocks.
454 	 *
455 	 * Note:  dbuf_free_range() assumes that we have not instantiated
456 	 * any level-0 dbufs that will be completely freed.  Therefore we must
457 	 * exercise care to not read or count the first and last blocks
458 	 * if they are blocksize-aligned.
459 	 */
460 	if (dn->dn_datablkshift == 0) {
461 		if (off != 0 || len < dn->dn_datablksz)
462 			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
463 	} else {
464 		/* first block will be modified if it is not aligned */
465 		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
466 			dmu_tx_count_write(txh, off, 1);
467 		/* last block will be modified if it is not aligned */
468 		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
469 			dmu_tx_count_write(txh, off + len, 1);
470 	}
471 
472 	/*
473 	 * Check level-1 blocks.
474 	 */
475 	if (dn->dn_nlevels > 1) {
476 		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
477 		    SPA_BLKPTRSHIFT;
478 		uint64_t start = off >> shift;
479 		uint64_t end = (off + len) >> shift;
480 
481 		ASSERT(dn->dn_indblkshift != 0);
482 
483 		/*
484 		 * dnode_reallocate() can result in an object with indirect
485 		 * blocks having an odd data block size.  In this case,
486 		 * just check the single block.
487 		 */
488 		if (dn->dn_datablkshift == 0)
489 			start = end = 0;
490 
491 		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
492 		    NULL, NULL, ZIO_FLAG_CANFAIL);
493 		for (uint64_t i = start; i <= end; i++) {
494 			uint64_t ibyte = i << shift;
495 			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
496 			i = ibyte >> shift;
497 			if (err == ESRCH || i > end)
498 				break;
499 			if (err != 0) {
500 				tx->tx_err = err;
501 				(void) zio_wait(zio);
502 				return;
503 			}
504 
505 			(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
506 			    1 << dn->dn_indblkshift, FTAG);
507 
508 			err = dmu_tx_check_ioerr(zio, dn, 1, i);
509 			if (err != 0) {
510 				tx->tx_err = err;
511 				(void) zio_wait(zio);
512 				return;
513 			}
514 		}
515 		err = zio_wait(zio);
516 		if (err != 0) {
517 			tx->tx_err = err;
518 			return;
519 		}
520 	}
521 }
522 
523 void
524 dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
525 {
526 	dmu_tx_hold_t *txh;
527 
528 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
529 	    object, THT_FREE, off, len);
530 	if (txh != NULL) {
531 		dmu_tx_count_dnode(txh);
532 		dmu_tx_count_free(txh, off, len);
533 	}
534 }
535 
536 void
537 dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
538 {
539 	dmu_tx_hold_t *txh;
540 
541 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
542 	if (txh != NULL) {
543 		dmu_tx_count_dnode(txh);
544 		dmu_tx_count_free(txh, off, len);
545 	}
546 }
547 
548 static void
549 dmu_tx_count_clone(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
550 {
551 
552 	/*
553 	 * Reuse dmu_tx_count_free(); it does exactly what we need for clone.
554 	 */
555 	dmu_tx_count_free(txh, off, len);
556 }
557 
558 void
559 dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
560 {
561 	dmu_tx_hold_t *txh;
562 
563 	ASSERT0(tx->tx_txg);
564 	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
565 
566 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_CLONE, off, len);
567 	if (txh != NULL) {
568 		dmu_tx_count_dnode(txh);
569 		dmu_tx_count_clone(txh, off, len);
570 	}
571 }
572 
573 static void
574 dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
575 {
576 	dmu_tx_t *tx = txh->txh_tx;
577 	dnode_t *dn = txh->txh_dnode;
578 	int err;
579 
580 	ASSERT(tx->tx_txg == 0);
581 
582 	dmu_tx_count_dnode(txh);
583 
584 	/*
585 	 * Modifying an almost-full microzap is around the worst case (128KB).
586 	 *
587 	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
588 	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
589 	 * - 4 new blocks written if adding:
590 	 *    - 2 blocks for possibly split leaves,
591 	 *    - 2 grown ptrtbl blocks
592 	 */
593 	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
594 	    zap_get_micro_max_size(tx->tx_pool->dp_spa), FTAG);
595 
596 	if (dn == NULL)
597 		return;
598 
599 	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);
600 
601 	if (dn->dn_maxblkid == 0 || name == NULL) {
602 		/*
603 		 * This is a microzap (only one block), or we don't know
604 		 * the name.  Check the first block for i/o errors.
605 		 */
606 		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
607 		if (err != 0) {
608 			tx->tx_err = err;
609 		}
610 	} else {
611 		/*
612 		 * Access the name so that we'll check for i/o errors to
613 		 * the leaf blocks, etc.  We ignore ENOENT, as this name
614 		 * may not yet exist.
615 		 */
616 		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
617 		if (err == EIO || err == ECKSUM || err == ENXIO) {
618 			tx->tx_err = err;
619 		}
620 	}
621 }
622 
623 void
624 dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
625 {
626 	dmu_tx_hold_t *txh;
627 
628 	ASSERT0(tx->tx_txg);
629 
630 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
631 	    object, THT_ZAP, add, (uintptr_t)name);
632 	if (txh != NULL)
633 		dmu_tx_hold_zap_impl(txh, name);
634 }
635 
636 void
637 dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
638 {
639 	dmu_tx_hold_t *txh;
640 
641 	ASSERT0(tx->tx_txg);
642 	ASSERT(dn != NULL);
643 
644 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
645 	if (txh != NULL)
646 		dmu_tx_hold_zap_impl(txh, name);
647 }
648 
649 void
650 dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
651 {
652 	dmu_tx_hold_t *txh;
653 
654 	ASSERT(tx->tx_txg == 0);
655 
656 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
657 	    object, THT_BONUS, 0, 0);
658 	if (txh)
659 		dmu_tx_count_dnode(txh);
660 }
661 
662 void
663 dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
664 {
665 	dmu_tx_hold_t *txh;
666 
667 	ASSERT0(tx->tx_txg);
668 
669 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
670 	if (txh)
671 		dmu_tx_count_dnode(txh);
672 }
673 
674 void
675 dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
676 {
677 	dmu_tx_hold_t *txh;
678 
679 	ASSERT(tx->tx_txg == 0);
680 
681 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
682 	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
683 	if (txh) {
684 		(void) zfs_refcount_add_many(
685 		    &txh->txh_space_towrite, space, FTAG);
686 	}
687 }
688 
689 #ifdef ZFS_DEBUG
690 void
691 dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
692 {
693 	boolean_t match_object = B_FALSE;
694 	boolean_t match_offset = B_FALSE;
695 
696 	DB_DNODE_ENTER(db);
697 	dnode_t *dn = DB_DNODE(db);
698 	ASSERT(tx->tx_txg != 0);
699 	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
700 	ASSERT3U(dn->dn_object, ==, db->db.db_object);
701 
702 	if (tx->tx_anyobj) {
703 		DB_DNODE_EXIT(db);
704 		return;
705 	}
706 
707 	/* XXX No checking on the meta dnode for now */
708 	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
709 		DB_DNODE_EXIT(db);
710 		return;
711 	}
712 
713 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
714 	    txh = list_next(&tx->tx_holds, txh)) {
715 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
716 		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
717 			match_object = TRUE;
718 		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
719 			int datablkshift = dn->dn_datablkshift ?
720 			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
721 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
722 			int shift = datablkshift + epbs * db->db_level;
723 			uint64_t beginblk = shift >= 64 ? 0 :
724 			    (txh->txh_arg1 >> shift);
725 			uint64_t endblk = shift >= 64 ? 0 :
726 			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
727 			uint64_t blkid = db->db_blkid;
728 
729 			/* XXX txh_arg2 better not be zero... */
730 
731 			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
732 			    txh->txh_type, (u_longlong_t)beginblk,
733 			    (u_longlong_t)endblk);
734 
735 			switch (txh->txh_type) {
736 			case THT_WRITE:
737 				if (blkid >= beginblk && blkid <= endblk)
738 					match_offset = TRUE;
739 				/*
740 				 * We will let this hold work for the bonus
741 				 * or spill buffer so that we don't need to
742 				 * hold it when creating a new object.
743 				 */
744 				if (blkid == DMU_BONUS_BLKID ||
745 				    blkid == DMU_SPILL_BLKID)
746 					match_offset = TRUE;
747 				/*
748 				 * They might have to increase nlevels,
749 				 * thus dirtying the new TLIBs.  Or they
750 				 * might have to change the block size,
751 				 * thus dirtying the new lvl=0 blk=0.
752 				 */
753 				if (blkid == 0)
754 					match_offset = TRUE;
755 				break;
756 			case THT_APPEND:
757 				if (blkid >= beginblk && (blkid <= endblk ||
758 				    txh->txh_arg2 == DMU_OBJECT_END))
759 					match_offset = TRUE;
760 
761 				/*
762 				 * THT_WRITE is used for bonus and spill blocks.
763 				 */
764 				ASSERT(blkid != DMU_BONUS_BLKID &&
765 				    blkid != DMU_SPILL_BLKID);
766 
767 				/*
768 				 * They might have to increase nlevels,
769 				 * thus dirtying the new TLIBs.  Or they
770 				 * might have to change the block size,
771 				 * thus dirtying the new lvl=0 blk=0.
772 				 */
773 				if (blkid == 0)
774 					match_offset = TRUE;
775 				break;
776 			case THT_FREE:
777 				/*
778 				 * We will dirty all the level 1 blocks in
779 				 * the free range and perhaps the first and
780 				 * last level 0 block.
781 				 */
782 				if (blkid >= beginblk && (blkid <= endblk ||
783 				    txh->txh_arg2 == DMU_OBJECT_END))
784 					match_offset = TRUE;
785 				break;
786 			case THT_SPILL:
787 				if (blkid == DMU_SPILL_BLKID)
788 					match_offset = TRUE;
789 				break;
790 			case THT_BONUS:
791 				if (blkid == DMU_BONUS_BLKID)
792 					match_offset = TRUE;
793 				break;
794 			case THT_ZAP:
795 				match_offset = TRUE;
796 				break;
797 			case THT_NEWOBJECT:
798 				match_object = TRUE;
799 				break;
800 			case THT_CLONE:
801 				if (blkid >= beginblk && blkid <= endblk)
802 					match_offset = TRUE;
803 				/*
804 				 * They might have to increase nlevels,
805 				 * thus dirtying the new TLIBs.  Or they
806 				 * might have to change the block size,
807 				 * thus dirtying the new lvl=0 blk=0.
808 				 */
809 				if (blkid == 0)
810 					match_offset = TRUE;
811 				break;
812 			default:
813 				cmn_err(CE_PANIC, "bad txh_type %d",
814 				    txh->txh_type);
815 			}
816 		}
817 		if (match_object && match_offset) {
818 			DB_DNODE_EXIT(db);
819 			return;
820 		}
821 	}
822 	DB_DNODE_EXIT(db);
823 	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
824 	    (u_longlong_t)db->db.db_object, db->db_level,
825 	    (u_longlong_t)db->db_blkid);
826 }
827 #endif
828 
829 /*
830  * If we can't do 10 iops, something is wrong.  Let us go ahead
831  * and hit zfs_dirty_data_max.
832  */
833 static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */
834 
835 /*
836  * We delay transactions when we've determined that the backend storage
837  * isn't able to accommodate the rate of incoming writes.
838  *
839  * If there is already a transaction waiting, we delay relative to when
840  * that transaction finishes waiting.  This way the calculated min_time
841  * is independent of the number of threads concurrently executing
842  * transactions.
843  *
844  * If we are the only waiter, wait relative to when the transaction
845  * started, rather than the current time.  This credits the transaction for
846  * "time already served", e.g. reading indirect blocks.
847  *
848  * The minimum time for a transaction to take is calculated as:
849  *     min_time = scale * (dirty - min) / (max - dirty)
850  *     min_time is then capped at zfs_delay_max_ns.
851  *
852  * The delay has two degrees of freedom that can be adjusted via tunables.
853  * The percentage of dirty data at which we start to delay is defined by
854  * zfs_delay_min_dirty_percent. This should typically be at or above
855  * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
856  * delay after writing at full speed has failed to keep up with the incoming
857  * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
858  * speaking, this variable determines the amount of delay at the midpoint of
859  * the curve.
860  *
861  * delay
862  *  10ms +-------------------------------------------------------------*+
863  *       |                                                             *|
864  *   9ms +                                                             *+
865  *       |                                                             *|
866  *   8ms +                                                             *+
867  *       |                                                            * |
868  *   7ms +                                                            * +
869  *       |                                                            * |
870  *   6ms +                                                            * +
871  *       |                                                            * |
872  *   5ms +                                                           *  +
873  *       |                                                           *  |
874  *   4ms +                                                           *  +
875  *       |                                                           *  |
876  *   3ms +                                                          *   +
877  *       |                                                          *   |
878  *   2ms +                                              (midpoint) *    +
879  *       |                                                  |    **     |
880  *   1ms +                                                  v ***       +
881  *       |             zfs_delay_scale ---------->     ********         |
882  *     0 +-------------------------------------*********----------------+
883  *       0%                    <- zfs_dirty_data_max ->               100%
884  *
885  * Note that since the delay is added to the outstanding time remaining on the
886  * most recent transaction, the delay is effectively the inverse of IOPS.
887  * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
888  * was chosen such that small changes in the amount of accumulated dirty data
889  * in the first 3/4 of the curve yield relatively small differences in the
890  * amount of delay.
891  *
892  * The effects can be easier to understand when the amount of delay is
893  * represented on a log scale:
894  *
895  * delay
896  * 100ms +-------------------------------------------------------------++
897  *       +                                                              +
898  *       |                                                              |
899  *       +                                                             *+
900  *  10ms +                                                             *+
901  *       +                                                           ** +
902  *       |                                              (midpoint)  **  |
903  *       +                                                  |     **    +
904  *   1ms +                                                  v ****      +
905  *       +             zfs_delay_scale ---------->        *****         +
906  *       |                                             ****             |
907  *       +                                          ****                +
908  * 100us +                                        **                    +
909  *       +                                       *                      +
910  *       |                                      *                       |
911  *       +                                     *                        +
912  *  10us +                                     *                        +
913  *       +                                                              +
914  *       |                                                              |
915  *       +                                                              +
916  *       +--------------------------------------------------------------+
917  *       0%                    <- zfs_dirty_data_max ->               100%
918  *
919  * Note here that only as the amount of dirty data approaches its limit does
920  * the delay start to increase rapidly. The goal of a properly tuned system
921  * should be to keep the amount of dirty data out of that range by first
922  * ensuring that the appropriate limits are set for the I/O scheduler to reach
923  * optimal throughput on the backend storage, and then by changing the value
924  * of zfs_delay_scale to increase the steepness of the curve.
925  */
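/*
 * A worked example (a sketch only; these are assumed illustrative
 * tunable values, not guaranteed defaults): with zfs_dirty_data_max =
 * 4 GiB, zfs_delay_min_dirty_percent = 60 and zfs_delay_scale =
 * 500,000 ns, delay begins once dirty data exceeds 2.4 GiB.  At
 * dirty = 3.8 GiB:
 *
 *     min_time = 500000 * (3.8 GiB - 2.4 GiB) / (4 GiB - 3.8 GiB)
 *              = 500000 * 7 = 3,500,000 ns (3.5 ms)
 *
 * which is then capped at zfs_delay_max_ns (100 ms above).
 */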
926 static void
927 dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
928 {
929 	dsl_pool_t *dp = tx->tx_pool;
930 	uint64_t delay_min_bytes, wrlog;
931 	hrtime_t wakeup, tx_time = 0, now;
932 
933 	/* Calculate minimum transaction time for the dirty data amount. */
934 	delay_min_bytes =
935 	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
936 	if (dirty > delay_min_bytes) {
937 		/*
938 		 * The caller has already waited until we are under the max.
939 		 * We make them pass us the amount of dirty data so we don't
940 		 * have to handle the case of it being >= the max, which
941 		 * could cause a divide-by-zero if it's == the max.
942 		 */
943 		ASSERT3U(dirty, <, zfs_dirty_data_max);
944 
945 		tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
946 		    (zfs_dirty_data_max - dirty);
947 	}
948 
949 	/* Calculate minimum transaction time for the TX_WRITE log size. */
950 	wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
951 	delay_min_bytes =
952 	    zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
953 	if (wrlog >= zfs_wrlog_data_max) {
954 		tx_time = zfs_delay_max_ns;
955 	} else if (wrlog > delay_min_bytes) {
956 		tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
957 		    (zfs_wrlog_data_max - wrlog), tx_time);
958 	}
959 
960 	if (tx_time == 0)
961 		return;
962 
963 	tx_time = MIN(tx_time, zfs_delay_max_ns);
964 	now = gethrtime();
965 	if (now > tx->tx_start + tx_time)
966 		return;
967 
968 	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
969 	    uint64_t, tx_time);
970 
971 	mutex_enter(&dp->dp_lock);
972 	wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
973 	dp->dp_last_wakeup = wakeup;
974 	mutex_exit(&dp->dp_lock);
975 
976 	zfs_sleep_until(wakeup);
977 }
978 
979 /*
980  * This routine attempts to assign the transaction to a transaction group.
981  * To do so, we must determine if there is sufficient free space on disk.
982  *
983  * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
984  * on it), then it is assumed that there is sufficient free space,
985  * unless there's insufficient slop space in the pool (see the comment
986  * above spa_slop_shift in spa_misc.c).
987  *
988  * If it is not a "netfree" transaction, then if the data already on disk
989  * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
990  * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
991  * plus the rough estimate of this transaction's changes, may exceed the
992  * allowed usage, then this will fail with ERESTART, which will cause the
993  * caller to wait for the pending changes to be written to disk (by waiting
994  * for the next TXG to open), and then check the space usage again.
995  *
996  * The rough estimate of pending changes is comprised of the sum of:
996  * The rough estimate of pending changes consists of the sum of:
998  *  - this transaction's holds' txh_space_towrite
999  *
1000  *  - dd_tempreserved[], which is the sum of in-flight transactions'
1001  *    holds' txh_space_towrite (i.e. those transactions that have called
1002  *    dmu_tx_assign() but not yet called dmu_tx_commit()).
1003  *
1004  *  - dd_space_towrite[], which is the amount of dirtied dbufs.
1005  *
1006  * Note that all of these values are inflated by spa_get_worst_case_asize(),
1007  * which means that we may get ERESTART well before we are actually in danger
1008  * of running out of space, but this also mitigates any small inaccuracies
1009  * in the rough estimate (e.g. txh_space_towrite doesn't take into account
1010  * indirect blocks, and dd_space_towrite[] doesn't take into account changes
1011  * to the MOS).
1012  *
1013  * Note that due to this algorithm, it is possible to exceed the allowed
1014  * usage by one transaction.  Also, as we approach the allowed usage,
1015  * we will allow a very limited amount of changes into each TXG, thus
1016  * decreasing performance.
1017  */
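/*
 * In rough pseudo-form (a summary of the comment above, not the exact
 * code; the actual check happens in dsl_dir_tempreserve_space(), called
 * below):
 *
 *	pending = this tx's txh_space_towrite
 *	        + dd_tempreserved[]	(other assigned, uncommitted txs)
 *	        + dd_space_towrite[]	(already-dirtied dbufs)
 *	fail with ERESTART (or EDQUOT/ENOSPC) if
 *	    spa_get_worst_case_asize(pending) would exceed the allowed usage
 */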
1018 static int
1019 dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
1020 {
1021 	spa_t *spa = tx->tx_pool->dp_spa;
1022 
1023 	ASSERT0(tx->tx_txg);
1024 
1025 	if (tx->tx_err) {
1026 		DMU_TX_STAT_BUMP(dmu_tx_error);
1027 		return (tx->tx_err);
1028 	}
1029 
1030 	if (spa_suspended(spa)) {
1031 		DMU_TX_STAT_BUMP(dmu_tx_suspended);
1032 
1033 		/*
1034 		 * If the user has indicated a blocking failure mode
1035 		 * then return ERESTART which will block in dmu_tx_wait().
1036 		 * Otherwise, return EIO so that an error can get
1037 		 * propagated back to the VOP calls.
1038 		 *
1039 		 * Note that we always honor the txg_how flag regardless
1040 		 * of the failuremode setting.
1041 		 */
1042 		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
1043 		    !(txg_how & TXG_WAIT))
1044 			return (SET_ERROR(EIO));
1045 
1046 		return (SET_ERROR(ERESTART));
1047 	}
1048 
1049 	if (!tx->tx_dirty_delayed &&
1050 	    dsl_pool_need_wrlog_delay(tx->tx_pool)) {
1051 		tx->tx_wait_dirty = B_TRUE;
1052 		DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
1053 		return (SET_ERROR(ERESTART));
1054 	}
1055 
1056 	if (!tx->tx_dirty_delayed &&
1057 	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
1058 		tx->tx_wait_dirty = B_TRUE;
1059 		DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
1060 		return (SET_ERROR(ERESTART));
1061 	}
1062 
1063 	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
1064 	tx->tx_needassign_txh = NULL;
1065 
1066 	/*
1067 	 * NB: No error returns are allowed after txg_hold_open, but
1068 	 * before processing the dnode holds, due to the
1069 	 * dmu_tx_unassign() logic.
1070 	 */
1071 
1072 	uint64_t towrite = 0;
1073 	uint64_t tohold = 0;
1074 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
1075 	    txh = list_next(&tx->tx_holds, txh)) {
1076 		dnode_t *dn = txh->txh_dnode;
1077 		if (dn != NULL) {
1078 			/*
1079 			 * This thread can't hold the dn_struct_rwlock
1080 			 * while assigning the tx, because this can lead to
1081 			 * deadlock. Specifically, if this dnode is already
1082 			 * assigned to an earlier txg, this thread may need
1083 			 * to wait for that txg to sync (the ERESTART case
1084 			 * below).  The other thread that has assigned this
1085 			 * dnode to an earlier txg prevents this txg from
1086 			 * syncing until its tx can complete (calling
1087 			 * dmu_tx_commit()), but it may need to acquire the
1088 			 * dn_struct_rwlock to do so (e.g. via
1089 			 * dmu_buf_hold*()).
1090 			 *
1091 			 * Note that this thread can't hold the lock for
1092 			 * read either, but the rwlock doesn't record
1093 			 * enough information to make that assertion.
1094 			 */
1095 			ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));
1096 
1097 			mutex_enter(&dn->dn_mtx);
1098 			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
1099 				mutex_exit(&dn->dn_mtx);
1100 				tx->tx_needassign_txh = txh;
1101 				DMU_TX_STAT_BUMP(dmu_tx_group);
1102 				return (SET_ERROR(ERESTART));
1103 			}
1104 			if (dn->dn_assigned_txg == 0)
1105 				dn->dn_assigned_txg = tx->tx_txg;
1106 			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1107 			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
1108 			mutex_exit(&dn->dn_mtx);
1109 		}
1110 		towrite += zfs_refcount_count(&txh->txh_space_towrite);
1111 		tohold += zfs_refcount_count(&txh->txh_memory_tohold);
1112 	}
1113 
1114 	/* needed allocation: worst-case estimate of write space */
1115 	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
1116 	/* calculate memory footprint estimate */
1117 	uint64_t memory = towrite + tohold;
1118 
1119 	if (tx->tx_dir != NULL && asize != 0) {
1120 		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
1121 		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
1122 		if (err != 0)
1123 			return (err);
1124 	}
1125 
1126 	DMU_TX_STAT_BUMP(dmu_tx_assigned);
1127 
1128 	return (0);
1129 }
1130 
1131 static void
1132 dmu_tx_unassign(dmu_tx_t *tx)
1133 {
1134 	if (tx->tx_txg == 0)
1135 		return;
1136 
1137 	txg_rele_to_quiesce(&tx->tx_txgh);
1138 
1139 	/*
1140 	 * Walk the transaction's hold list, removing the hold on the
1141 	 * associated dnode, and notifying waiters if the refcount drops to 0.
1142 	 */
1143 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
1144 	    txh && txh != tx->tx_needassign_txh;
1145 	    txh = list_next(&tx->tx_holds, txh)) {
1146 		dnode_t *dn = txh->txh_dnode;
1147 
1148 		if (dn == NULL)
1149 			continue;
1150 		mutex_enter(&dn->dn_mtx);
1151 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1152 
1153 		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1154 			dn->dn_assigned_txg = 0;
1155 			cv_broadcast(&dn->dn_notxholds);
1156 		}
1157 		mutex_exit(&dn->dn_mtx);
1158 	}
1159 
1160 	txg_rele_to_sync(&tx->tx_txgh);
1161 
1162 	tx->tx_lasttried_txg = tx->tx_txg;
1163 	tx->tx_txg = 0;
1164 }
1165 
1166 /*
1167  * Assign tx to a transaction group; txg_how is a bitmask:
1168  *
1169  * If TXG_WAIT is set and the currently open txg is full, this function
1170  * will wait until there's a new txg. This should be used when no locks
1171  * are being held. With this bit set, this function will only fail if
1172  * we're truly out of space (or over quota).
1173  *
1174  * If TXG_WAIT is *not* set and we can't assign into the currently open
1175  * txg without blocking, this function will return immediately with
1176  * ERESTART. This should be used whenever locks are being held.  On an
1177  * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
1178  * and try again.
1179  *
1180  * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
1181  * delayed by the ZFS Write Throttle (see comments in dsl_pool.c for
1182  * details on the throttle). This is used by the VFS operations, after
1183  * they have already called dmu_tx_wait() (though most likely on a
1184  * different tx).
1185  *
1186  * It is guaranteed that subsequent successful calls to dmu_tx_assign()
1187  * will assign the tx to monotonically increasing txgs. Of course this is
1188  * not strong monotonicity, because the same txg can be returned multiple
1189  * times in a row. This guarantee holds both for subsequent calls from
1190  * one thread and for multiple threads. For example, it is impossible to
1191  * observe the following sequence of events:
1192  *
1193  *          Thread 1                            Thread 2
1194  *
1195  *     dmu_tx_assign(T1, ...)
1196  *     1 <- dmu_tx_get_txg(T1)
1197  *                                       dmu_tx_assign(T2, ...)
1198  *                                       2 <- dmu_tx_get_txg(T2)
1199  *     dmu_tx_assign(T3, ...)
1200  *     1 <- dmu_tx_get_txg(T3)
1201  */
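/*
 * A minimal sketch of the typical caller pattern (error handling and
 * the specific hold calls depend on the caller; dmu_write() is only an
 * example of a change made under the assigned tx):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */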
1202 int
1203 dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
1204 {
1205 	int err;
1206 
1207 	ASSERT(tx->tx_txg == 0);
1208 	ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
1209 	ASSERT(!dsl_pool_sync_context(tx->tx_pool));
1210 
1211 	/* If we might wait, we must not hold the config lock. */
1212 	IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));
1213 
1214 	if ((txg_how & TXG_NOTHROTTLE))
1215 		tx->tx_dirty_delayed = B_TRUE;
1216 
1217 	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
1218 		dmu_tx_unassign(tx);
1219 
1220 		if (err != ERESTART || !(txg_how & TXG_WAIT))
1221 			return (err);
1222 
1223 		dmu_tx_wait(tx);
1224 	}
1225 
1226 	txg_rele_to_quiesce(&tx->tx_txgh);
1227 
1228 	return (0);
1229 }
1230 
1231 void
1232 dmu_tx_wait(dmu_tx_t *tx)
1233 {
1234 	spa_t *spa = tx->tx_pool->dp_spa;
1235 	dsl_pool_t *dp = tx->tx_pool;
1236 	hrtime_t before;
1237 
1238 	ASSERT(tx->tx_txg == 0);
1239 	ASSERT(!dsl_pool_config_held(tx->tx_pool));
1240 
1241 	before = gethrtime();
1242 
1243 	if (tx->tx_wait_dirty) {
1244 		uint64_t dirty;
1245 
1246 		/*
1247 		 * dmu_tx_try_assign() has determined that we need to wait
1248 		 * because we've consumed much or all of the dirty buffer
1249 		 * space.
1250 		 */
1251 		mutex_enter(&dp->dp_lock);
1252 		if (dp->dp_dirty_total >= zfs_dirty_data_max)
1253 			DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
1254 		while (dp->dp_dirty_total >= zfs_dirty_data_max)
1255 			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
1256 		dirty = dp->dp_dirty_total;
1257 		mutex_exit(&dp->dp_lock);
1258 
1259 		dmu_tx_delay(tx, dirty);
1260 
1261 		tx->tx_wait_dirty = B_FALSE;
1262 
1263 		/*
1264 		 * Note: setting tx_dirty_delayed only has effect if the
1265 		 * caller used TXG_WAIT.  Otherwise they are going to
1266 		 * destroy this tx and try again.  The common case,
1267 		 * zfs_write(), uses TXG_WAIT.
1268 		 */
1269 		tx->tx_dirty_delayed = B_TRUE;
1270 	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
1271 		/*
1272 		 * If the pool is suspended we need to wait until it
1273 		 * is resumed.  Note that it's possible that the pool
1274 		 * has become active after this thread has tried to
1275 		 * obtain a tx.  If that's the case then tx_lasttried_txg
1276 		 * would not have been set.
1277 		 */
1278 		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
1279 	} else if (tx->tx_needassign_txh) {
1280 		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
1281 
1282 		mutex_enter(&dn->dn_mtx);
1283 		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
1284 			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
1285 		mutex_exit(&dn->dn_mtx);
1286 		tx->tx_needassign_txh = NULL;
1287 	} else {
1288 		/*
1289 		 * If we have a lot of dirty data just wait until we sync
1290 		 * out a TXG at which point we'll hopefully have synced
1291 		 * a portion of the changes.
1292 		 */
1293 		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
1294 	}
1295 
1296 	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
1297 }
1298 
1299 static void
1300 dmu_tx_destroy(dmu_tx_t *tx)
1301 {
1302 	dmu_tx_hold_t *txh;
1303 
1304 	while ((txh = list_head(&tx->tx_holds)) != NULL) {
1305 		dnode_t *dn = txh->txh_dnode;
1306 
1307 		list_remove(&tx->tx_holds, txh);
1308 		zfs_refcount_destroy_many(&txh->txh_space_towrite,
1309 		    zfs_refcount_count(&txh->txh_space_towrite));
1310 		zfs_refcount_destroy_many(&txh->txh_memory_tohold,
1311 		    zfs_refcount_count(&txh->txh_memory_tohold));
1312 		kmem_free(txh, sizeof (dmu_tx_hold_t));
1313 		if (dn != NULL)
1314 			dnode_rele(dn, tx);
1315 	}
1316 
1317 	list_destroy(&tx->tx_callbacks);
1318 	list_destroy(&tx->tx_holds);
1319 	kmem_free(tx, sizeof (dmu_tx_t));
1320 }
1321 
1322 void
1323 dmu_tx_commit(dmu_tx_t *tx)
1324 {
1325 	ASSERT(tx->tx_txg != 0);
1326 
1327 	/*
1328 	 * Go through the transaction's hold list and remove holds on
1329 	 * associated dnodes, notifying waiters if no holds remain.
1330 	 */
1331 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
1332 	    txh = list_next(&tx->tx_holds, txh)) {
1333 		dnode_t *dn = txh->txh_dnode;
1334 
1335 		if (dn == NULL)
1336 			continue;
1337 
1338 		mutex_enter(&dn->dn_mtx);
1339 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1340 
1341 		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1342 			dn->dn_assigned_txg = 0;
1343 			cv_broadcast(&dn->dn_notxholds);
1344 		}
1345 		mutex_exit(&dn->dn_mtx);
1346 	}
1347 
1348 	if (tx->tx_tempreserve_cookie)
1349 		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
1350 
1351 	if (!list_is_empty(&tx->tx_callbacks))
1352 		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);
1353 
1354 	if (tx->tx_anyobj == FALSE)
1355 		txg_rele_to_sync(&tx->tx_txgh);
1356 
1357 	dmu_tx_destroy(tx);
1358 }
1359 
1360 void
1361 dmu_tx_abort(dmu_tx_t *tx)
1362 {
1363 	ASSERT(tx->tx_txg == 0);
1364 
1365 	/*
1366 	 * Call any registered callbacks with an error code.
1367 	 */
1368 	if (!list_is_empty(&tx->tx_callbacks))
1369 		dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));
1370 
1371 	dmu_tx_destroy(tx);
1372 }
1373 
1374 uint64_t
1375 dmu_tx_get_txg(dmu_tx_t *tx)
1376 {
1377 	ASSERT(tx->tx_txg != 0);
1378 	return (tx->tx_txg);
1379 }
1380 
1381 dsl_pool_t *
1382 dmu_tx_pool(dmu_tx_t *tx)
1383 {
1384 	ASSERT(tx->tx_pool != NULL);
1385 	return (tx->tx_pool);
1386 }
1387 
1388 /*
1389  * Register a callback to be executed at the end of a TXG.
1390  *
1391  * Note: This currently exists for outside consumers, specifically the ZFS OSD
1392  * for Lustre. Please do not remove before checking that project. For examples
1393  * on how to use this see `ztest_commit_callback`.
1394  */
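/*
 * A hypothetical usage sketch (my_commit_cb and my_arg_t are made-up
 * names, not part of this file).  The callback runs after the tx's txg
 * has synced, or with an error such as ECANCELED if the tx is aborted:
 *
 *	static void
 *	my_commit_cb(void *arg, int error)
 *	{
 *		my_arg_t *a = arg;
 *		... inspect error, then release a ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, arg);
 */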
1395 void
1396 dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
1397 {
1398 	dmu_tx_callback_t *dcb;
1399 
1400 	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
1401 
1402 	dcb->dcb_func = func;
1403 	dcb->dcb_data = data;
1404 
1405 	list_insert_tail(&tx->tx_callbacks, dcb);
1406 }
1407 
1408 /*
1409  * Call all the commit callbacks on a list, with a given error code.
1410  */
1411 void
1412 dmu_tx_do_callbacks(list_t *cb_list, int error)
1413 {
1414 	dmu_tx_callback_t *dcb;
1415 
1416 	while ((dcb = list_remove_tail(cb_list)) != NULL) {
1417 		dcb->dcb_func(dcb->dcb_data, error);
1418 		kmem_free(dcb, sizeof (dmu_tx_callback_t));
1419 	}
1420 }
1421 
1422 /*
1423  * Interface to hold a bunch of attributes, used when creating new
1424  * files.
1425  * attrsize is the total size of all attributes to be added during
1426  * object creation.
1427  *
1428  * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
1429  */
1430 
1431 /*
1432  * Hold the necessary attribute names for attribute registration.
1433  * This should be a very rare case where it is needed.  If it does
1434  * happen, it would only happen on the first write to the file system.
1435  */
1436 static void
1437 dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
1438 {
1439 	if (!sa->sa_need_attr_registration)
1440 		return;
1441 
1442 	for (int i = 0; i != sa->sa_num_attrs; i++) {
1443 		if (!sa->sa_attr_table[i].sa_registered) {
1444 			if (sa->sa_reg_attr_obj)
1445 				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
1446 				    B_TRUE, sa->sa_attr_table[i].sa_name);
1447 			else
1448 				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
1449 				    B_TRUE, sa->sa_attr_table[i].sa_name);
1450 		}
1451 	}
1452 }
1453 
1454 void
1455 dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
1456 {
1457 	dmu_tx_hold_t *txh;
1458 
1459 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
1460 	    THT_SPILL, 0, 0);
1461 	if (txh != NULL)
1462 		(void) zfs_refcount_add_many(&txh->txh_space_towrite,
1463 		    SPA_OLD_MAXBLOCKSIZE, FTAG);
1464 }
1465 
1466 void
1467 dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
1468 {
1469 	sa_os_t *sa = tx->tx_objset->os_sa;
1470 
1471 	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1472 
1473 	if (tx->tx_objset->os_sa->sa_master_obj == 0)
1474 		return;
1475 
1476 	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
1477 		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1478 	} else {
1479 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1480 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1481 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1482 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1483 	}
1484 
1485 	dmu_tx_sa_registration_hold(sa, tx);
1486 
1487 	if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
1488 		return;
1489 
1490 	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
1491 	    THT_SPILL, 0, 0);
1492 }
1493 
1494 /*
1495  * Hold SA attribute
1496  *
1497  * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1498  *
1499  * may_grow indicates that the attributes for this handle may grow in
1500  * size or be added, in which case the layout ZAP and the spill block
1501  * may also need to be held (see the checks below).
1502  */
1503 void
1504 dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1505 {
1506 	uint64_t object;
1507 	sa_os_t *sa = tx->tx_objset->os_sa;
1508 
1509 	ASSERT(hdl != NULL);
1510 
1511 	object = sa_handle_object(hdl);
1512 
1513 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1514 	DB_DNODE_ENTER(db);
1515 	dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
1516 	DB_DNODE_EXIT(db);
1517 
1518 	if (tx->tx_objset->os_sa->sa_master_obj == 0)
1519 		return;
1520 
1521 	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
1522 	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
1523 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1524 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1525 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1526 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1527 	}
1528 
1529 	dmu_tx_sa_registration_hold(sa, tx);
1530 
1531 	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
1532 		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1533 
1534 	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
1535 		ASSERT(tx->tx_txg == 0);
1536 		dmu_tx_hold_spill(tx, object);
1537 	} else {
1538 		DB_DNODE_ENTER(db);
1539 		if (DB_DNODE(db)->dn_have_spill) {
1540 			ASSERT(tx->tx_txg == 0);
1541 			dmu_tx_hold_spill(tx, object);
1542 		}
1543 		DB_DNODE_EXIT(db);
1544 	}
1545 }
1546 
1547 void
1548 dmu_tx_init(void)
1549 {
1550 	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
1551 	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
1552 	    KSTAT_FLAG_VIRTUAL);
1553 
1554 	if (dmu_tx_ksp != NULL) {
1555 		dmu_tx_ksp->ks_data = &dmu_tx_stats;
1556 		kstat_install(dmu_tx_ksp);
1557 	}
1558 }
1559 
1560 void
1561 dmu_tx_fini(void)
1562 {
1563 	if (dmu_tx_ksp != NULL) {
1564 		kstat_delete(dmu_tx_ksp);
1565 		dmu_tx_ksp = NULL;
1566 	}
1567 }
1568 
1569 #if defined(_KERNEL)
1570 EXPORT_SYMBOL(dmu_tx_create);
1571 EXPORT_SYMBOL(dmu_tx_hold_write);
1572 EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
1573 EXPORT_SYMBOL(dmu_tx_hold_append);
1574 EXPORT_SYMBOL(dmu_tx_hold_append_by_dnode);
1575 EXPORT_SYMBOL(dmu_tx_hold_free);
1576 EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
1577 EXPORT_SYMBOL(dmu_tx_hold_zap);
1578 EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
1579 EXPORT_SYMBOL(dmu_tx_hold_bonus);
1580 EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
1581 EXPORT_SYMBOL(dmu_tx_abort);
1582 EXPORT_SYMBOL(dmu_tx_assign);
1583 EXPORT_SYMBOL(dmu_tx_wait);
1584 EXPORT_SYMBOL(dmu_tx_commit);
1585 EXPORT_SYMBOL(dmu_tx_mark_netfree);
1586 EXPORT_SYMBOL(dmu_tx_get_txg);
1587 EXPORT_SYMBOL(dmu_tx_callback_register);
1588 EXPORT_SYMBOL(dmu_tx_do_callbacks);
1589 EXPORT_SYMBOL(dmu_tx_hold_spill);
1590 EXPORT_SYMBOL(dmu_tx_hold_sa_create);
1591 EXPORT_SYMBOL(dmu_tx_hold_sa);
1592 #endif
1593