xref: /freebsd/sys/contrib/openzfs/module/zfs/dmu_tx.c (revision c27f7d6b9cf6d4ab01cb3d0972726c14e0aca146)
1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3  * CDDL HEADER START
4  *
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or https://opensource.org/licenses/CDDL-1.0.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
25  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
26  * Copyright (c) 2024, 2025, Klara, Inc.
27  */
28 
29 #include <sys/dmu.h>
30 #include <sys/dmu_impl.h>
31 #include <sys/dbuf.h>
32 #include <sys/dmu_tx.h>
33 #include <sys/dmu_objset.h>
34 #include <sys/dsl_dataset.h>
35 #include <sys/dsl_dir.h>
36 #include <sys/dsl_pool.h>
37 #include <sys/zap_impl.h>
38 #include <sys/spa.h>
39 #include <sys/sa.h>
40 #include <sys/sa_impl.h>
41 #include <sys/zfs_context.h>
42 #include <sys/trace_zfs.h>
43 
44 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
45     uint64_t arg1, uint64_t arg2);
46 
47 dmu_tx_stats_t dmu_tx_stats = {
48 	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
49 	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
50 	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
51 	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
52 	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
53 	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
54 	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
55 	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
56 	{ "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
57 	{ "dmu_tx_dirty_over_max",	KSTAT_DATA_UINT64 },
58 	{ "dmu_tx_dirty_frees_delay",	KSTAT_DATA_UINT64 },
59 	{ "dmu_tx_wrlog_delay",		KSTAT_DATA_UINT64 },
60 	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
61 };
62 
63 static kstat_t *dmu_tx_ksp;
64 
65 dmu_tx_t *
66 dmu_tx_create_dd(dsl_dir_t *dd)
67 {
68 	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
69 	tx->tx_dir = dd;
70 	if (dd != NULL)
71 		tx->tx_pool = dd->dd_pool;
72 	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
73 	    offsetof(dmu_tx_hold_t, txh_node));
74 	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
75 	    offsetof(dmu_tx_callback_t, dcb_node));
76 	tx->tx_start = gethrtime();
77 	return (tx);
78 }
79 
80 dmu_tx_t *
81 dmu_tx_create(objset_t *os)
82 {
83 	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
84 	tx->tx_objset = os;
85 	return (tx);
86 }
87 
88 dmu_tx_t *
89 dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
90 {
91 	dmu_tx_t *tx = dmu_tx_create_dd(NULL);
92 
93 	TXG_VERIFY(dp->dp_spa, txg);
94 	tx->tx_pool = dp;
95 	tx->tx_txg = txg;
96 	tx->tx_anyobj = TRUE;
97 
98 	return (tx);
99 }
100 
101 int
102 dmu_tx_is_syncing(dmu_tx_t *tx)
103 {
104 	return (tx->tx_anyobj);
105 }
106 
107 int
108 dmu_tx_private_ok(dmu_tx_t *tx)
109 {
110 	return (tx->tx_anyobj);
111 }
112 
113 static dmu_tx_hold_t *
114 dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
115     uint64_t arg1, uint64_t arg2)
116 {
117 	dmu_tx_hold_t *txh;
118 
119 	if (dn != NULL) {
120 		(void) zfs_refcount_add(&dn->dn_holds, tx);
121 		if (tx->tx_txg != 0) {
122 			mutex_enter(&dn->dn_mtx);
123 			/*
124 			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
125 			 * problem, but there's no way for it to happen (for
126 			 * now, at least).
127 			 */
128 			ASSERT(dn->dn_assigned_txg == 0);
129 			dn->dn_assigned_txg = tx->tx_txg;
130 			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
131 			mutex_exit(&dn->dn_mtx);
132 		}
133 	}
134 
135 	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
136 	txh->txh_tx = tx;
137 	txh->txh_dnode = dn;
138 	zfs_refcount_create(&txh->txh_space_towrite);
139 	zfs_refcount_create(&txh->txh_memory_tohold);
140 	txh->txh_type = type;
141 	txh->txh_arg1 = arg1;
142 	txh->txh_arg2 = arg2;
143 	list_insert_tail(&tx->tx_holds, txh);
144 
145 	return (txh);
146 }
147 
148 static dmu_tx_hold_t *
149 dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
150     enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
151 {
152 	dnode_t *dn = NULL;
153 	dmu_tx_hold_t *txh;
154 	int err;
155 
156 	if (object != DMU_NEW_OBJECT) {
157 		err = dnode_hold(os, object, FTAG, &dn);
158 		if (err != 0) {
159 			tx->tx_err = err;
160 			return (NULL);
161 		}
162 	}
163 	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
164 	if (dn != NULL)
165 		dnode_rele(dn, FTAG);
166 	return (txh);
167 }
168 
169 void
170 dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
171 {
172 	/*
173 	 * If we're syncing, they can manipulate any object anyhow, and
174 	 * the hold on the dnode_t can cause problems.
175 	 */
176 	if (!dmu_tx_is_syncing(tx))
177 		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
178 }
179 
180 /*
181  * This function reads the specified data from disk.  The specified data
182  * will be needed to perform the transaction -- i.e., it will be read after
183  * we do dmu_tx_assign().  There are two reasons that we read the data now
184  * (before dmu_tx_assign()):
185  *
186  * 1. Reading it now has potentially better performance.  The transaction
187  * has not yet been assigned, so the TXG is not held open, and also the
188  * caller typically has fewer locks held when calling dmu_tx_hold_*() than
189  * after the transaction has been assigned.  This reduces the lock (and txg)
190  * hold times, thus reducing lock contention.
191  *
192  * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
193  * that are detected before they start making changes to the DMU state
194  * (i.e. now).  Once the transaction has been assigned, and some DMU
195  * state has been changed, it can be difficult to recover from an i/o
196  * error (e.g. to undo the changes already made in memory at the DMU
197  * layer).  Typically code to do so does not exist in the caller -- it
198  * assumes that the data has already been cached and thus i/o errors are
199  * not possible.
200  *
201  * It has been observed that the i/o initiated here can be a performance
202  * problem, and it appears to be optional, because we don't look at the
203  * data which is read.  However, removing this read would only serve to
204  * move the work elsewhere (after the dmu_tx_assign()), where it may
205  * have a greater impact on performance (in addition to the impact on
206  * fault tolerance noted above).
207  */
208 static int
209 dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
210 {
211 	int err;
212 	dmu_buf_impl_t *db;
213 
214 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
215 	err = dbuf_hold_impl(dn, level, blkid, TRUE, FALSE, FTAG, &db);
216 	rw_exit(&dn->dn_struct_rwlock);
217 	if (err == ENOENT)
218 		return (0);
219 	if (err != 0)
220 		return (err);
221 	/*
222 	 * PARTIAL_FIRST allows caching for uncacheable blocks.  It will
223 	 * be cleared after dmu_buf_will_dirty() calls dbuf_read() again.
224 	 */
225 	err = dbuf_read(db, zio, DB_RF_CANFAIL | DMU_READ_NO_PREFETCH |
226 	    (level == 0 ? (DMU_UNCACHEDIO | DMU_PARTIAL_FIRST) : 0));
227 	dbuf_rele(db, FTAG);
228 	return (err);
229 }
230 
231 static void
232 dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
233 {
234 	dnode_t *dn = txh->txh_dnode;
235 	int err = 0;
236 
237 	if (len == 0)
238 		return;
239 
240 	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);
241 
242 	if (dn == NULL)
243 		return;
244 
245 	/*
246 	 * For i/o error checking, read the blocks that will be needed
247 	 * to perform the write: the first and last level-0 blocks (if
248 	 * they are not aligned, i.e. if they are partial-block writes),
249 	 * and all the level-1 blocks.
250 	 */
251 	if (dn->dn_maxblkid == 0) {
252 		if (off < dn->dn_datablksz &&
253 		    (off > 0 || len < dn->dn_datablksz)) {
254 			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
255 			if (err != 0) {
256 				txh->txh_tx->tx_err = err;
257 			}
258 		}
259 	} else {
260 		zio_t *zio = zio_root(dn->dn_objset->os_spa,
261 		    NULL, NULL, ZIO_FLAG_CANFAIL);
262 
263 		/* first level-0 block */
264 		uint64_t start = off >> dn->dn_datablkshift;
265 		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
266 			err = dmu_tx_check_ioerr(zio, dn, 0, start);
267 			if (err != 0) {
268 				txh->txh_tx->tx_err = err;
269 			}
270 		}
271 
272 		/* last level-0 block */
273 		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
274 		if (end != start && end <= dn->dn_maxblkid &&
275 		    P2PHASE(off + len, dn->dn_datablksz)) {
276 			err = dmu_tx_check_ioerr(zio, dn, 0, end);
277 			if (err != 0) {
278 				txh->txh_tx->tx_err = err;
279 			}
280 		}
281 
282 		/* level-1 blocks */
283 		if (dn->dn_nlevels > 1) {
284 			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
285 			for (uint64_t i = (start >> shft) + 1;
286 			    i < end >> shft; i++) {
287 				err = dmu_tx_check_ioerr(zio, dn, 1, i);
288 				if (err != 0) {
289 					txh->txh_tx->tx_err = err;
290 				}
291 			}
292 		}
293 
294 		err = zio_wait(zio);
295 		if (err != 0) {
296 			txh->txh_tx->tx_err = err;
297 		}
298 	}
299 }
300 
301 static void
302 dmu_tx_count_append(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
303 {
304 	dnode_t *dn = txh->txh_dnode;
305 	int err = 0;
306 
307 	if (len == 0)
308 		return;
309 
310 	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);
311 
312 	if (dn == NULL)
313 		return;
314 
315 	/*
316 	 * For i/o error checking, read the block that will be needed to
317 	 * perform the append: the first level-0 block, if it is not aligned
318 	 * (i.e. a partial-block write).  No additional blocks are read.
319 	 */
320 	if (dn->dn_maxblkid == 0) {
321 		if (off < dn->dn_datablksz &&
322 		    (off > 0 || len < dn->dn_datablksz)) {
323 			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
324 			if (err != 0) {
325 				txh->txh_tx->tx_err = err;
326 			}
327 		}
328 	} else {
329 		zio_t *zio = zio_root(dn->dn_objset->os_spa,
330 		    NULL, NULL, ZIO_FLAG_CANFAIL);
331 
332 		/* first level-0 block */
333 		uint64_t start = off >> dn->dn_datablkshift;
334 		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
335 			err = dmu_tx_check_ioerr(zio, dn, 0, start);
336 			if (err != 0) {
337 				txh->txh_tx->tx_err = err;
338 			}
339 		}
340 
341 		err = zio_wait(zio);
342 		if (err != 0) {
343 			txh->txh_tx->tx_err = err;
344 		}
345 	}
346 }
347 
348 static void
349 dmu_tx_count_dnode(dmu_tx_hold_t *txh)
350 {
351 	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
352 	    DNODE_MIN_SIZE, FTAG);
353 }
354 
355 void
356 dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
357 {
358 	dmu_tx_hold_t *txh;
359 
360 	ASSERT0(tx->tx_txg);
361 	ASSERT3U(len, <=, DMU_MAX_ACCESS);
362 	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
363 
364 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
365 	    object, THT_WRITE, off, len);
366 	if (txh != NULL) {
367 		dmu_tx_count_write(txh, off, len);
368 		dmu_tx_count_dnode(txh);
369 	}
370 }
371 
372 void
373 dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
374 {
375 	dmu_tx_hold_t *txh;
376 
377 	ASSERT0(tx->tx_txg);
378 	ASSERT3U(len, <=, DMU_MAX_ACCESS);
379 	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
380 
381 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
382 	if (txh != NULL) {
383 		dmu_tx_count_write(txh, off, len);
384 		dmu_tx_count_dnode(txh);
385 	}
386 }
387 
388 /*
389  * Should be used when appending to an object and the exact offset is unknown.
390  * The write must occur at or beyond the specified offset.  Only the L0 block
391  * at the provided offset will be prefetched.
392  */
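/*
 * The hold recorded below covers [off, DMU_OBJECT_END) rather than
 * [off, off + len), since the final write offset is not yet known; the
 * supplied len is used only for the space and i/o estimates made by
 * dmu_tx_count_append().
 */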
393 void
394 dmu_tx_hold_append(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
395 {
396 	dmu_tx_hold_t *txh;
397 
398 	ASSERT0(tx->tx_txg);
399 	ASSERT3U(len, <=, DMU_MAX_ACCESS);
400 
401 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
402 	    object, THT_APPEND, off, DMU_OBJECT_END);
403 	if (txh != NULL) {
404 		dmu_tx_count_append(txh, off, len);
405 		dmu_tx_count_dnode(txh);
406 	}
407 }
408 
409 void
410 dmu_tx_hold_append_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
411 {
412 	dmu_tx_hold_t *txh;
413 
414 	ASSERT0(tx->tx_txg);
415 	ASSERT3U(len, <=, DMU_MAX_ACCESS);
416 
417 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_APPEND, off, DMU_OBJECT_END);
418 	if (txh != NULL) {
419 		dmu_tx_count_append(txh, off, len);
420 		dmu_tx_count_dnode(txh);
421 	}
422 }
423 
424 /*
425  * This function marks the transaction as being a "net free".  The end
426  * result is that refquotas will be disabled for this transaction, and
427  * this transaction will be able to use half of the pool space overhead
428  * (see dsl_pool_adjustedsize()).  Therefore this function should only
429  * be called for transactions that we expect will not cause a net increase
430  * in the amount of space used (but it's OK if that is occasionally not true).
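 *
 * An illustrative sketch of a typical caller (a real free path would hold
 * whatever else it needs before assigning):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	error = dmu_tx_assign(tx, DMU_TX_WAIT);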
431  */
432 void
433 dmu_tx_mark_netfree(dmu_tx_t *tx)
434 {
435 	tx->tx_netfree = B_TRUE;
436 }
437 
438 static void
439 dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
440 {
441 	dmu_tx_t *tx = txh->txh_tx;
442 	dnode_t *dn = txh->txh_dnode;
443 	int err;
444 
445 	ASSERT(tx->tx_txg == 0);
446 
447 	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
448 		return;
449 	if (len == DMU_OBJECT_END)
450 		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;
451 
452 	/*
453 	 * For i/o error checking, we read the first and last level-0
454 	 * blocks if they are not aligned, and all the level-1 blocks.
455 	 *
456 	 * Note:  dbuf_free_range() assumes that we have not instantiated
457 	 * any level-0 dbufs that will be completely freed.  Therefore we must
458 	 * exercise care to not read or count the first and last blocks
459 	 * if they are blocksize-aligned.
460 	 */
461 	if (dn->dn_datablkshift == 0) {
462 		if (off != 0 || len < dn->dn_datablksz)
463 			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
464 	} else {
465 		/* first block will be modified if it is not aligned */
466 		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
467 			dmu_tx_count_write(txh, off, 1);
468 		/* last block will be modified if it is not aligned */
469 		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
470 			dmu_tx_count_write(txh, off + len, 1);
471 	}
472 
473 	/*
474 	 * Check level-1 blocks.
475 	 */
476 	if (dn->dn_nlevels > 1) {
477 		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
478 		    SPA_BLKPTRSHIFT;
479 		uint64_t start = off >> shift;
480 		uint64_t end = (off + len) >> shift;
481 
482 		ASSERT(dn->dn_indblkshift != 0);
483 
484 		/*
485 		 * dnode_reallocate() can result in an object with indirect
486 		 * blocks having an odd data block size.  In this case,
487 		 * just check the single block.
488 		 */
489 		if (dn->dn_datablkshift == 0)
490 			start = end = 0;
491 
492 		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
493 		    NULL, NULL, ZIO_FLAG_CANFAIL);
494 		for (uint64_t i = start; i <= end; i++) {
495 			uint64_t ibyte = i << shift;
496 			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
497 			i = ibyte >> shift;
498 			if (err == ESRCH || i > end)
499 				break;
500 			if (err != 0) {
501 				tx->tx_err = err;
502 				(void) zio_wait(zio);
503 				return;
504 			}
505 
506 			(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
507 			    1 << dn->dn_indblkshift, FTAG);
508 
509 			err = dmu_tx_check_ioerr(zio, dn, 1, i);
510 			if (err != 0) {
511 				tx->tx_err = err;
512 				(void) zio_wait(zio);
513 				return;
514 			}
515 		}
516 		err = zio_wait(zio);
517 		if (err != 0) {
518 			tx->tx_err = err;
519 			return;
520 		}
521 	}
522 }
523 
524 void
525 dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
526 {
527 	dmu_tx_hold_t *txh;
528 
529 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
530 	    object, THT_FREE, off, len);
531 	if (txh != NULL) {
532 		dmu_tx_count_dnode(txh);
533 		dmu_tx_count_free(txh, off, len);
534 	}
535 }
536 
537 void
538 dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
539 {
540 	dmu_tx_hold_t *txh;
541 
542 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
543 	if (txh != NULL) {
544 		dmu_tx_count_dnode(txh);
545 		dmu_tx_count_free(txh, off, len);
546 	}
547 }
548 
549 static void
550 dmu_tx_count_clone(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
551 {
552 
553 	/*
554 	 * Reuse dmu_tx_count_free(); it does exactly what we need for clone.
555 	 */
556 	dmu_tx_count_free(txh, off, len);
557 }
558 
559 void
560 dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
561 {
562 	dmu_tx_hold_t *txh;
563 
564 	ASSERT0(tx->tx_txg);
565 	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
566 
567 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_CLONE, off, len);
568 	if (txh != NULL) {
569 		dmu_tx_count_dnode(txh);
570 		dmu_tx_count_clone(txh, off, len);
571 	}
572 }
573 
574 static void
575 dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
576 {
577 	dmu_tx_t *tx = txh->txh_tx;
578 	dnode_t *dn = txh->txh_dnode;
579 	int err;
580 
581 	ASSERT(tx->tx_txg == 0);
582 
583 	dmu_tx_count_dnode(txh);
584 
585 	/*
586 	 * Modifying an almost-full microzap is around the worst case (128KB).
587 	 *
588 	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
589 	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
590 	 * - 4 new blocks written if adding:
591 	 *    - 2 blocks for possibly split leaves,
592 	 *    - 2 grown ptrtbl blocks
593 	 */
594 	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
595 	    zap_get_micro_max_size(tx->tx_pool->dp_spa), FTAG);
596 
597 	if (dn == NULL)
598 		return;
599 
600 	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);
601 
602 	if (dn->dn_maxblkid == 0 || name == NULL) {
603 		/*
604 		 * This is a microzap (only one block), or we don't know
605 		 * the name.  Check the first block for i/o errors.
606 		 */
607 		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
608 		if (err != 0) {
609 			tx->tx_err = err;
610 		}
611 	} else {
612 		/*
613 		 * Access the name so that we'll check for i/o errors to
614 		 * the leaf blocks, etc.  We ignore ENOENT, as this name
615 		 * may not yet exist.
616 		 */
617 		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
618 		if (err == EIO || err == ECKSUM || err == ENXIO) {
619 			tx->tx_err = err;
620 		}
621 	}
622 }
623 
624 void
625 dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
626 {
627 	dmu_tx_hold_t *txh;
628 
629 	ASSERT0(tx->tx_txg);
630 
631 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
632 	    object, THT_ZAP, add, (uintptr_t)name);
633 	if (txh != NULL)
634 		dmu_tx_hold_zap_impl(txh, name);
635 }
636 
637 void
638 dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
639 {
640 	dmu_tx_hold_t *txh;
641 
642 	ASSERT0(tx->tx_txg);
643 	ASSERT(dn != NULL);
644 
645 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
646 	if (txh != NULL)
647 		dmu_tx_hold_zap_impl(txh, name);
648 }
649 
650 void
651 dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
652 {
653 	dmu_tx_hold_t *txh;
654 
655 	ASSERT(tx->tx_txg == 0);
656 
657 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
658 	    object, THT_BONUS, 0, 0);
659 	if (txh)
660 		dmu_tx_count_dnode(txh);
661 }
662 
663 void
664 dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
665 {
666 	dmu_tx_hold_t *txh;
667 
668 	ASSERT0(tx->tx_txg);
669 
670 	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
671 	if (txh)
672 		dmu_tx_count_dnode(txh);
673 }
674 
675 void
676 dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
677 {
678 	dmu_tx_hold_t *txh;
679 
680 	ASSERT(tx->tx_txg == 0);
681 
682 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
683 	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
684 	if (txh) {
685 		(void) zfs_refcount_add_many(
686 		    &txh->txh_space_towrite, space, FTAG);
687 	}
688 }
689 
690 #ifdef ZFS_DEBUG
691 void
692 dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
693 {
694 	boolean_t match_object = B_FALSE;
695 	boolean_t match_offset = B_FALSE;
696 
697 	DB_DNODE_ENTER(db);
698 	dnode_t *dn = DB_DNODE(db);
699 	ASSERT(tx->tx_txg != 0);
700 	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
701 	ASSERT3U(dn->dn_object, ==, db->db.db_object);
702 
703 	if (tx->tx_anyobj) {
704 		DB_DNODE_EXIT(db);
705 		return;
706 	}
707 
708 	/* XXX No checking on the meta dnode for now */
709 	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
710 		DB_DNODE_EXIT(db);
711 		return;
712 	}
713 
714 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
715 	    txh = list_next(&tx->tx_holds, txh)) {
716 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
717 		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
718 			match_object = TRUE;
719 		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
720 			int datablkshift = dn->dn_datablkshift ?
721 			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
722 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
723 			int shift = datablkshift + epbs * db->db_level;
724 			uint64_t beginblk = shift >= 64 ? 0 :
725 			    (txh->txh_arg1 >> shift);
726 			uint64_t endblk = shift >= 64 ? 0 :
727 			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
728 			uint64_t blkid = db->db_blkid;
729 
730 			/* XXX txh_arg2 better not be zero... */
731 
732 			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
733 			    txh->txh_type, (u_longlong_t)beginblk,
734 			    (u_longlong_t)endblk);
735 
736 			switch (txh->txh_type) {
737 			case THT_WRITE:
738 				if (blkid >= beginblk && blkid <= endblk)
739 					match_offset = TRUE;
740 				/*
741 				 * We will let this hold work for the bonus
742 				 * or spill buffer so that we don't need to
743 				 * hold it when creating a new object.
744 				 */
745 				if (blkid == DMU_BONUS_BLKID ||
746 				    blkid == DMU_SPILL_BLKID)
747 					match_offset = TRUE;
748 				/*
749 				 * They might have to increase nlevels,
750 				 * thus dirtying the new TLIBs.  Or they
751 				 * might have to change the block size,
752 				 * thus dirtying the new lvl=0 blk=0.
753 				 */
754 				if (blkid == 0)
755 					match_offset = TRUE;
756 				break;
757 			case THT_APPEND:
758 				if (blkid >= beginblk && (blkid <= endblk ||
759 				    txh->txh_arg2 == DMU_OBJECT_END))
760 					match_offset = TRUE;
761 
762 				/*
763 				 * THT_WRITE is used for bonus and spill blocks.
764 				 */
765 				ASSERT(blkid != DMU_BONUS_BLKID &&
766 				    blkid != DMU_SPILL_BLKID);
767 
768 				/*
769 				 * They might have to increase nlevels,
770 				 * thus dirtying the new TLIBs.  Or they
771 				 * might have to change the block size,
772 				 * thus dirtying the new lvl=0 blk=0.
773 				 */
774 				if (blkid == 0)
775 					match_offset = TRUE;
776 				break;
777 			case THT_FREE:
778 				/*
779 				 * We will dirty all the level 1 blocks in
780 				 * the free range and perhaps the first and
781 				 * last level 0 block.
782 				 */
783 				if (blkid >= beginblk && (blkid <= endblk ||
784 				    txh->txh_arg2 == DMU_OBJECT_END))
785 					match_offset = TRUE;
786 				break;
787 			case THT_SPILL:
788 				if (blkid == DMU_SPILL_BLKID)
789 					match_offset = TRUE;
790 				break;
791 			case THT_BONUS:
792 				if (blkid == DMU_BONUS_BLKID)
793 					match_offset = TRUE;
794 				break;
795 			case THT_ZAP:
796 				match_offset = TRUE;
797 				break;
798 			case THT_NEWOBJECT:
799 				match_object = TRUE;
800 				break;
801 			case THT_CLONE:
802 				if (blkid >= beginblk && blkid <= endblk)
803 					match_offset = TRUE;
804 				/*
805 				 * They might have to increase nlevels,
806 				 * thus dirtying the new TLIBs.  Or they
807 				 * might have to change the block size,
808 				 * thus dirtying the new lvl=0 blk=0.
809 				 */
810 				if (blkid == 0)
811 					match_offset = TRUE;
812 				break;
813 			default:
814 				cmn_err(CE_PANIC, "bad txh_type %d",
815 				    txh->txh_type);
816 			}
817 		}
818 		if (match_object && match_offset) {
819 			DB_DNODE_EXIT(db);
820 			return;
821 		}
822 	}
823 	DB_DNODE_EXIT(db);
824 	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
825 	    (u_longlong_t)db->db.db_object, db->db_level,
826 	    (u_longlong_t)db->db_blkid);
827 }
828 #endif
829 
830 /*
831  * If we can't do 10 iops, something is wrong.  Let us go ahead
832  * and hit zfs_dirty_data_max.
833  */
834 static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */
835 
836 /*
837  * We delay transactions when we've determined that the backend storage
838  * isn't able to accommodate the rate of incoming writes.
839  *
840  * If there is already a transaction waiting, we delay relative to when
841  * that transaction finishes waiting.  This way the calculated min_time
842  * is independent of the number of threads concurrently executing
843  * transactions.
844  *
845  * If we are the only waiter, wait relative to when the transaction
846  * started, rather than the current time.  This credits the transaction for
847  * "time already served", e.g. reading indirect blocks.
848  *
849  * The minimum time for a transaction to take is calculated as:
850  *     min_time = scale * (dirty - min) / (max - dirty)
851  *     min_time is then capped at zfs_delay_max_ns.
852  *
853  * The delay has two degrees of freedom that can be adjusted via tunables.
854  * The percentage of dirty data at which we start to delay is defined by
855  * zfs_delay_min_dirty_percent. This should typically be at or above
856  * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
857  * delay after writing at full speed has failed to keep up with the incoming
858  * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
859  * speaking, this variable determines the amount of delay at the midpoint of
860  * the curve.
861  *
862  * delay
863  *  10ms +-------------------------------------------------------------*+
864  *       |                                                             *|
865  *   9ms +                                                             *+
866  *       |                                                             *|
867  *   8ms +                                                             *+
868  *       |                                                            * |
869  *   7ms +                                                            * +
870  *       |                                                            * |
871  *   6ms +                                                            * +
872  *       |                                                            * |
873  *   5ms +                                                           *  +
874  *       |                                                           *  |
875  *   4ms +                                                           *  +
876  *       |                                                           *  |
877  *   3ms +                                                          *   +
878  *       |                                                          *   |
879  *   2ms +                                              (midpoint) *    +
880  *       |                                                  |    **     |
881  *   1ms +                                                  v ***       +
882  *       |             zfs_delay_scale ---------->     ********         |
883  *     0 +-------------------------------------*********----------------+
884  *       0%                    <- zfs_dirty_data_max ->               100%
885  *
886  * Note that since the delay is added to the outstanding time remaining on the
887  * most recent transaction, the delay is effectively the inverse of IOPS.
888  * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
889  * was chosen such that small changes in the amount of accumulated dirty data
890  * in the first 3/4 of the curve yield relatively small differences in the
891  * amount of delay.
892  *
893  * The effects can be easier to understand when the amount of delay is
894  * represented on a log scale:
895  *
896  * delay
897  * 100ms +-------------------------------------------------------------++
898  *       +                                                              +
899  *       |                                                              |
900  *       +                                                             *+
901  *  10ms +                                                             *+
902  *       +                                                           ** +
903  *       |                                              (midpoint)  **  |
904  *       +                                                  |     **    +
905  *   1ms +                                                  v ****      +
906  *       +             zfs_delay_scale ---------->        *****         +
907  *       |                                             ****             |
908  *       +                                          ****                +
909  * 100us +                                        **                    +
910  *       +                                       *                      +
911  *       |                                      *                       |
912  *       +                                     *                        +
913  *  10us +                                     *                        +
914  *       +                                                              +
915  *       |                                                              |
916  *       +                                                              +
917  *       +--------------------------------------------------------------+
918  *       0%                    <- zfs_dirty_data_max ->               100%
919  *
920  * Note here that only as the amount of dirty data approaches its limit does
921  * the delay start to increase rapidly. The goal of a properly tuned system
922  * should be to keep the amount of dirty data out of that range by first
923  * ensuring that the appropriate limits are set for the I/O scheduler to reach
924  * optimal throughput on the backend storage, and then by changing the value
925  * of zfs_delay_scale to increase the steepness of the curve.
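 *
 * As a worked example (assuming the default tunables, i.e.
 * zfs_delay_min_dirty_percent = 60 and zfs_delay_scale = 500,000ns), with
 * zfs_dirty_data_max = 4GB the delay begins once more than 2.4GB is dirty.
 * At 3.2GB dirty -- the midpoint of the remaining range -- the minimum
 * transaction time is:
 *
 *     500,000 * (3.2GB - 2.4GB) / (4GB - 3.2GB) = 500,000ns = 500us
 *
 * which is zfs_delay_scale itself, matching the midpoint marked in the
 * graphs above.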
926  */
927 static void
928 dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
929 {
930 	dsl_pool_t *dp = tx->tx_pool;
931 	uint64_t delay_min_bytes, wrlog;
932 	hrtime_t wakeup, tx_time = 0, now;
933 
934 	/* Calculate minimum transaction time for the dirty data amount. */
935 	delay_min_bytes =
936 	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
937 	if (dirty > delay_min_bytes) {
938 		/*
939 		 * The caller has already waited until we are under the max.
940 		 * We make them pass us the amount of dirty data so we don't
941 		 * have to handle the case of it being >= the max, which
942 		 * could cause a divide-by-zero if it's == the max.
943 		 */
944 		ASSERT3U(dirty, <, zfs_dirty_data_max);
945 
946 		tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
947 		    (zfs_dirty_data_max - dirty);
948 	}
949 
950 	/* Calculate minimum transaction time for the TX_WRITE log size. */
951 	wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
952 	delay_min_bytes =
953 	    zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
954 	if (wrlog >= zfs_wrlog_data_max) {
955 		tx_time = zfs_delay_max_ns;
956 	} else if (wrlog > delay_min_bytes) {
957 		tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
958 		    (zfs_wrlog_data_max - wrlog), tx_time);
959 	}
960 
961 	if (tx_time == 0)
962 		return;
963 
964 	tx_time = MIN(tx_time, zfs_delay_max_ns);
965 	now = gethrtime();
966 	if (now > tx->tx_start + tx_time)
967 		return;
968 
969 	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
970 	    uint64_t, tx_time);
971 
972 	mutex_enter(&dp->dp_lock);
973 	wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
974 	dp->dp_last_wakeup = wakeup;
975 	mutex_exit(&dp->dp_lock);
976 
977 	zfs_sleep_until(wakeup);
978 }
979 
980 /*
981  * This routine attempts to assign the transaction to a transaction group.
982  * To do so, we must determine if there is sufficient free space on disk.
983  *
984  * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
985  * on it), then it is assumed that there is sufficient free space,
986  * unless there's insufficient slop space in the pool (see the comment
987  * above spa_slop_shift in spa_misc.c).
988  *
989  * If it is not a "netfree" transaction, then if the data already on disk
990  * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
991  * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
992  * plus the rough estimate of this transaction's changes, may exceed the
993  * allowed usage, then this will fail with ERESTART, which will cause the
994  * caller to wait for the pending changes to be written to disk (by waiting
995  * for the next TXG to open), and then check the space usage again.
996  *
997  * The rough estimate of pending changes is comprised of the sum of:
998  *
999  *  - this transaction's holds' txh_space_towrite
1000  *
1001  *  - dd_tempreserved[], which is the sum of in-flight transactions'
1002  *    holds' txh_space_towrite (i.e. those transactions that have called
1003  *    dmu_tx_assign() but not yet called dmu_tx_commit()).
1004  *
1005  *  - dd_space_towrite[], which is the amount of dirtied dbufs.
1006  *
1007  * Note that all of these values are inflated by spa_get_worst_case_asize(),
1008  * which means that we may get ERESTART well before we are actually in danger
1009  * of running out of space, but this also mitigates any small inaccuracies
1010  * in the rough estimate (e.g. txh_space_towrite doesn't take into account
1011  * indirect blocks, and dd_space_towrite[] doesn't take into account changes
1012  * to the MOS).
1013  *
1014  * Note that due to this algorithm, it is possible to exceed the allowed
1015  * usage by one transaction.  Also, as we approach the allowed usage,
1016  * we will allow a very limited amount of changes into each TXG, thus
1017  * decreasing performance.
1018  */
1019 static int
1020 dmu_tx_try_assign(dmu_tx_t *tx)
1021 {
1022 	spa_t *spa = tx->tx_pool->dp_spa;
1023 
1024 	ASSERT0(tx->tx_txg);
1025 
1026 	if (tx->tx_err) {
1027 		DMU_TX_STAT_BUMP(dmu_tx_error);
1028 		return (tx->tx_err);
1029 	}
1030 
1031 	if (spa_suspended(spa)) {
1032 		DMU_TX_STAT_BUMP(dmu_tx_suspended);
1033 
1034 		/*
1035 		 * Let dmu_tx_assign() know specifically what happened, so
1036 		 * it can make the right choice based on the caller flags.
1037 		 */
1038 		return (SET_ERROR(ESHUTDOWN));
1039 	}
1040 
1041 	if (!tx->tx_dirty_delayed &&
1042 	    dsl_pool_need_wrlog_delay(tx->tx_pool)) {
1043 		tx->tx_wait_dirty = B_TRUE;
1044 		DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
1045 		return (SET_ERROR(ERESTART));
1046 	}
1047 
1048 	if (!tx->tx_dirty_delayed &&
1049 	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
1050 		tx->tx_wait_dirty = B_TRUE;
1051 		DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
1052 		return (SET_ERROR(ERESTART));
1053 	}
1054 
1055 	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
1056 	tx->tx_needassign_txh = NULL;
1057 
1058 	/*
1059 	 * NB: No error returns are allowed after txg_hold_open, but
1060 	 * before processing the dnode holds, due to the
1061 	 * dmu_tx_unassign() logic.
1062 	 */
1063 
1064 	uint64_t towrite = 0;
1065 	uint64_t tohold = 0;
1066 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
1067 	    txh = list_next(&tx->tx_holds, txh)) {
1068 		dnode_t *dn = txh->txh_dnode;
1069 		if (dn != NULL) {
1070 			/*
1071 			 * This thread can't hold the dn_struct_rwlock
1072 			 * while assigning the tx, because this can lead to
1073 			 * deadlock. Specifically, if this dnode is already
1074 			 * assigned to an earlier txg, this thread may need
1075 			 * to wait for that txg to sync (the ERESTART case
1076 			 * below).  The other thread that has assigned this
1077 			 * dnode to an earlier txg prevents this txg from
1078 			 * syncing until its tx can complete (calling
1079 			 * dmu_tx_commit()), but it may need to acquire the
1080 			 * dn_struct_rwlock to do so (e.g. via
1081 			 * dmu_buf_hold*()).
1082 			 *
1083 			 * Note that this thread can't hold the lock for
1084 			 * read either, but the rwlock doesn't record
1085 			 * enough information to make that assertion.
1086 			 */
1087 			ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));
1088 
1089 			mutex_enter(&dn->dn_mtx);
1090 			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
1091 				mutex_exit(&dn->dn_mtx);
1092 				tx->tx_needassign_txh = txh;
1093 				DMU_TX_STAT_BUMP(dmu_tx_group);
1094 				return (SET_ERROR(ERESTART));
1095 			}
1096 			if (dn->dn_assigned_txg == 0)
1097 				dn->dn_assigned_txg = tx->tx_txg;
1098 			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1099 			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
1100 			mutex_exit(&dn->dn_mtx);
1101 		}
1102 		towrite += zfs_refcount_count(&txh->txh_space_towrite);
1103 		tohold += zfs_refcount_count(&txh->txh_memory_tohold);
1104 	}
1105 
1106 	/* needed allocation: worst-case estimate of write space */
1107 	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
1108 	/* calculate memory footprint estimate */
1109 	uint64_t memory = towrite + tohold;
1110 
1111 	if (tx->tx_dir != NULL && asize != 0) {
1112 		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
1113 		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
1114 		if (err != 0)
1115 			return (err);
1116 	}
1117 
1118 	DMU_TX_STAT_BUMP(dmu_tx_assigned);
1119 
1120 	return (0);
1121 }
1122 
1123 static void
1124 dmu_tx_unassign(dmu_tx_t *tx)
1125 {
1126 	if (tx->tx_txg == 0)
1127 		return;
1128 
1129 	txg_rele_to_quiesce(&tx->tx_txgh);
1130 
1131 	/*
1132 	 * Walk the transaction's hold list, removing the hold on the
1133 	 * associated dnode, and notifying waiters if the refcount drops to 0.
1134 	 */
1135 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
1136 	    txh && txh != tx->tx_needassign_txh;
1137 	    txh = list_next(&tx->tx_holds, txh)) {
1138 		dnode_t *dn = txh->txh_dnode;
1139 
1140 		if (dn == NULL)
1141 			continue;
1142 		mutex_enter(&dn->dn_mtx);
1143 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1144 
1145 		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1146 			dn->dn_assigned_txg = 0;
1147 			cv_broadcast(&dn->dn_notxholds);
1148 		}
1149 		mutex_exit(&dn->dn_mtx);
1150 	}
1151 
1152 	txg_rele_to_sync(&tx->tx_txgh);
1153 
1154 	tx->tx_lasttried_txg = tx->tx_txg;
1155 	tx->tx_txg = 0;
1156 }
1157 
1158 /*
1159  * Assign tx to a transaction group; `flags` is a bitmask:
1160  *
1161  * If DMU_TX_WAIT is set and the currently open txg is full, this function
1162  * will wait until there's a new txg. This should be used when no locks
1163  * are being held. With this bit set, this function will only fail if
1164  * we're truly out of space (or over quota).
1165  *
1166  * If DMU_TX_WAIT is *not* set and we can't assign into the currently open
1167  * txg without blocking, this function will return immediately with
1168  * ERESTART. This should be used whenever locks are being held.  On an
1169  * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
1170  * and try again.
1171  *
1172  * If DMU_TX_NOTHROTTLE is set, this indicates that this tx should not be
1173  * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
1174  * details on the throttle). This is used by the VFS operations, after
1175  * they have already called dmu_tx_wait() (though most likely on a
1176  * different tx).
1177  *
1178  * If DMU_TX_SUSPEND is set, this indicates that this tx should ignore
1179  * the pool being or becoming suspended while it is in progress.  This will
1180  * cause dmu_tx_assign() (and dmu_tx_wait()) to block until the pool resumes.
1181  * If this flag is not set and the pool suspends, the return will be either
1182  * ERESTART or EIO, depending on the value of the pool's failmode= property.
1183  *
1184  * It is guaranteed that subsequent successful calls to dmu_tx_assign()
1185  * will assign the tx to monotonically increasing txgs. Of course this is
1186  * not strong monotonicity, because the same txg can be returned multiple
1187  * times in a row. This guarantee holds both for subsequent calls from
1188  * one thread and for multiple threads. For example, it is impossible to
1189  * observe the following sequence of events:
1190  *
1191  *          Thread 1                            Thread 2
1192  *
1193  *     dmu_tx_assign(T1, ...)
1194  *     1 <- dmu_tx_get_txg(T1)
1195  *                                       dmu_tx_assign(T2, ...)
1196  *                                       2 <- dmu_tx_get_txg(T2)
1197  *     dmu_tx_assign(T3, ...)
1198  *     1 <- dmu_tx_get_txg(T3)
1199  */
 */
1200 int
1201 dmu_tx_assign(dmu_tx_t *tx, dmu_tx_flag_t flags)
1202 {
1203 	int err;
1204 
1205 	ASSERT(tx->tx_txg == 0);
1206 	ASSERT0(flags & ~(DMU_TX_WAIT | DMU_TX_NOTHROTTLE | DMU_TX_SUSPEND));
1207 	IMPLY(flags & DMU_TX_SUSPEND, flags & DMU_TX_WAIT);
1208 	ASSERT(!dsl_pool_sync_context(tx->tx_pool));
1209 
1210 	/* If we might wait, we must not hold the config lock. */
1211 	IMPLY((flags & DMU_TX_WAIT), !dsl_pool_config_held(tx->tx_pool));
1212 
1213 	if ((flags & DMU_TX_NOTHROTTLE))
1214 		tx->tx_dirty_delayed = B_TRUE;
1215 
1216 	if (!(flags & DMU_TX_SUSPEND))
1217 		tx->tx_break_on_suspend = B_TRUE;
1218 
1219 	while ((err = dmu_tx_try_assign(tx)) != 0) {
1220 		dmu_tx_unassign(tx);
1221 
1222 		boolean_t suspended = (err == ESHUTDOWN);
1223 		if (suspended) {
1224 			/*
1225 			 * Pool suspended. We need to decide whether to block
1226 			 * and retry, or return error, depending on the
1227 			 * caller's flags and the pool config.
1228 			 */
1229 			if (flags & DMU_TX_SUSPEND)
1230 				/*
1231 				 * The caller expressly does not care about
1232 				 * suspend, so treat it as a normal retry.
1233 				 */
1234 				err = SET_ERROR(ERESTART);
1235 			else if ((flags & DMU_TX_WAIT) &&
1236 			    spa_get_failmode(tx->tx_pool->dp_spa) ==
1237 			    ZIO_FAILURE_MODE_CONTINUE)
1238 				/*
1239 				 * Caller wants to wait, but pool config is
1240 				 * overriding that, so return EIO to be
1241 				 * propagated back to userspace.
1242 				 */
1243 				err = SET_ERROR(EIO);
1244 			else
1245 				/* Anything else, we should just block. */
1246 				err = SET_ERROR(ERESTART);
1247 		}
1248 
1249 		/*
1250 		 * Return unless we decided to retry, or the caller does not
1251 		 * want to block.
1252 		 */
1253 		if (err != ERESTART || !(flags & DMU_TX_WAIT))
1254 			return (err);
1255 
1256 		/*
1257 		 * Wait until there's room in this txg, or until it's been
1258 		 * synced out and a new one is available.
1259 		 *
1260 		 * If we're here because the pool suspended above, then we
1261 		 * unset tx_break_on_suspend to make sure that if dmu_tx_wait()
1262 		 * has to fall back to a txg_wait_synced_flags(), it doesn't
1263 		 * immediately return because the pool is suspended. That would
1264 		 * then immediately return here, and we'd end up in a busy loop
1265 		 * until the pool resumes.
1266 		 *
1267 		 * On the other hand, if the pool hasn't suspended yet, then it
1268 		 * should be allowed to break a txg wait if the pool does
1269 		 * suspend, so we can loop and reassess it in
1270 		 * dmu_tx_try_assign().
1271 		 */
1272 		if (suspended)
1273 			tx->tx_break_on_suspend = B_FALSE;
1274 
1275 		dmu_tx_wait(tx);
1276 
1277 		/*
1278 		 * Reset tx_break_on_suspend for DMU_TX_SUSPEND. We do this
1279 		 * here so that it's available if we return for some other
1280 		 * reason, and then the caller calls dmu_tx_wait().
1281 		 */
1282 		if (!(flags & DMU_TX_SUSPEND))
1283 			tx->tx_break_on_suspend = B_TRUE;
1284 	}
1285 
1286 	txg_rele_to_quiesce(&tx->tx_txgh);
1287 
1288 	return (0);
1289 }
1290 
1291 void
1292 dmu_tx_wait(dmu_tx_t *tx)
1293 {
1294 	spa_t *spa = tx->tx_pool->dp_spa;
1295 	dsl_pool_t *dp = tx->tx_pool;
1296 	hrtime_t before;
1297 
1298 	ASSERT(tx->tx_txg == 0);
1299 	ASSERT(!dsl_pool_config_held(tx->tx_pool));
1300 
1301 	/*
1302 	 * Break on suspend according to whether or not DMU_TX_SUSPEND was
1303 	 * supplied to the previous dmu_tx_assign() call. For clients, this
1304 	 * ensures that after dmu_tx_assign() fails, the followup dmu_tx_wait()
1305 	 * gets the same behaviour wrt suspend. See also the comments in
1306 	 * dmu_tx_assign().
1307 	 */
1308 	txg_wait_flag_t flags =
1309 	    (tx->tx_break_on_suspend ? TXG_WAIT_SUSPEND : TXG_WAIT_NONE);
1310 
1311 	before = gethrtime();
1312 
1313 	if (tx->tx_wait_dirty) {
1314 		uint64_t dirty;
1315 
1316 		/*
1317 		 * dmu_tx_try_assign() has determined that we need to wait
1318 		 * because we've consumed much or all of the dirty buffer
1319 		 * space.
1320 		 */
1321 		mutex_enter(&dp->dp_lock);
1322 		if (dp->dp_dirty_total >= zfs_dirty_data_max)
1323 			DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
1324 		while (dp->dp_dirty_total >= zfs_dirty_data_max)
1325 			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
1326 		dirty = dp->dp_dirty_total;
1327 		mutex_exit(&dp->dp_lock);
1328 
1329 		dmu_tx_delay(tx, dirty);
1330 
1331 		tx->tx_wait_dirty = B_FALSE;
1332 
1333 		/*
1334 		 * Note: setting tx_dirty_delayed only has effect if the
1335 		 * caller used DMU_TX_WAIT.  Otherwise they are going to
1336 		 * destroy this tx and try again.  The common case,
1337 		 * zfs_write(), uses DMU_TX_WAIT.
1338 		 */
1339 		tx->tx_dirty_delayed = B_TRUE;
1340 	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
1341 		/*
1342 		 * If the pool is suspended we need to wait until it
1343 		 * is resumed.  Note that it's possible that the pool
1344 		 * has become active after this thread has tried to
1345 		 * obtain a tx.  If that's the case then tx_lasttried_txg
1346 		 * would not have been set.
1347 		 */
1348 		txg_wait_synced_flags(dp, spa_last_synced_txg(spa) + 1, flags);
1349 	} else if (tx->tx_needassign_txh) {
1350 		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
1351 
1352 		mutex_enter(&dn->dn_mtx);
1353 		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
1354 			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
1355 		mutex_exit(&dn->dn_mtx);
1356 		tx->tx_needassign_txh = NULL;
1357 	} else {
1358 		/*
1359 		 * If we have a lot of dirty data just wait until we sync
1360 		 * out a TXG at which point we'll hopefully have synced
1361 		 * a portion of the changes.
1362 		 */
1363 		txg_wait_synced_flags(dp, spa_last_synced_txg(spa) + 1, flags);
1364 	}
1365 
1366 	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
1367 }
1368 
1369 static void
1370 dmu_tx_destroy(dmu_tx_t *tx)
1371 {
1372 	dmu_tx_hold_t *txh;
1373 
1374 	while ((txh = list_head(&tx->tx_holds)) != NULL) {
1375 		dnode_t *dn = txh->txh_dnode;
1376 
1377 		list_remove(&tx->tx_holds, txh);
1378 		zfs_refcount_destroy_many(&txh->txh_space_towrite,
1379 		    zfs_refcount_count(&txh->txh_space_towrite));
1380 		zfs_refcount_destroy_many(&txh->txh_memory_tohold,
1381 		    zfs_refcount_count(&txh->txh_memory_tohold));
1382 		kmem_free(txh, sizeof (dmu_tx_hold_t));
1383 		if (dn != NULL)
1384 			dnode_rele(dn, tx);
1385 	}
1386 
1387 	list_destroy(&tx->tx_callbacks);
1388 	list_destroy(&tx->tx_holds);
1389 	kmem_free(tx, sizeof (dmu_tx_t));
1390 }
1391 
1392 void
1393 dmu_tx_commit(dmu_tx_t *tx)
1394 {
1395 	ASSERT(tx->tx_txg != 0);
1396 
1397 	/*
1398 	 * Go through the transaction's hold list and remove holds on
1399 	 * associated dnodes, notifying waiters if no holds remain.
1400 	 */
1401 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
1402 	    txh = list_next(&tx->tx_holds, txh)) {
1403 		dnode_t *dn = txh->txh_dnode;
1404 
1405 		if (dn == NULL)
1406 			continue;
1407 
1408 		mutex_enter(&dn->dn_mtx);
1409 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1410 
1411 		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1412 			dn->dn_assigned_txg = 0;
1413 			cv_broadcast(&dn->dn_notxholds);
1414 		}
1415 		mutex_exit(&dn->dn_mtx);
1416 	}
1417 
1418 	if (tx->tx_tempreserve_cookie)
1419 		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
1420 
1421 	if (!list_is_empty(&tx->tx_callbacks))
1422 		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);
1423 
1424 	if (tx->tx_anyobj == FALSE)
1425 		txg_rele_to_sync(&tx->tx_txgh);
1426 
1427 	dmu_tx_destroy(tx);
1428 }
1429 
1430 void
1431 dmu_tx_abort(dmu_tx_t *tx)
1432 {
1433 	ASSERT(tx->tx_txg == 0);
1434 
1435 	/*
1436 	 * Call any registered callbacks with an error code.
1437 	 */
1438 	if (!list_is_empty(&tx->tx_callbacks))
1439 		dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));
1440 
1441 	dmu_tx_destroy(tx);
1442 }
1443 
1444 uint64_t
1445 dmu_tx_get_txg(dmu_tx_t *tx)
1446 {
1447 	ASSERT(tx->tx_txg != 0);
1448 	return (tx->tx_txg);
1449 }
1450 
1451 dsl_pool_t *
1452 dmu_tx_pool(dmu_tx_t *tx)
1453 {
1454 	ASSERT(tx->tx_pool != NULL);
1455 	return (tx->tx_pool);
1456 }
1457 
1458 /*
1459  * Register a callback to be executed at the end of a TXG.
1460  *
1461  * Note: This currently exists for outside consumers, specifically the ZFS OSD
1462  * for Lustre. Please do not remove before checking that project. For examples
1463  * on how to use this see `ztest_commit_callback`.
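 *
 * A minimal sketch of a registration (my_commit_cb and arg are illustrative
 * names; the callback receives the data pointer and an error code, e.g.
 * ECANCELED if the transaction is aborted):
 *
 *	static void
 *	my_commit_cb(void *arg, int error)
 *	{
 *		...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, arg);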
1464  */
1465 void
1466 dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
1467 {
1468 	dmu_tx_callback_t *dcb;
1469 
1470 	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
1471 
1472 	dcb->dcb_func = func;
1473 	dcb->dcb_data = data;
1474 
1475 	list_insert_tail(&tx->tx_callbacks, dcb);
1476 }
1477 
1478 /*
1479  * Call all the commit callbacks on a list, with a given error code.
1480  */
1481 void
1482 dmu_tx_do_callbacks(list_t *cb_list, int error)
1483 {
1484 	dmu_tx_callback_t *dcb;
1485 
1486 	while ((dcb = list_remove_tail(cb_list)) != NULL) {
1487 		dcb->dcb_func(dcb->dcb_data, error);
1488 		kmem_free(dcb, sizeof (dmu_tx_callback_t));
1489 	}
1490 }
1491 
1492 /*
1493  * Interface to hold a bunch of attributes.
1494  * Used when creating new files.
1495  * attrsize is the total size of all attributes
1496  * to be added during object creation.
1497  *
1498  * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
1499  */
1500 
1501 /*
1502  * Hold the necessary attribute name(s) for attribute registration.
1503  * It should be a very rare case where this is needed.  If it does
1504  * happen, it would only happen on the first write to the file system.
1505  */
1506 static void
1507 dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
1508 {
1509 	if (!sa->sa_need_attr_registration)
1510 		return;
1511 
1512 	for (int i = 0; i != sa->sa_num_attrs; i++) {
1513 		if (!sa->sa_attr_table[i].sa_registered) {
1514 			if (sa->sa_reg_attr_obj)
1515 				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
1516 				    B_TRUE, sa->sa_attr_table[i].sa_name);
1517 			else
1518 				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
1519 				    B_TRUE, sa->sa_attr_table[i].sa_name);
1520 		}
1521 	}
1522 }
1523 
1524 void
1525 dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
1526 {
1527 	dmu_tx_hold_t *txh;
1528 
1529 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
1530 	    THT_SPILL, 0, 0);
1531 	if (txh != NULL)
1532 		(void) zfs_refcount_add_many(&txh->txh_space_towrite,
1533 		    SPA_OLD_MAXBLOCKSIZE, FTAG);
1534 }
1535 
1536 void
1537 dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
1538 {
1539 	sa_os_t *sa = tx->tx_objset->os_sa;
1540 
1541 	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1542 
1543 	if (tx->tx_objset->os_sa->sa_master_obj == 0)
1544 		return;
1545 
1546 	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
1547 		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1548 	} else {
1549 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1550 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1551 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1552 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1553 	}
1554 
1555 	dmu_tx_sa_registration_hold(sa, tx);
1556 
1557 	if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
1558 		return;
1559 
1560 	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
1561 	    THT_SPILL, 0, 0);
1562 }
1563 
1564 /*
1565  * Hold SA attribute
1566  *
1567  * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1568  *
1569  * may_grow indicates whether this update may add attributes or grow
1570  * variable-sized attributes; if so, the layout ZAP and the spill block
1571  * are also held.
1572  */
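/*
 * An illustrative sketch of an attribute update (zp, zfsvfs, size and
 * SA_ZPL_SIZE are ZPL names used here only as an example):
 *
 *	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 *	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs), &size, sizeof (size), tx);
 *	dmu_tx_commit(tx);
 */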
1573 void
1574 dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1575 {
1576 	uint64_t object;
1577 	sa_os_t *sa = tx->tx_objset->os_sa;
1578 
1579 	ASSERT(hdl != NULL);
1580 
1581 	object = sa_handle_object(hdl);
1582 
1583 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1584 	DB_DNODE_ENTER(db);
1585 	dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
1586 	DB_DNODE_EXIT(db);
1587 
1588 	if (tx->tx_objset->os_sa->sa_master_obj == 0)
1589 		return;
1590 
1591 	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
1592 	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
1593 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1594 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1595 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1596 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1597 	}
1598 
1599 	dmu_tx_sa_registration_hold(sa, tx);
1600 
1601 	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
1602 		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1603 
1604 	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
1605 		ASSERT(tx->tx_txg == 0);
1606 		dmu_tx_hold_spill(tx, object);
1607 	} else {
1608 		DB_DNODE_ENTER(db);
1609 		if (DB_DNODE(db)->dn_have_spill) {
1610 			ASSERT(tx->tx_txg == 0);
1611 			dmu_tx_hold_spill(tx, object);
1612 		}
1613 		DB_DNODE_EXIT(db);
1614 	}
1615 }
1616 
1617 void
1618 dmu_tx_init(void)
1619 {
1620 	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
1621 	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
1622 	    KSTAT_FLAG_VIRTUAL);
1623 
1624 	if (dmu_tx_ksp != NULL) {
1625 		dmu_tx_ksp->ks_data = &dmu_tx_stats;
1626 		kstat_install(dmu_tx_ksp);
1627 	}
1628 }
1629 
1630 void
1631 dmu_tx_fini(void)
1632 {
1633 	if (dmu_tx_ksp != NULL) {
1634 		kstat_delete(dmu_tx_ksp);
1635 		dmu_tx_ksp = NULL;
1636 	}
1637 }
1638 
1639 #if defined(_KERNEL)
1640 EXPORT_SYMBOL(dmu_tx_create);
1641 EXPORT_SYMBOL(dmu_tx_hold_write);
1642 EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
1643 EXPORT_SYMBOL(dmu_tx_hold_append);
1644 EXPORT_SYMBOL(dmu_tx_hold_append_by_dnode);
1645 EXPORT_SYMBOL(dmu_tx_hold_free);
1646 EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
1647 EXPORT_SYMBOL(dmu_tx_hold_zap);
1648 EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
1649 EXPORT_SYMBOL(dmu_tx_hold_bonus);
1650 EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
1651 EXPORT_SYMBOL(dmu_tx_abort);
1652 EXPORT_SYMBOL(dmu_tx_assign);
1653 EXPORT_SYMBOL(dmu_tx_wait);
1654 EXPORT_SYMBOL(dmu_tx_commit);
1655 EXPORT_SYMBOL(dmu_tx_mark_netfree);
1656 EXPORT_SYMBOL(dmu_tx_get_txg);
1657 EXPORT_SYMBOL(dmu_tx_callback_register);
1658 EXPORT_SYMBOL(dmu_tx_do_callbacks);
1659 EXPORT_SYMBOL(dmu_tx_hold_spill);
1660 EXPORT_SYMBOL(dmu_tx_hold_sa_create);
1661 EXPORT_SYMBOL(dmu_tx_hold_sa);
1662 #endif
1663