xref: /freebsd/sys/contrib/openzfs/module/zfs/dmu_tx.c (revision 61145dc2b94f12f6a47344fb9aac702321880e43)
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2024, Klara, Inc.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_zfs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_over_max",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_frees_delay",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_wrlog_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	TXG_VERIFY(dp->dp_spa, txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;

	if (dn != NULL) {
		(void) zfs_refcount_add(&dn->dn_holds, tx);
		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	zfs_refcount_create(&txh->txh_space_towrite);
	zfs_refcount_create(&txh->txh_memory_tohold);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dnode_t *dn = NULL;
	dmu_tx_hold_t *txh;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, FTAG, &dn);
		if (err != 0) {
			tx->tx_err = err;
			return (NULL);
		}
	}
	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
	if (dn != NULL)
		dnode_rele(dn, FTAG);
	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx))
		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}
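
/*
 * Illustrative sketch (not lifted from a real caller; "type", "bonustype"
 * and "bonuslen" are assumed caller state): one common object-creation
 * pattern holds the bonus of DMU_NEW_OBJECT before assignment, and
 * dmu_object_alloc() then calls dmu_tx_add_new_object() once the tx has
 * been assigned:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 *	...
 *	object = dmu_object_alloc(os, type, 0, bonustype, bonuslen, tx);
 */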

/*
 * This function reads specified data from disk.  The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
 * we do dmu_tx_assign().  There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance.  The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has fewer locks held when calling dmu_tx_hold_*() than
 * after the transaction has been assigned.  This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now).  Once the transaction has been assigned, and some DMU
 * state has been changed, it can be difficult to recover from an i/o
 * error (e.g. to undo the changes already made in memory at the DMU
 * layer).  Typically code to do so does not exist in the caller -- it
 * assumes that the data has already been cached and thus i/o errors are
 * not possible.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read.  However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	err = dbuf_hold_impl(dn, level, blkid, TRUE, FALSE, FTAG, &db);
	rw_exit(&dn->dn_struct_rwlock);
	if (err == ENOENT)
		return (0);
	if (err != 0)
		return (err);
	/*
	 * PARTIAL_FIRST allows caching for uncacheable blocks.  It will
	 * be cleared after dmu_buf_will_dirty() calls dbuf_read() again.
	 */
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH |
	    (level == 0 ? DB_RF_PARTIAL_FIRST : 0));
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the write: the first and last level-0 blocks (if
	 * they are not aligned, i.e. if they are partial-block writes),
	 * and all the level-1 blocks.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* last level-0 block */
		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
		if (end != start && end <= dn->dn_maxblkid &&
		    P2PHASE(off + len, dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(zio, dn, 0, end);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* level-1 blocks */
		if (dn->dn_nlevels > 1) {
			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			for (uint64_t i = (start >> shft) + 1;
			    i < end >> shft; i++) {
				err = dmu_tx_check_ioerr(zio, dn, 1, i);
				if (err != 0) {
					txh->txh_tx->tx_err = err;
				}
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}
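
/*
 * Worked example for the above (illustrative numbers, assuming a 128K
 * data block size and an object that already has at least three blocks):
 * a 200K write at offset 100K spans level-0 blocks 0-2.  Blocks 0 and 2
 * are partially overwritten, so both are read and checked for i/o errors;
 * block 1 is fully overwritten and is not read.  The level-1 loop only
 * reads indirect blocks strictly between those covering the first and
 * last level-0 blocks, so it reads nothing in this case.
 */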

static void
dmu_tx_count_append(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the append: the first level-0 block (if it is not
	 * aligned, i.e. if it is a partial-block write).  No additional
	 * blocks are read.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    DNODE_MIN_SIZE, FTAG);
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}
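
/*
 * Illustrative caller sketch (a minimal sketch, not lifted from a real
 * caller; error handling abbreviated): a typical consumer declares its
 * intent with a hold, assigns the tx to a txg, performs the write with
 * the assigned tx, and commits:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */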

void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

/*
 * Should be used when appending to an object and the exact offset is unknown.
 * The write must occur at or beyond the specified offset.  Only the L0 block
 * at the provided offset will be prefetched.
 */
void
dmu_tx_hold_append(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_APPEND, off, DMU_OBJECT_END);
	if (txh != NULL) {
		dmu_tx_count_append(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

void
dmu_tx_hold_append_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_APPEND, off, DMU_OBJECT_END);
	if (txh != NULL) {
		dmu_tx_count_append(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}
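
/*
 * Illustrative sketch (assumed caller state, not lifted from a real
 * caller): an O_APPEND-style writer that cannot know the final offset
 * until the tx is assigned holds [off, DMU_OBJECT_END) rather than a
 * fixed range, per the comment above:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_append(tx, object, cur_eof, len);
 *	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 *	...	the write then lands at or beyond cur_eof
 */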

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	tx->tx_netfree = B_TRUE;
}
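
/*
 * Illustrative sketch (a minimal sketch, not lifted from a real caller):
 * a path that frees an entire object expects to release more space than
 * it dirties, so it marks the tx as a net free before assigning it:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 *	...
 */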

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;

	ASSERT(tx->tx_txg == 0);

	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off + len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}

			(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
			    1 << dn->dn_indblkshift, FTAG);

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}
		}
		err = zio_wait(zio);
		if (err != 0) {
			tx->tx_err = err;
			return;
		}
	}
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh != NULL) {
		dmu_tx_count_dnode(txh);
		dmu_tx_count_free(txh, off, len);
	}
}

void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
	if (txh != NULL) {
		dmu_tx_count_dnode(txh);
		dmu_tx_count_free(txh, off, len);
	}
}

static void
dmu_tx_count_clone(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	/*
	 * Reuse dmu_tx_count_free(); it does exactly what we need for clone.
	 */
	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_CLONE, off, len);
	if (txh != NULL) {
		dmu_tx_count_dnode(txh);
		dmu_tx_count_clone(txh, off, len);
	}
}

static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	/*
	 * Modifying an almost-full microzap is around the worst case (128KB).
	 *
	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * - 4 new blocks written if adding:
	 *    - 2 blocks for possibly split leaves,
	 *    - 2 grown ptrtbl blocks
	 */
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    zap_get_micro_max_size(tx->tx_pool->dp_spa), FTAG);

	if (dn == NULL)
		return;

	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 || name == NULL) {
		/*
		 * This is a microzap (only one block), or we don't know
		 * the name.  Check the first block for i/o errors.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err != 0) {
			tx->tx_err = err;
		}
	} else {
		/*
		 * Access the name so that we'll check for i/o errors to
		 * the leaf blocks, etc.  We ignore ENOENT, as this name
		 * may not yet exist.
		 */
		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
		if (err == EIO || err == ECKSUM || err == ENXIO) {
			tx->tx_err = err;
		}
	}
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT(dn != NULL);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}
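
/*
 * Illustrative sketch (a minimal sketch; "dir_obj", "name" and "value"
 * are assumed caller state): a ZPL-style directory-entry add holds the
 * directory's ZAP object together with the name being added, so the
 * relevant leaf blocks are checked for i/o errors before any DMU state
 * changes:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_zap(tx, dir_obj, B_TRUE, name);
 *	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 *	...
 *	error = zap_add(os, dir_obj, name, 8, 1, &value, tx);
 */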

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
	if (txh) {
		(void) zfs_refcount_add_many(
		    &txh->txh_space_towrite, space, FTAG);
	}
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	boolean_t match_object = B_FALSE;
	boolean_t match_offset = B_FALSE;

	DB_DNODE_ENTER(db);
	dnode_t *dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, (u_longlong_t)beginblk,
			    (u_longlong_t)endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_APPEND:
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;

				/*
				 * THT_WRITE is used for bonus and spill
				 * blocks.
				 */
				ASSERT(blkid != DMU_BONUS_BLKID &&
				    blkid != DMU_SPILL_BLKID);

				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			case THT_CLONE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			default:
				cmn_err(CE_PANIC, "bad txh_type %d",
				    txh->txh_type);
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent. This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                     *                        +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly. The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
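 *
 * Worked example (illustrative numbers only, using the formula above):
 * with zfs_delay_scale = 500000 (500us), zfs_dirty_data_max = 4GB and
 * zfs_delay_min_dirty_percent = 60 (so min = 2.4GB), a transaction
 * arriving with 3GB dirty waits at least
 * 500000 * (3GB - 2.4GB) / (4GB - 3GB) ~= 300us, while one arriving
 * with 3.9GB dirty waits 500000 * 1.5GB / 0.1GB = 7.5ms, capped at
 * zfs_delay_max_ns (100ms).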
 */
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes, wrlog;
	hrtime_t wakeup, tx_time = 0, now;

	/* Calculate minimum transaction time for the dirty data amount. */
	delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	if (dirty > delay_min_bytes) {
		/*
		 * The caller has already waited until we are under the max.
		 * We make them pass us the amount of dirty data so we don't
		 * have to handle the case of it being >= the max, which
		 * could cause a divide-by-zero if it's == the max.
		 */
		ASSERT3U(dirty, <, zfs_dirty_data_max);

		tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
		    (zfs_dirty_data_max - dirty);
	}

	/* Calculate minimum transaction time for the TX_WRITE log size. */
	wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
	delay_min_bytes =
	    zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
	if (wrlog >= zfs_wrlog_data_max) {
		tx_time = zfs_delay_max_ns;
	} else if (wrlog > delay_min_bytes) {
		tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
		    (zfs_wrlog_data_max - wrlog), tx_time);
	}

	if (tx_time == 0)
		return;

	tx_time = MIN(tx_time, zfs_delay_max_ns);
	now = gethrtime();
	if (now > tx->tx_start + tx_time)
		return;

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

	zfs_sleep_until(wakeup);
}

/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes comprises the sum of:
 *
 *  - this transaction's holds' txh_space_towrite
 *
 *  - dd_tempreserved[], which is the sum of in-flight transactions'
 *    holds' txh_space_towrite (i.e. those transactions that have called
 *    dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 *  - dd_space_towrite[], which is the amount of dirtied dbufs.
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction.  Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
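 *
 * Illustrative retry sketch (a minimal sketch of the classic caller
 * idiom; DMU_TX_NOWAIT is assumed to be the non-blocking counterpart of
 * DMU_TX_WAIT; error handling abbreviated): a caller that cannot block
 * inside dmu_tx_assign() handles ERESTART by waiting for dirty data to
 * drain and then rebuilding the tx:
 *
 * top:
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, DMU_TX_NOWAIT);
 *	if (error != 0) {
 *		if (error == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}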
1018eda14cbcSMatt Macy  */
1019eda14cbcSMatt Macy static int
dmu_tx_try_assign(dmu_tx_t * tx,uint64_t flags)1020*61145dc2SMartin Matuska dmu_tx_try_assign(dmu_tx_t *tx, uint64_t flags)
1021eda14cbcSMatt Macy {
1022eda14cbcSMatt Macy 	spa_t *spa = tx->tx_pool->dp_spa;
1023eda14cbcSMatt Macy 
1024eda14cbcSMatt Macy 	ASSERT0(tx->tx_txg);
1025eda14cbcSMatt Macy 
1026eda14cbcSMatt Macy 	if (tx->tx_err) {
1027eda14cbcSMatt Macy 		DMU_TX_STAT_BUMP(dmu_tx_error);
1028eda14cbcSMatt Macy 		return (tx->tx_err);
1029eda14cbcSMatt Macy 	}
1030eda14cbcSMatt Macy 
1031eda14cbcSMatt Macy 	if (spa_suspended(spa)) {
1032eda14cbcSMatt Macy 		DMU_TX_STAT_BUMP(dmu_tx_suspended);
1033eda14cbcSMatt Macy 
1034eda14cbcSMatt Macy 		/*
1035eda14cbcSMatt Macy 		 * If the user has indicated a blocking failure mode
1036eda14cbcSMatt Macy 		 * then return ERESTART which will block in dmu_tx_wait().
1037eda14cbcSMatt Macy 		 * Otherwise, return EIO so that an error can get
1038eda14cbcSMatt Macy 		 * propagated back to the VOP calls.
1039eda14cbcSMatt Macy 		 *
1040*61145dc2SMartin Matuska 		 * Note that we always honor the DMU_TX_WAIT flag regardless
1041eda14cbcSMatt Macy 		 * of the failmode setting.
1042eda14cbcSMatt Macy 		 */
1043eda14cbcSMatt Macy 		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
1044*61145dc2SMartin Matuska 		    !(flags & DMU_TX_WAIT))
1045eda14cbcSMatt Macy 			return (SET_ERROR(EIO));
1046eda14cbcSMatt Macy 
1047eda14cbcSMatt Macy 		return (SET_ERROR(ERESTART));
1048eda14cbcSMatt Macy 	}
1049eda14cbcSMatt Macy 
1050eda14cbcSMatt Macy 	if (!tx->tx_dirty_delayed &&
1051e3aa18adSMartin Matuska 	    dsl_pool_need_wrlog_delay(tx->tx_pool)) {
1052e3aa18adSMartin Matuska 		tx->tx_wait_dirty = B_TRUE;
1053e3aa18adSMartin Matuska 		DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
10543f9d360cSMartin Matuska 		return (SET_ERROR(ERESTART));
10553f9d360cSMartin Matuska 	}
10563f9d360cSMartin Matuska 
10573f9d360cSMartin Matuska 	if (!tx->tx_dirty_delayed &&
1058eda14cbcSMatt Macy 	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
1059eda14cbcSMatt Macy 		tx->tx_wait_dirty = B_TRUE;
1060eda14cbcSMatt Macy 		DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
1061eda14cbcSMatt Macy 		return (SET_ERROR(ERESTART));
1062eda14cbcSMatt Macy 	}
1063eda14cbcSMatt Macy 
1064eda14cbcSMatt Macy 	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
1065eda14cbcSMatt Macy 	tx->tx_needassign_txh = NULL;
1066eda14cbcSMatt Macy 
1067eda14cbcSMatt Macy 	/*
1068eda14cbcSMatt Macy 	 * NB: No error returns are allowed after txg_hold_open, but
1069eda14cbcSMatt Macy 	 * before processing the dnode holds, due to the
1070eda14cbcSMatt Macy 	 * dmu_tx_unassign() logic.
1071eda14cbcSMatt Macy 	 */
1072eda14cbcSMatt Macy 
1073eda14cbcSMatt Macy 	uint64_t towrite = 0;
1074eda14cbcSMatt Macy 	uint64_t tohold = 0;
1075eda14cbcSMatt Macy 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
1076eda14cbcSMatt Macy 	    txh = list_next(&tx->tx_holds, txh)) {
1077eda14cbcSMatt Macy 		dnode_t *dn = txh->txh_dnode;
1078eda14cbcSMatt Macy 		if (dn != NULL) {
1079eda14cbcSMatt Macy 			/*
1080eda14cbcSMatt Macy 			 * This thread can't hold the dn_struct_rwlock
1081eda14cbcSMatt Macy 			 * while assigning the tx, because this can lead to
1082eda14cbcSMatt Macy 			 * deadlock. Specifically, if this dnode is already
1083eda14cbcSMatt Macy 			 * assigned to an earlier txg, this thread may need
1084eda14cbcSMatt Macy 			 * to wait for that txg to sync (the ERESTART case
1085eda14cbcSMatt Macy 			 * below).  The other thread that has assigned this
1086eda14cbcSMatt Macy 			 * dnode to an earlier txg prevents this txg from
1087eda14cbcSMatt Macy 			 * syncing until its tx can complete (calling
1088eda14cbcSMatt Macy 			 * dmu_tx_commit()), but it may need to acquire the
1089eda14cbcSMatt Macy 			 * dn_struct_rwlock to do so (e.g. via
1090eda14cbcSMatt Macy 			 * dmu_buf_hold*()).
1091eda14cbcSMatt Macy 			 *
1092eda14cbcSMatt Macy 			 * Note that this thread can't hold the lock for
1093eda14cbcSMatt Macy 			 * read either, but the rwlock doesn't record
1094eda14cbcSMatt Macy 			 * enough information to make that assertion.
1095eda14cbcSMatt Macy 			 */
1096eda14cbcSMatt Macy 			ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));
1097eda14cbcSMatt Macy 
1098eda14cbcSMatt Macy 			mutex_enter(&dn->dn_mtx);
1099eda14cbcSMatt Macy 			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
1100eda14cbcSMatt Macy 				mutex_exit(&dn->dn_mtx);
1101eda14cbcSMatt Macy 				tx->tx_needassign_txh = txh;
1102eda14cbcSMatt Macy 				DMU_TX_STAT_BUMP(dmu_tx_group);
1103eda14cbcSMatt Macy 				return (SET_ERROR(ERESTART));
1104eda14cbcSMatt Macy 			}
1105eda14cbcSMatt Macy 			if (dn->dn_assigned_txg == 0)
1106eda14cbcSMatt Macy 				dn->dn_assigned_txg = tx->tx_txg;
1107eda14cbcSMatt Macy 			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1108eda14cbcSMatt Macy 			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
1109eda14cbcSMatt Macy 			mutex_exit(&dn->dn_mtx);
1110eda14cbcSMatt Macy 		}
1111eda14cbcSMatt Macy 		towrite += zfs_refcount_count(&txh->txh_space_towrite);
1112eda14cbcSMatt Macy 		tohold += zfs_refcount_count(&txh->txh_memory_tohold);
1113eda14cbcSMatt Macy 	}
1114eda14cbcSMatt Macy 
1115eda14cbcSMatt Macy 	/* needed allocation: worst-case estimate of write space */
1116eda14cbcSMatt Macy 	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
1117eda14cbcSMatt Macy 	/* calculate memory footprint estimate */
1118eda14cbcSMatt Macy 	uint64_t memory = towrite + tohold;
1119eda14cbcSMatt Macy 
1120eda14cbcSMatt Macy 	if (tx->tx_dir != NULL && asize != 0) {
1121eda14cbcSMatt Macy 		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
1122eda14cbcSMatt Macy 		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
1123eda14cbcSMatt Macy 		if (err != 0)
1124eda14cbcSMatt Macy 			return (err);
1125eda14cbcSMatt Macy 	}
1126eda14cbcSMatt Macy 
1127eda14cbcSMatt Macy 	DMU_TX_STAT_BUMP(dmu_tx_assigned);
1128eda14cbcSMatt Macy 
1129eda14cbcSMatt Macy 	return (0);
1130eda14cbcSMatt Macy }
1131eda14cbcSMatt Macy 
1132eda14cbcSMatt Macy static void
1133eda14cbcSMatt Macy dmu_tx_unassign(dmu_tx_t *tx)
1134eda14cbcSMatt Macy {
1135eda14cbcSMatt Macy 	if (tx->tx_txg == 0)
1136eda14cbcSMatt Macy 		return;
1137eda14cbcSMatt Macy 
1138eda14cbcSMatt Macy 	txg_rele_to_quiesce(&tx->tx_txgh);
1139eda14cbcSMatt Macy 
1140eda14cbcSMatt Macy 	/*
1141eda14cbcSMatt Macy 	 * Walk the transaction's hold list, removing the hold on the
1142eda14cbcSMatt Macy 	 * associated dnode, and notifying waiters if the refcount drops to 0.
1143eda14cbcSMatt Macy 	 */
1144eda14cbcSMatt Macy 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
1145eda14cbcSMatt Macy 	    txh && txh != tx->tx_needassign_txh;
1146eda14cbcSMatt Macy 	    txh = list_next(&tx->tx_holds, txh)) {
1147eda14cbcSMatt Macy 		dnode_t *dn = txh->txh_dnode;
1148eda14cbcSMatt Macy 
1149eda14cbcSMatt Macy 		if (dn == NULL)
1150eda14cbcSMatt Macy 			continue;
1151eda14cbcSMatt Macy 		mutex_enter(&dn->dn_mtx);
1152eda14cbcSMatt Macy 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1153eda14cbcSMatt Macy 
1154eda14cbcSMatt Macy 		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1155eda14cbcSMatt Macy 			dn->dn_assigned_txg = 0;
1156eda14cbcSMatt Macy 			cv_broadcast(&dn->dn_notxholds);
1157eda14cbcSMatt Macy 		}
1158eda14cbcSMatt Macy 		mutex_exit(&dn->dn_mtx);
1159eda14cbcSMatt Macy 	}
1160eda14cbcSMatt Macy 
1161eda14cbcSMatt Macy 	txg_rele_to_sync(&tx->tx_txgh);
1162eda14cbcSMatt Macy 
1163eda14cbcSMatt Macy 	tx->tx_lasttried_txg = tx->tx_txg;
1164eda14cbcSMatt Macy 	tx->tx_txg = 0;
1165eda14cbcSMatt Macy }
1166eda14cbcSMatt Macy 
1167eda14cbcSMatt Macy /*
1168*61145dc2SMartin Matuska  * Assign tx to a transaction group; `flags` is a bitmask:
1169eda14cbcSMatt Macy  *
1170*61145dc2SMartin Matuska  * If DMU_TX_WAIT is set and the currently open txg is full, this function
1171eda14cbcSMatt Macy  * will wait until there's a new txg. This should be used when no locks
1172eda14cbcSMatt Macy  * are being held. With this bit set, this function will only fail if
1173eda14cbcSMatt Macy  * we're truly out of space (or over quota).
1174eda14cbcSMatt Macy  *
1175*61145dc2SMartin Matuska  * If DMU_TX_WAIT is *not* set and we can't assign into the currently open
1176eda14cbcSMatt Macy  * txg without blocking, this function will return immediately with
1177eda14cbcSMatt Macy  * ERESTART. This should be used whenever locks are being held.  On an
1178eda14cbcSMatt Macy  * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
1179eda14cbcSMatt Macy  * and try again.
1180eda14cbcSMatt Macy  *
1181*61145dc2SMartin Matuska  * If DMU_TX_NOTHROTTLE is set, this indicates that this tx should not be
1182eda14cbcSMatt Macy  * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
1183eda14cbcSMatt Macy  * details on the throttle). This is used by the VFS operations, after
1184eda14cbcSMatt Macy  * they have already called dmu_tx_wait() (though most likely on a
1185eda14cbcSMatt Macy  * different tx).
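 *
 * A sketch of the retry pattern a lock-holding caller typically follows
 * (a hypothetical caller; `os`, `object`, `off` and `len` stand in for
 * the caller's own state):
 *
 *	boolean_t waited = B_FALSE;
 * top:
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, waited ? DMU_TX_NOTHROTTLE : 0);
 *	if (err == ERESTART) {
 *		... drop locks ...
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		waited = B_TRUE;
 *		... reacquire locks, then ...
 *		goto top;
 *	} else if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	... make the changes covered by the holds ...
 *	dmu_tx_commit(tx);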
1186184c1b94SMartin Matuska  *
1187184c1b94SMartin Matuska  * It is guaranteed that subsequent successful calls to dmu_tx_assign()
1188184c1b94SMartin Matuska  * will assign the tx to monotonically increasing txgs. Of course this is
1189184c1b94SMartin Matuska  * not strong monotonicity, because the same txg can be returned multiple
1190184c1b94SMartin Matuska  * times in a row. This guarantee holds both for subsequent calls from
1191184c1b94SMartin Matuska  * one thread and for multiple threads. For example, it is impossible to
1192184c1b94SMartin Matuska  * observe the following sequence of events:
1193184c1b94SMartin Matuska  *
1194184c1b94SMartin Matuska  *          Thread 1                            Thread 2
1195184c1b94SMartin Matuska  *
1196184c1b94SMartin Matuska  *     dmu_tx_assign(T1, ...)
1197184c1b94SMartin Matuska  *     1 <- dmu_tx_get_txg(T1)
1198184c1b94SMartin Matuska  *                                       dmu_tx_assign(T2, ...)
1199184c1b94SMartin Matuska  *                                       2 <- dmu_tx_get_txg(T2)
1200184c1b94SMartin Matuska  *     dmu_tx_assign(T3, ...)
1201184c1b94SMartin Matuska  *     1 <- dmu_tx_get_txg(T3)
1202eda14cbcSMatt Macy  */
1203eda14cbcSMatt Macy int
1204*61145dc2SMartin Matuska dmu_tx_assign(dmu_tx_t *tx, uint64_t flags)
1205eda14cbcSMatt Macy {
1206eda14cbcSMatt Macy 	int err;
1207eda14cbcSMatt Macy 
1208eda14cbcSMatt Macy 	ASSERT(tx->tx_txg == 0);
1209*61145dc2SMartin Matuska 	ASSERT0(flags & ~(DMU_TX_WAIT | DMU_TX_NOTHROTTLE));
1210eda14cbcSMatt Macy 	ASSERT(!dsl_pool_sync_context(tx->tx_pool));
1211eda14cbcSMatt Macy 
1212eda14cbcSMatt Macy 	/* If we might wait, we must not hold the config lock. */
1213*61145dc2SMartin Matuska 	IMPLY((flags & DMU_TX_WAIT), !dsl_pool_config_held(tx->tx_pool));
1214eda14cbcSMatt Macy 
1215*61145dc2SMartin Matuska 	if ((flags & DMU_TX_NOTHROTTLE))
1216eda14cbcSMatt Macy 		tx->tx_dirty_delayed = B_TRUE;
1217eda14cbcSMatt Macy 
1218*61145dc2SMartin Matuska 	while ((err = dmu_tx_try_assign(tx, flags)) != 0) {
1219eda14cbcSMatt Macy 		dmu_tx_unassign(tx);
1220eda14cbcSMatt Macy 
1221*61145dc2SMartin Matuska 		if (err != ERESTART || !(flags & DMU_TX_WAIT))
1222eda14cbcSMatt Macy 			return (err);
1223eda14cbcSMatt Macy 
1224eda14cbcSMatt Macy 		dmu_tx_wait(tx);
1225eda14cbcSMatt Macy 	}
1226eda14cbcSMatt Macy 
1227eda14cbcSMatt Macy 	txg_rele_to_quiesce(&tx->tx_txgh);
1228eda14cbcSMatt Macy 
1229eda14cbcSMatt Macy 	return (0);
1230eda14cbcSMatt Macy }
1231eda14cbcSMatt Macy 
1232eda14cbcSMatt Macy void
1233eda14cbcSMatt Macy dmu_tx_wait(dmu_tx_t *tx)
1234eda14cbcSMatt Macy {
1235eda14cbcSMatt Macy 	spa_t *spa = tx->tx_pool->dp_spa;
1236eda14cbcSMatt Macy 	dsl_pool_t *dp = tx->tx_pool;
1237eda14cbcSMatt Macy 	hrtime_t before;
1238eda14cbcSMatt Macy 
1239eda14cbcSMatt Macy 	ASSERT(tx->tx_txg == 0);
1240eda14cbcSMatt Macy 	ASSERT(!dsl_pool_config_held(tx->tx_pool));
1241eda14cbcSMatt Macy 
1242eda14cbcSMatt Macy 	before = gethrtime();
1243eda14cbcSMatt Macy 
1244eda14cbcSMatt Macy 	if (tx->tx_wait_dirty) {
1245eda14cbcSMatt Macy 		uint64_t dirty;
1246eda14cbcSMatt Macy 
1247eda14cbcSMatt Macy 		/*
1248eda14cbcSMatt Macy 		 * dmu_tx_try_assign() has determined that we need to wait
1249eda14cbcSMatt Macy 		 * because we've consumed much or all of the dirty buffer
1250eda14cbcSMatt Macy 		 * space.
1251eda14cbcSMatt Macy 		 */
1252eda14cbcSMatt Macy 		mutex_enter(&dp->dp_lock);
1253eda14cbcSMatt Macy 		if (dp->dp_dirty_total >= zfs_dirty_data_max)
1254eda14cbcSMatt Macy 			DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
1255eda14cbcSMatt Macy 		while (dp->dp_dirty_total >= zfs_dirty_data_max)
1256eda14cbcSMatt Macy 			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
1257eda14cbcSMatt Macy 		dirty = dp->dp_dirty_total;
1258eda14cbcSMatt Macy 		mutex_exit(&dp->dp_lock);
1259eda14cbcSMatt Macy 
1260eda14cbcSMatt Macy 		dmu_tx_delay(tx, dirty);
1261eda14cbcSMatt Macy 
1262eda14cbcSMatt Macy 		tx->tx_wait_dirty = B_FALSE;
1263eda14cbcSMatt Macy 
1264eda14cbcSMatt Macy 		/*
1265eda14cbcSMatt Macy 		 * Note: setting tx_dirty_delayed only has effect if the
1266*61145dc2SMartin Matuska 		 * caller used DMU_TX_WAIT.  Otherwise they are going to
1267eda14cbcSMatt Macy 		 * destroy this tx and try again.  The common case,
1268*61145dc2SMartin Matuska 		 * zfs_write(), uses DMU_TX_WAIT.
1269eda14cbcSMatt Macy 		 */
1270eda14cbcSMatt Macy 		tx->tx_dirty_delayed = B_TRUE;
1271eda14cbcSMatt Macy 	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
1272eda14cbcSMatt Macy 		/*
1273eda14cbcSMatt Macy 		 * If the pool is suspended we need to wait until it
1274eda14cbcSMatt Macy 		 * is resumed.  Note that it's possible that the pool
1275eda14cbcSMatt Macy 		 * has become active after this thread has tried to
1276eda14cbcSMatt Macy 		 * obtain a tx.  If that's the case then tx_lasttried_txg
1277eda14cbcSMatt Macy 		 * would not have been set.
1278eda14cbcSMatt Macy 		 */
1279eda14cbcSMatt Macy 		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
1280eda14cbcSMatt Macy 	} else if (tx->tx_needassign_txh) {
1281eda14cbcSMatt Macy 		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
1282eda14cbcSMatt Macy 
1283eda14cbcSMatt Macy 		mutex_enter(&dn->dn_mtx);
1284eda14cbcSMatt Macy 		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
1285eda14cbcSMatt Macy 			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
1286eda14cbcSMatt Macy 		mutex_exit(&dn->dn_mtx);
1287eda14cbcSMatt Macy 		tx->tx_needassign_txh = NULL;
1288eda14cbcSMatt Macy 	} else {
1289eda14cbcSMatt Macy 		/*
1290eda14cbcSMatt Macy 		 * If we have a lot of dirty data just wait until we sync
1291eda14cbcSMatt Macy 		 * out a TXG at which point we'll hopefully have synced
1292eda14cbcSMatt Macy 		 * a portion of the changes.
1293eda14cbcSMatt Macy 		 */
1294eda14cbcSMatt Macy 		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
1295eda14cbcSMatt Macy 	}
1296eda14cbcSMatt Macy 
1297eda14cbcSMatt Macy 	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
1298eda14cbcSMatt Macy }
1299eda14cbcSMatt Macy 
1300eda14cbcSMatt Macy static void
1301eda14cbcSMatt Macy dmu_tx_destroy(dmu_tx_t *tx)
1302eda14cbcSMatt Macy {
1303eda14cbcSMatt Macy 	dmu_tx_hold_t *txh;
1304eda14cbcSMatt Macy 
1305eda14cbcSMatt Macy 	while ((txh = list_head(&tx->tx_holds)) != NULL) {
1306eda14cbcSMatt Macy 		dnode_t *dn = txh->txh_dnode;
1307eda14cbcSMatt Macy 
1308eda14cbcSMatt Macy 		list_remove(&tx->tx_holds, txh);
1309eda14cbcSMatt Macy 		zfs_refcount_destroy_many(&txh->txh_space_towrite,
1310eda14cbcSMatt Macy 		    zfs_refcount_count(&txh->txh_space_towrite));
1311eda14cbcSMatt Macy 		zfs_refcount_destroy_many(&txh->txh_memory_tohold,
1312eda14cbcSMatt Macy 		    zfs_refcount_count(&txh->txh_memory_tohold));
1313eda14cbcSMatt Macy 		kmem_free(txh, sizeof (dmu_tx_hold_t));
1314eda14cbcSMatt Macy 		if (dn != NULL)
1315eda14cbcSMatt Macy 			dnode_rele(dn, tx);
1316eda14cbcSMatt Macy 	}
1317eda14cbcSMatt Macy 
1318eda14cbcSMatt Macy 	list_destroy(&tx->tx_callbacks);
1319eda14cbcSMatt Macy 	list_destroy(&tx->tx_holds);
1320eda14cbcSMatt Macy 	kmem_free(tx, sizeof (dmu_tx_t));
1321eda14cbcSMatt Macy }
1322eda14cbcSMatt Macy 
1323eda14cbcSMatt Macy void
1324eda14cbcSMatt Macy dmu_tx_commit(dmu_tx_t *tx)
1325eda14cbcSMatt Macy {
1326eda14cbcSMatt Macy 	ASSERT(tx->tx_txg != 0);
1327eda14cbcSMatt Macy 
1328eda14cbcSMatt Macy 	/*
1329eda14cbcSMatt Macy 	 * Go through the transaction's hold list and remove holds on
1330eda14cbcSMatt Macy 	 * associated dnodes, notifying waiters if no holds remain.
1331eda14cbcSMatt Macy 	 */
1332eda14cbcSMatt Macy 	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
1333eda14cbcSMatt Macy 	    txh = list_next(&tx->tx_holds, txh)) {
1334eda14cbcSMatt Macy 		dnode_t *dn = txh->txh_dnode;
1335eda14cbcSMatt Macy 
1336eda14cbcSMatt Macy 		if (dn == NULL)
1337eda14cbcSMatt Macy 			continue;
1338eda14cbcSMatt Macy 
1339eda14cbcSMatt Macy 		mutex_enter(&dn->dn_mtx);
1340eda14cbcSMatt Macy 		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1341eda14cbcSMatt Macy 
1342eda14cbcSMatt Macy 		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1343eda14cbcSMatt Macy 			dn->dn_assigned_txg = 0;
1344eda14cbcSMatt Macy 			cv_broadcast(&dn->dn_notxholds);
1345eda14cbcSMatt Macy 		}
1346eda14cbcSMatt Macy 		mutex_exit(&dn->dn_mtx);
1347eda14cbcSMatt Macy 	}
1348eda14cbcSMatt Macy 
1349eda14cbcSMatt Macy 	if (tx->tx_tempreserve_cookie)
1350eda14cbcSMatt Macy 		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
1351eda14cbcSMatt Macy 
1352eda14cbcSMatt Macy 	if (!list_is_empty(&tx->tx_callbacks))
1353eda14cbcSMatt Macy 		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);
1354eda14cbcSMatt Macy 
1355eda14cbcSMatt Macy 	if (tx->tx_anyobj == FALSE)
1356eda14cbcSMatt Macy 		txg_rele_to_sync(&tx->tx_txgh);
1357eda14cbcSMatt Macy 
1358eda14cbcSMatt Macy 	dmu_tx_destroy(tx);
1359eda14cbcSMatt Macy }
1360eda14cbcSMatt Macy 
1361eda14cbcSMatt Macy void
1362eda14cbcSMatt Macy dmu_tx_abort(dmu_tx_t *tx)
1363eda14cbcSMatt Macy {
1364eda14cbcSMatt Macy 	ASSERT(tx->tx_txg == 0);
1365eda14cbcSMatt Macy 
1366eda14cbcSMatt Macy 	/*
1367eda14cbcSMatt Macy 	 * Call any registered callbacks with an error code.
1368eda14cbcSMatt Macy 	 */
1369eda14cbcSMatt Macy 	if (!list_is_empty(&tx->tx_callbacks))
1370eda14cbcSMatt Macy 		dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));
1371eda14cbcSMatt Macy 
1372eda14cbcSMatt Macy 	dmu_tx_destroy(tx);
1373eda14cbcSMatt Macy }
1374eda14cbcSMatt Macy 
1375eda14cbcSMatt Macy uint64_t
1376eda14cbcSMatt Macy dmu_tx_get_txg(dmu_tx_t *tx)
1377eda14cbcSMatt Macy {
1378eda14cbcSMatt Macy 	ASSERT(tx->tx_txg != 0);
1379eda14cbcSMatt Macy 	return (tx->tx_txg);
1380eda14cbcSMatt Macy }
1381eda14cbcSMatt Macy 
1382eda14cbcSMatt Macy dsl_pool_t *
1383eda14cbcSMatt Macy dmu_tx_pool(dmu_tx_t *tx)
1384eda14cbcSMatt Macy {
1385eda14cbcSMatt Macy 	ASSERT(tx->tx_pool != NULL);
1386eda14cbcSMatt Macy 	return (tx->tx_pool);
1387eda14cbcSMatt Macy }
1388eda14cbcSMatt Macy 
13895c65a0a9SMartin Matuska /*
13905c65a0a9SMartin Matuska  * Register a callback to be executed at the end of a TXG.
13915c65a0a9SMartin Matuska  *
13925c65a0a9SMartin Matuska  * Note: This currently exists for outside consumers, specifically the ZFS OSD
13935c65a0a9SMartin Matuska  * for Lustre. Please do not remove before checking that project. For examples
13945c65a0a9SMartin Matuska  * on how to use this see `ztest_commit_callback`.
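 *
 * A minimal sketch of usage (the callback and argument names are
 * hypothetical):
 *
 *	static void
 *	my_commit_cb(void *arg, int error)
 *	{
 *		// error is 0 once the txg has committed, or ECANCELED
 *		// if the transaction was aborted (see dmu_tx_abort()).
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, arg);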
13955c65a0a9SMartin Matuska  */
1396eda14cbcSMatt Macy void
1397eda14cbcSMatt Macy dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
1398eda14cbcSMatt Macy {
1399eda14cbcSMatt Macy 	dmu_tx_callback_t *dcb;
1400eda14cbcSMatt Macy 
1401eda14cbcSMatt Macy 	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
1402eda14cbcSMatt Macy 
1403eda14cbcSMatt Macy 	dcb->dcb_func = func;
1404eda14cbcSMatt Macy 	dcb->dcb_data = data;
1405eda14cbcSMatt Macy 
1406eda14cbcSMatt Macy 	list_insert_tail(&tx->tx_callbacks, dcb);
1407eda14cbcSMatt Macy }
1408eda14cbcSMatt Macy 
1409eda14cbcSMatt Macy /*
1410eda14cbcSMatt Macy  * Call all the commit callbacks on a list, with a given error code.
1411eda14cbcSMatt Macy  */
1412eda14cbcSMatt Macy void
1413eda14cbcSMatt Macy dmu_tx_do_callbacks(list_t *cb_list, int error)
1414eda14cbcSMatt Macy {
1415eda14cbcSMatt Macy 	dmu_tx_callback_t *dcb;
1416eda14cbcSMatt Macy 
14174e8d558cSMartin Matuska 	while ((dcb = list_remove_tail(cb_list)) != NULL) {
1418eda14cbcSMatt Macy 		dcb->dcb_func(dcb->dcb_data, error);
1419eda14cbcSMatt Macy 		kmem_free(dcb, sizeof (dmu_tx_callback_t));
1420eda14cbcSMatt Macy 	}
1421eda14cbcSMatt Macy }
1422eda14cbcSMatt Macy 
1423eda14cbcSMatt Macy /*
1424eda14cbcSMatt Macy  * Interface to hold a bunch of attributes,
1425eda14cbcSMatt Macy  * used for creating new files.
1426eda14cbcSMatt Macy  * attrsize is the total size of all attributes
1427eda14cbcSMatt Macy  * to be added during object creation.
1428eda14cbcSMatt Macy  *
1429eda14cbcSMatt Macy  * For updating/adding a single attribute, dmu_tx_hold_sa() should be used.
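 *
 * For illustration (a sketch; `hdl` is the object's sa_handle_t and
 * `total_attr_size` is hypothetical):
 *
 *	dmu_tx_hold_sa_create(tx, total_attr_size);	// new object
 *	dmu_tx_hold_sa(tx, hdl, B_FALSE);		// existing object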
1430eda14cbcSMatt Macy  */
1431eda14cbcSMatt Macy 
1432eda14cbcSMatt Macy /*
1433eda14cbcSMatt Macy  * Hold the necessary attribute name for attribute registration.
1434eda14cbcSMatt Macy  * It should be a very rare case where this is needed.  If it does
1435eda14cbcSMatt Macy  * happen it would only happen on the first write to the file system.
1436eda14cbcSMatt Macy  */
1437eda14cbcSMatt Macy static void
1438eda14cbcSMatt Macy dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
1439eda14cbcSMatt Macy {
1440eda14cbcSMatt Macy 	if (!sa->sa_need_attr_registration)
1441eda14cbcSMatt Macy 		return;
1442eda14cbcSMatt Macy 
1443eda14cbcSMatt Macy 	for (int i = 0; i != sa->sa_num_attrs; i++) {
1444eda14cbcSMatt Macy 		if (!sa->sa_attr_table[i].sa_registered) {
1445eda14cbcSMatt Macy 			if (sa->sa_reg_attr_obj)
1446eda14cbcSMatt Macy 				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
1447eda14cbcSMatt Macy 				    B_TRUE, sa->sa_attr_table[i].sa_name);
1448eda14cbcSMatt Macy 			else
1449eda14cbcSMatt Macy 				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
1450eda14cbcSMatt Macy 				    B_TRUE, sa->sa_attr_table[i].sa_name);
1451eda14cbcSMatt Macy 		}
1452eda14cbcSMatt Macy 	}
1453eda14cbcSMatt Macy }
1454eda14cbcSMatt Macy 
1455eda14cbcSMatt Macy void
1456eda14cbcSMatt Macy dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
1457eda14cbcSMatt Macy {
1458eda14cbcSMatt Macy 	dmu_tx_hold_t *txh;
1459eda14cbcSMatt Macy 
1460eda14cbcSMatt Macy 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
1461eda14cbcSMatt Macy 	    THT_SPILL, 0, 0);
1462eda14cbcSMatt Macy 	if (txh != NULL)
1463eda14cbcSMatt Macy 		(void) zfs_refcount_add_many(&txh->txh_space_towrite,
1464eda14cbcSMatt Macy 		    SPA_OLD_MAXBLOCKSIZE, FTAG);
1465eda14cbcSMatt Macy }
1466eda14cbcSMatt Macy 
1467eda14cbcSMatt Macy void
1468eda14cbcSMatt Macy dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
1469eda14cbcSMatt Macy {
1470eda14cbcSMatt Macy 	sa_os_t *sa = tx->tx_objset->os_sa;
1471eda14cbcSMatt Macy 
1472eda14cbcSMatt Macy 	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1473eda14cbcSMatt Macy 
1474eda14cbcSMatt Macy 	if (tx->tx_objset->os_sa->sa_master_obj == 0)
1475eda14cbcSMatt Macy 		return;
1476eda14cbcSMatt Macy 
1477eda14cbcSMatt Macy 	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
1478eda14cbcSMatt Macy 		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1479eda14cbcSMatt Macy 	} else {
1480eda14cbcSMatt Macy 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1481eda14cbcSMatt Macy 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1482eda14cbcSMatt Macy 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1483eda14cbcSMatt Macy 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1484eda14cbcSMatt Macy 	}
1485eda14cbcSMatt Macy 
1486eda14cbcSMatt Macy 	dmu_tx_sa_registration_hold(sa, tx);
1487eda14cbcSMatt Macy 
1488eda14cbcSMatt Macy 	if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
1489eda14cbcSMatt Macy 		return;
1490eda14cbcSMatt Macy 
1491eda14cbcSMatt Macy 	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
1492eda14cbcSMatt Macy 	    THT_SPILL, 0, 0);
1493eda14cbcSMatt Macy }
1494eda14cbcSMatt Macy 
1495eda14cbcSMatt Macy /*
1496eda14cbcSMatt Macy  * Hold SA attribute
1497eda14cbcSMatt Macy  *
1498eda14cbcSMatt Macy  * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1499eda14cbcSMatt Macy  *
1500eda14cbcSMatt Macy  * may_grow should be set when the write may grow the attribute(s),
1501eda14cbcSMatt Macy  * since growing can require updating the SA layout ZAP and forcing
1502eda14cbcSMatt Macy  * the attribute data into the spill block.
1503eda14cbcSMatt Macy  */
1504eda14cbcSMatt Macy void
1505eda14cbcSMatt Macy dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1506eda14cbcSMatt Macy {
1507eda14cbcSMatt Macy 	uint64_t object;
1508eda14cbcSMatt Macy 	sa_os_t *sa = tx->tx_objset->os_sa;
1509eda14cbcSMatt Macy 
1510eda14cbcSMatt Macy 	ASSERT(hdl != NULL);
1511eda14cbcSMatt Macy 
1512eda14cbcSMatt Macy 	object = sa_handle_object(hdl);
1513eda14cbcSMatt Macy 
1514eda14cbcSMatt Macy 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1515eda14cbcSMatt Macy 	DB_DNODE_ENTER(db);
1516eda14cbcSMatt Macy 	dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
1517eda14cbcSMatt Macy 	DB_DNODE_EXIT(db);
1518eda14cbcSMatt Macy 
1519eda14cbcSMatt Macy 	if (tx->tx_objset->os_sa->sa_master_obj == 0)
1520eda14cbcSMatt Macy 		return;
1521eda14cbcSMatt Macy 
1522eda14cbcSMatt Macy 	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
1523eda14cbcSMatt Macy 	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
1524eda14cbcSMatt Macy 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1525eda14cbcSMatt Macy 		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1526eda14cbcSMatt Macy 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1527eda14cbcSMatt Macy 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1528eda14cbcSMatt Macy 	}
1529eda14cbcSMatt Macy 
1530eda14cbcSMatt Macy 	dmu_tx_sa_registration_hold(sa, tx);
1531eda14cbcSMatt Macy 
1532eda14cbcSMatt Macy 	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
1533eda14cbcSMatt Macy 		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1534eda14cbcSMatt Macy 
1535eda14cbcSMatt Macy 	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
1536eda14cbcSMatt Macy 		ASSERT(tx->tx_txg == 0);
1537eda14cbcSMatt Macy 		dmu_tx_hold_spill(tx, object);
1538eda14cbcSMatt Macy 	} else {
1539eda14cbcSMatt Macy 		DB_DNODE_ENTER(db);
1540ce4dcb97SMartin Matuska 		if (DB_DNODE(db)->dn_have_spill) {
1541eda14cbcSMatt Macy 			ASSERT(tx->tx_txg == 0);
1542eda14cbcSMatt Macy 			dmu_tx_hold_spill(tx, object);
1543eda14cbcSMatt Macy 		}
1544eda14cbcSMatt Macy 		DB_DNODE_EXIT(db);
1545eda14cbcSMatt Macy 	}
1546eda14cbcSMatt Macy }
1547eda14cbcSMatt Macy 
1548eda14cbcSMatt Macy void
1549eda14cbcSMatt Macy dmu_tx_init(void)
1550eda14cbcSMatt Macy {
1551eda14cbcSMatt Macy 	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
1552eda14cbcSMatt Macy 	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
1553eda14cbcSMatt Macy 	    KSTAT_FLAG_VIRTUAL);
1554eda14cbcSMatt Macy 
1555eda14cbcSMatt Macy 	if (dmu_tx_ksp != NULL) {
1556eda14cbcSMatt Macy 		dmu_tx_ksp->ks_data = &dmu_tx_stats;
1557eda14cbcSMatt Macy 		kstat_install(dmu_tx_ksp);
1558eda14cbcSMatt Macy 	}
1559eda14cbcSMatt Macy }
1560eda14cbcSMatt Macy 
1561eda14cbcSMatt Macy void
1562eda14cbcSMatt Macy dmu_tx_fini(void)
1563eda14cbcSMatt Macy {
1564eda14cbcSMatt Macy 	if (dmu_tx_ksp != NULL) {
1565eda14cbcSMatt Macy 		kstat_delete(dmu_tx_ksp);
1566eda14cbcSMatt Macy 		dmu_tx_ksp = NULL;
1567eda14cbcSMatt Macy 	}
1568eda14cbcSMatt Macy }
1569eda14cbcSMatt Macy 
1570eda14cbcSMatt Macy #if defined(_KERNEL)
1571eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_create);
1572eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_hold_write);
1573eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
1574e639e0d2SMartin Matuska EXPORT_SYMBOL(dmu_tx_hold_append);
1575e639e0d2SMartin Matuska EXPORT_SYMBOL(dmu_tx_hold_append_by_dnode);
1576eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_hold_free);
1577eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
1578eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_hold_zap);
1579eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
1580eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_hold_bonus);
1581eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
1582eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_abort);
1583eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_assign);
1584eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_wait);
1585eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_commit);
1586eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_mark_netfree);
1587eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_get_txg);
1588eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_callback_register);
1589eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_do_callbacks);
1590eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_hold_spill);
1591eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_hold_sa_create);
1592eda14cbcSMatt Macy EXPORT_SYMBOL(dmu_tx_hold_sa);
1593eda14cbcSMatt Macy #endif
1594