xref: /freebsd/sys/contrib/openzfs/module/zfs/zil.c (revision 61145dc2b94f12f6a47344fb9aac702321880e43)
1*61145dc2SMartin Matuska // SPDX-License-Identifier: CDDL-1.0
2eda14cbcSMatt Macy /*
3eda14cbcSMatt Macy  * CDDL HEADER START
4eda14cbcSMatt Macy  *
5eda14cbcSMatt Macy  * The contents of this file are subject to the terms of the
6eda14cbcSMatt Macy  * Common Development and Distribution License (the "License").
7eda14cbcSMatt Macy  * You may not use this file except in compliance with the License.
8eda14cbcSMatt Macy  *
9eda14cbcSMatt Macy  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10271171e0SMartin Matuska  * or https://opensource.org/licenses/CDDL-1.0.
11eda14cbcSMatt Macy  * See the License for the specific language governing permissions
12eda14cbcSMatt Macy  * and limitations under the License.
13eda14cbcSMatt Macy  *
14eda14cbcSMatt Macy  * When distributing Covered Code, include this CDDL HEADER in each
15eda14cbcSMatt Macy  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16eda14cbcSMatt Macy  * If applicable, add the following below this CDDL HEADER, with the
17eda14cbcSMatt Macy  * fields enclosed by brackets "[]" replaced with your own identifying
18eda14cbcSMatt Macy  * information: Portions Copyright [yyyy] [name of copyright owner]
19eda14cbcSMatt Macy  *
20eda14cbcSMatt Macy  * CDDL HEADER END
21eda14cbcSMatt Macy  */
22eda14cbcSMatt Macy /*
23eda14cbcSMatt Macy  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24eda14cbcSMatt Macy  * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
25eda14cbcSMatt Macy  * Copyright (c) 2014 Integros [integros.com]
26eda14cbcSMatt Macy  * Copyright (c) 2018 Datto Inc.
27eda14cbcSMatt Macy  */
28eda14cbcSMatt Macy 
29eda14cbcSMatt Macy /* Portions Copyright 2010 Robert Milkowski */
30eda14cbcSMatt Macy 
31eda14cbcSMatt Macy #include <sys/zfs_context.h>
32eda14cbcSMatt Macy #include <sys/spa.h>
33eda14cbcSMatt Macy #include <sys/spa_impl.h>
34eda14cbcSMatt Macy #include <sys/dmu.h>
35eda14cbcSMatt Macy #include <sys/zap.h>
36eda14cbcSMatt Macy #include <sys/arc.h>
37eda14cbcSMatt Macy #include <sys/stat.h>
38eda14cbcSMatt Macy #include <sys/zil.h>
39eda14cbcSMatt Macy #include <sys/zil_impl.h>
40eda14cbcSMatt Macy #include <sys/dsl_dataset.h>
41eda14cbcSMatt Macy #include <sys/vdev_impl.h>
42eda14cbcSMatt Macy #include <sys/dmu_tx.h>
43eda14cbcSMatt Macy #include <sys/dsl_pool.h>
44eda14cbcSMatt Macy #include <sys/metaslab.h>
45eda14cbcSMatt Macy #include <sys/trace_zfs.h>
46eda14cbcSMatt Macy #include <sys/abd.h>
472a58b312SMartin Matuska #include <sys/brt.h>
48271171e0SMartin Matuska #include <sys/wmsum.h>
49eda14cbcSMatt Macy 
50eda14cbcSMatt Macy /*
51eda14cbcSMatt Macy  * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
52eda14cbcSMatt Macy  * calls that change the file system. Each itx has enough information to
53eda14cbcSMatt Macy  * be able to replay it after a system crash, power loss, or
54eda14cbcSMatt Macy  * equivalent failure mode. These are stored in memory until either:
55eda14cbcSMatt Macy  *
56eda14cbcSMatt Macy  *   1. they are committed to the pool by the DMU transaction group
57eda14cbcSMatt Macy  *      (txg), at which point they can be discarded; or
58eda14cbcSMatt Macy  *   2. they are committed to the on-disk ZIL for the dataset being
59eda14cbcSMatt Macy  *      modified (e.g. due to an fsync, O_DSYNC, or other synchronous
60eda14cbcSMatt Macy  *      requirement).
61eda14cbcSMatt Macy  *
62eda14cbcSMatt Macy  * In the event of a crash or power loss, the itxs contained by each
63eda14cbcSMatt Macy  * dataset's on-disk ZIL will be replayed when that dataset is first
64eda14cbcSMatt Macy  * instantiated (e.g. if the dataset is a normal filesystem, when it is
65eda14cbcSMatt Macy  * first mounted).
66eda14cbcSMatt Macy  *
67eda14cbcSMatt Macy  * As hinted at above, there is one ZIL per dataset (both the in-memory
68eda14cbcSMatt Macy  * representation, and the on-disk representation). The on-disk format
69eda14cbcSMatt Macy  * consists of 3 parts:
70eda14cbcSMatt Macy  *
71eda14cbcSMatt Macy  * 	- a single, per-dataset, ZIL header; which points to a chain of
72eda14cbcSMatt Macy  * 	- zero or more ZIL blocks; each of which contains
73eda14cbcSMatt Macy  * 	- zero or more ZIL records
74eda14cbcSMatt Macy  *
75eda14cbcSMatt Macy  * A ZIL record holds the information necessary to replay a single
76eda14cbcSMatt Macy  * system call transaction. A ZIL block can hold many ZIL records, and
77eda14cbcSMatt Macy  * the blocks are chained together, similar to a singly linked list.
78eda14cbcSMatt Macy  *
79eda14cbcSMatt Macy  * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
80eda14cbcSMatt Macy  * block in the chain, and the ZIL header points to the first block in
81eda14cbcSMatt Macy  * the chain.
82eda14cbcSMatt Macy  *
83eda14cbcSMatt Macy  * Note, there is not a fixed place in the pool to hold these ZIL
84eda14cbcSMatt Macy  * blocks; they are dynamically allocated and freed as needed from the
85eda14cbcSMatt Macy  * blocks available on the pool, though they can be preferentially
86eda14cbcSMatt Macy  * allocated from a dedicated "log" vdev.
87eda14cbcSMatt Macy  */
88eda14cbcSMatt Macy 
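/*
 * A hedged, illustrative sketch of the layout described above, using
 * simplified stand-in types rather than the real zil.h definitions:
 * the header names the first block, each block carries a pointer to
 * its successor, and replay is a single forward traversal of that
 * chain.
 */
struct sketch_zil_blk {
	struct sketch_zil_blk *next;	/* stands in for zc_next_blk */
	int nrecords;			/* ZIL records held by this block */
};

struct sketch_zil_header {
	struct sketch_zil_blk *first;	/* stands in for zh_log */
};

static int __maybe_unused
sketch_zil_count_records(const struct sketch_zil_header *zh)
{
	int total = 0;

	for (const struct sketch_zil_blk *b = zh->first; b != NULL;
	    b = b->next)
		total += b->nrecords;
	return (total);
}
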
89eda14cbcSMatt Macy /*
90eda14cbcSMatt Macy  * This controls the amount of time that a ZIL block (lwb) will remain
91eda14cbcSMatt Macy  * "open" when it isn't "full" but has a thread waiting for it to be
92eda14cbcSMatt Macy  * committed to stable storage. Please refer to the zil_commit_waiter()
93eda14cbcSMatt Macy  * function (and the comments within it) for more details.
94eda14cbcSMatt Macy  */
956c1e79dfSMartin Matuska static uint_t zfs_commit_timeout_pct = 10;
9615f0b8c3SMartin Matuska 
9715f0b8c3SMartin Matuska /*
98eda14cbcSMatt Macy  * See zil.h for more information about these fields.
99eda14cbcSMatt Macy  */
100271171e0SMartin Matuska static zil_kstat_values_t zil_stats = {
101eda14cbcSMatt Macy 	{ "zil_commit_count",			KSTAT_DATA_UINT64 },
102eda14cbcSMatt Macy 	{ "zil_commit_writer_count",		KSTAT_DATA_UINT64 },
103ce4dcb97SMartin Matuska 	{ "zil_commit_error_count",		KSTAT_DATA_UINT64 },
104ce4dcb97SMartin Matuska 	{ "zil_commit_stall_count",		KSTAT_DATA_UINT64 },
105ce4dcb97SMartin Matuska 	{ "zil_commit_suspend_count",		KSTAT_DATA_UINT64 },
106eda14cbcSMatt Macy 	{ "zil_itx_count",			KSTAT_DATA_UINT64 },
107eda14cbcSMatt Macy 	{ "zil_itx_indirect_count",		KSTAT_DATA_UINT64 },
108eda14cbcSMatt Macy 	{ "zil_itx_indirect_bytes",		KSTAT_DATA_UINT64 },
109eda14cbcSMatt Macy 	{ "zil_itx_copied_count",		KSTAT_DATA_UINT64 },
110eda14cbcSMatt Macy 	{ "zil_itx_copied_bytes",		KSTAT_DATA_UINT64 },
111eda14cbcSMatt Macy 	{ "zil_itx_needcopy_count",		KSTAT_DATA_UINT64 },
112eda14cbcSMatt Macy 	{ "zil_itx_needcopy_bytes",		KSTAT_DATA_UINT64 },
113eda14cbcSMatt Macy 	{ "zil_itx_metaslab_normal_count",	KSTAT_DATA_UINT64 },
114eda14cbcSMatt Macy 	{ "zil_itx_metaslab_normal_bytes",	KSTAT_DATA_UINT64 },
1154e8d558cSMartin Matuska 	{ "zil_itx_metaslab_normal_write",	KSTAT_DATA_UINT64 },
1164e8d558cSMartin Matuska 	{ "zil_itx_metaslab_normal_alloc",	KSTAT_DATA_UINT64 },
117eda14cbcSMatt Macy 	{ "zil_itx_metaslab_slog_count",	KSTAT_DATA_UINT64 },
118eda14cbcSMatt Macy 	{ "zil_itx_metaslab_slog_bytes",	KSTAT_DATA_UINT64 },
1194e8d558cSMartin Matuska 	{ "zil_itx_metaslab_slog_write",	KSTAT_DATA_UINT64 },
1204e8d558cSMartin Matuska 	{ "zil_itx_metaslab_slog_alloc",	KSTAT_DATA_UINT64 },
121eda14cbcSMatt Macy };
122eda14cbcSMatt Macy 
123271171e0SMartin Matuska static zil_sums_t zil_sums_global;
124271171e0SMartin Matuska static kstat_t *zil_kstats_global;
125eda14cbcSMatt Macy 
126eda14cbcSMatt Macy /*
127eda14cbcSMatt Macy  * Disable intent logging replay.  This global ZIL switch affects all pools.
128eda14cbcSMatt Macy  */
129eda14cbcSMatt Macy int zil_replay_disable = 0;
130eda14cbcSMatt Macy 
131eda14cbcSMatt Macy /*
1321719886fSMartin Matuska  * Disable the flush commands that are normally sent to the disk(s) by the ZIL
1331719886fSMartin Matuska  * after an LWB write has completed. Setting this will cause ZIL corruption on
1341719886fSMartin Matuska  * power loss if a volatile out-of-order write cache is enabled.
135eda14cbcSMatt Macy  */
136e92ffd9bSMartin Matuska static int zil_nocacheflush = 0;
137eda14cbcSMatt Macy 
138eda14cbcSMatt Macy /*
139eda14cbcSMatt Macy  * Limit SLOG write size per commit executed with synchronous priority.
140eda14cbcSMatt Macy  * Any writes above that will be executed with lower (asynchronous) priority
141eda14cbcSMatt Macy  * to limit potential SLOG device abuse by a single active ZIL writer.
142eda14cbcSMatt Macy  */
14322b267e8SMartin Matuska static uint64_t zil_slog_bulk = 64 * 1024 * 1024;
144eda14cbcSMatt Macy 
145eda14cbcSMatt Macy static kmem_cache_t *zil_lwb_cache;
146eda14cbcSMatt Macy static kmem_cache_t *zil_zcw_cache;
147eda14cbcSMatt Macy 
1484e8d558cSMartin Matuska static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
1494e8d558cSMartin Matuska static itx_t *zil_itx_clone(itx_t *oitx);
150b356da80SMartin Matuska static uint64_t zil_max_waste_space(zilog_t *zilog);
1514e8d558cSMartin Matuska 
152eda14cbcSMatt Macy static int
153eda14cbcSMatt Macy zil_bp_compare(const void *x1, const void *x2)
154eda14cbcSMatt Macy {
155eda14cbcSMatt Macy 	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
156eda14cbcSMatt Macy 	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
157eda14cbcSMatt Macy 
158eda14cbcSMatt Macy 	int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
159eda14cbcSMatt Macy 	if (likely(cmp))
160eda14cbcSMatt Macy 		return (cmp);
161eda14cbcSMatt Macy 
162eda14cbcSMatt Macy 	return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
163eda14cbcSMatt Macy }
164eda14cbcSMatt Macy 
165eda14cbcSMatt Macy static void
166eda14cbcSMatt Macy zil_bp_tree_init(zilog_t *zilog)
167eda14cbcSMatt Macy {
168eda14cbcSMatt Macy 	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
169eda14cbcSMatt Macy 	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
170eda14cbcSMatt Macy }
171eda14cbcSMatt Macy 
172eda14cbcSMatt Macy static void
173eda14cbcSMatt Macy zil_bp_tree_fini(zilog_t *zilog)
174eda14cbcSMatt Macy {
175eda14cbcSMatt Macy 	avl_tree_t *t = &zilog->zl_bp_tree;
176eda14cbcSMatt Macy 	zil_bp_node_t *zn;
177eda14cbcSMatt Macy 	void *cookie = NULL;
178eda14cbcSMatt Macy 
179eda14cbcSMatt Macy 	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
180eda14cbcSMatt Macy 		kmem_free(zn, sizeof (zil_bp_node_t));
181eda14cbcSMatt Macy 
182eda14cbcSMatt Macy 	avl_destroy(t);
183eda14cbcSMatt Macy }
184eda14cbcSMatt Macy 
185eda14cbcSMatt Macy int
186eda14cbcSMatt Macy zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
187eda14cbcSMatt Macy {
188eda14cbcSMatt Macy 	avl_tree_t *t = &zilog->zl_bp_tree;
189eda14cbcSMatt Macy 	const dva_t *dva;
190eda14cbcSMatt Macy 	zil_bp_node_t *zn;
191eda14cbcSMatt Macy 	avl_index_t where;
192eda14cbcSMatt Macy 
193eda14cbcSMatt Macy 	if (BP_IS_EMBEDDED(bp))
194eda14cbcSMatt Macy 		return (0);
195eda14cbcSMatt Macy 
196eda14cbcSMatt Macy 	dva = BP_IDENTITY(bp);
197eda14cbcSMatt Macy 
198eda14cbcSMatt Macy 	if (avl_find(t, dva, &where) != NULL)
199eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
200eda14cbcSMatt Macy 
201eda14cbcSMatt Macy 	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
202eda14cbcSMatt Macy 	zn->zn_dva = *dva;
203eda14cbcSMatt Macy 	avl_insert(t, zn, where);
204eda14cbcSMatt Macy 
205eda14cbcSMatt Macy 	return (0);
206eda14cbcSMatt Macy }
207eda14cbcSMatt Macy 
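/*
 * A hypothetical usage sketch (sketch_zil_visit_once is not called
 * anywhere in this file): the claim and free passes below rely on the
 * EEXIST contract of zil_bp_tree_add() to visit each block pointer
 * exactly once, even if the same DVA is reachable more than once while
 * walking the chain.
 */
static void __maybe_unused
sketch_zil_visit_once(zilog_t *zilog, const blkptr_t *bp)
{
	if (zil_bp_tree_add(zilog, bp) != 0)
		return;		/* EEXIST: already visited */
	/* First visit: safe to claim or free bp here. */
}
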
208eda14cbcSMatt Macy static zil_header_t *
209eda14cbcSMatt Macy zil_header_in_syncing_context(zilog_t *zilog)
210eda14cbcSMatt Macy {
211eda14cbcSMatt Macy 	return ((zil_header_t *)zilog->zl_header);
212eda14cbcSMatt Macy }
213eda14cbcSMatt Macy 
214eda14cbcSMatt Macy static void
215eda14cbcSMatt Macy zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
216eda14cbcSMatt Macy {
217eda14cbcSMatt Macy 	zio_cksum_t *zc = &bp->blk_cksum;
218eda14cbcSMatt Macy 
21933b8c039SMartin Matuska 	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
22033b8c039SMartin Matuska 	    sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
22133b8c039SMartin Matuska 	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
22233b8c039SMartin Matuska 	    sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
223eda14cbcSMatt Macy 	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
224eda14cbcSMatt Macy 	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
225eda14cbcSMatt Macy }
226eda14cbcSMatt Macy 
227271171e0SMartin Matuska static int
228271171e0SMartin Matuska zil_kstats_global_update(kstat_t *ksp, int rw)
229271171e0SMartin Matuska {
230271171e0SMartin Matuska 	zil_kstat_values_t *zs = ksp->ks_data;
231271171e0SMartin Matuska 	ASSERT3P(&zil_stats, ==, zs);
232271171e0SMartin Matuska 
233271171e0SMartin Matuska 	if (rw == KSTAT_WRITE) {
234271171e0SMartin Matuska 		return (SET_ERROR(EACCES));
235271171e0SMartin Matuska 	}
236271171e0SMartin Matuska 
237271171e0SMartin Matuska 	zil_kstat_values_update(zs, &zil_sums_global);
238271171e0SMartin Matuska 
239271171e0SMartin Matuska 	return (0);
240271171e0SMartin Matuska }
241271171e0SMartin Matuska 
242eda14cbcSMatt Macy /*
243eda14cbcSMatt Macy  * Read a log block and make sure it's valid.
244eda14cbcSMatt Macy  */
245eda14cbcSMatt Macy static int
246eda14cbcSMatt Macy zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
2474e8d558cSMartin Matuska     blkptr_t *nbp, char **begin, char **end, arc_buf_t **abuf)
248eda14cbcSMatt Macy {
249dbd5678dSMartin Matuska 	zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
250eda14cbcSMatt Macy 	arc_flags_t aflags = ARC_FLAG_WAIT;
251eda14cbcSMatt Macy 	zbookmark_phys_t zb;
252eda14cbcSMatt Macy 	int error;
253eda14cbcSMatt Macy 
254eda14cbcSMatt Macy 	if (zilog->zl_header->zh_claim_txg == 0)
255eda14cbcSMatt Macy 		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
256eda14cbcSMatt Macy 
257eda14cbcSMatt Macy 	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
258eda14cbcSMatt Macy 		zio_flags |= ZIO_FLAG_SPECULATIVE;
259eda14cbcSMatt Macy 
260eda14cbcSMatt Macy 	if (!decrypt)
261eda14cbcSMatt Macy 		zio_flags |= ZIO_FLAG_RAW;
262eda14cbcSMatt Macy 
263eda14cbcSMatt Macy 	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
264eda14cbcSMatt Macy 	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
265eda14cbcSMatt Macy 
266eda14cbcSMatt Macy 	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
2674e8d558cSMartin Matuska 	    abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
268eda14cbcSMatt Macy 
269eda14cbcSMatt Macy 	if (error == 0) {
270eda14cbcSMatt Macy 		zio_cksum_t cksum = bp->blk_cksum;
271eda14cbcSMatt Macy 
272eda14cbcSMatt Macy 		/*
273eda14cbcSMatt Macy 		 * Validate the checksummed log block.
274eda14cbcSMatt Macy 		 *
275eda14cbcSMatt Macy 		 * Sequence numbers should be... sequential.  The checksum
276eda14cbcSMatt Macy 		 * verifier for the next block should be bp's checksum plus 1.
277eda14cbcSMatt Macy 		 *
278eda14cbcSMatt Macy 		 * Also check the log chain linkage and size used.
279eda14cbcSMatt Macy 		 */
280eda14cbcSMatt Macy 		cksum.zc_word[ZIL_ZC_SEQ]++;
281eda14cbcSMatt Macy 
2824e8d558cSMartin Matuska 		uint64_t size = BP_GET_LSIZE(bp);
283eda14cbcSMatt Macy 		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
2844e8d558cSMartin Matuska 			zil_chain_t *zilc = (*abuf)->b_data;
285eda14cbcSMatt Macy 			char *lr = (char *)(zilc + 1);
286eda14cbcSMatt Macy 
287da5137abSMartin Matuska 			if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
288315ee00fSMartin Matuska 			    sizeof (cksum)) ||
2894e8d558cSMartin Matuska 			    zilc->zc_nused < sizeof (*zilc) ||
2904e8d558cSMartin Matuska 			    zilc->zc_nused > size) {
291eda14cbcSMatt Macy 				error = SET_ERROR(ECKSUM);
292eda14cbcSMatt Macy 			} else {
2934e8d558cSMartin Matuska 				*begin = lr;
2944e8d558cSMartin Matuska 				*end = lr + zilc->zc_nused - sizeof (*zilc);
295eda14cbcSMatt Macy 				*nbp = zilc->zc_next_blk;
296eda14cbcSMatt Macy 			}
297eda14cbcSMatt Macy 		} else {
2984e8d558cSMartin Matuska 			char *lr = (*abuf)->b_data;
299eda14cbcSMatt Macy 			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
300eda14cbcSMatt Macy 
301da5137abSMartin Matuska 			if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
302315ee00fSMartin Matuska 			    sizeof (cksum)) ||
303eda14cbcSMatt Macy 			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
304eda14cbcSMatt Macy 				error = SET_ERROR(ECKSUM);
305eda14cbcSMatt Macy 			} else {
3064e8d558cSMartin Matuska 				*begin = lr;
3074e8d558cSMartin Matuska 				*end = lr + zilc->zc_nused;
308eda14cbcSMatt Macy 				*nbp = zilc->zc_next_blk;
309eda14cbcSMatt Macy 			}
310eda14cbcSMatt Macy 		}
311eda14cbcSMatt Macy 	}
312eda14cbcSMatt Macy 
313eda14cbcSMatt Macy 	return (error);
314eda14cbcSMatt Macy }
315eda14cbcSMatt Macy 
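/*
 * The chain-linkage rule above, restated as a hedged helper (the code
 * performs this comparison inline rather than calling anything like
 * this): the verifier stored in a block's zil_chain_t must equal the
 * previous block's checksum with the sequence word incremented by one.
 */
static boolean_t __maybe_unused
sketch_zil_chain_link_ok(const blkptr_t *bp, const zil_chain_t *zilc)
{
	zio_cksum_t expected = bp->blk_cksum;

	expected.zc_word[ZIL_ZC_SEQ]++;
	return (memcmp(&expected, &zilc->zc_next_blk.blk_cksum,
	    sizeof (expected)) == 0);
}
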
316eda14cbcSMatt Macy /*
317eda14cbcSMatt Macy  * Read a TX_WRITE log data block.
318eda14cbcSMatt Macy  */
319eda14cbcSMatt Macy static int
320eda14cbcSMatt Macy zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
321eda14cbcSMatt Macy {
322dbd5678dSMartin Matuska 	zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
323eda14cbcSMatt Macy 	const blkptr_t *bp = &lr->lr_blkptr;
324eda14cbcSMatt Macy 	arc_flags_t aflags = ARC_FLAG_WAIT;
325eda14cbcSMatt Macy 	arc_buf_t *abuf = NULL;
326eda14cbcSMatt Macy 	zbookmark_phys_t zb;
327eda14cbcSMatt Macy 	int error;
328eda14cbcSMatt Macy 
329eda14cbcSMatt Macy 	if (BP_IS_HOLE(bp)) {
330eda14cbcSMatt Macy 		if (wbuf != NULL)
331da5137abSMartin Matuska 			memset(wbuf, 0, MAX(BP_GET_LSIZE(bp), lr->lr_length));
332eda14cbcSMatt Macy 		return (0);
333eda14cbcSMatt Macy 	}
334eda14cbcSMatt Macy 
335eda14cbcSMatt Macy 	if (zilog->zl_header->zh_claim_txg == 0)
336eda14cbcSMatt Macy 		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
337eda14cbcSMatt Macy 
338eda14cbcSMatt Macy 	/*
339eda14cbcSMatt Macy 	 * If we are not using the resulting data, we are just checking that
340eda14cbcSMatt Macy 	 * it hasn't been corrupted, so we don't need to waste CPU time
341eda14cbcSMatt Macy 	 * decompressing and decrypting it.
342eda14cbcSMatt Macy 	 */
343eda14cbcSMatt Macy 	if (wbuf == NULL)
344eda14cbcSMatt Macy 		zio_flags |= ZIO_FLAG_RAW;
345eda14cbcSMatt Macy 
346dbd5678dSMartin Matuska 	ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
347eda14cbcSMatt Macy 	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
348eda14cbcSMatt Macy 	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
349eda14cbcSMatt Macy 
350eda14cbcSMatt Macy 	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
351eda14cbcSMatt Macy 	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
352eda14cbcSMatt Macy 
353eda14cbcSMatt Macy 	if (error == 0) {
354eda14cbcSMatt Macy 		if (wbuf != NULL)
355da5137abSMartin Matuska 			memcpy(wbuf, abuf->b_data, arc_buf_size(abuf));
356eda14cbcSMatt Macy 		arc_buf_destroy(abuf, &abuf);
357eda14cbcSMatt Macy 	}
358eda14cbcSMatt Macy 
359eda14cbcSMatt Macy 	return (error);
360eda14cbcSMatt Macy }
361eda14cbcSMatt Macy 
362271171e0SMartin Matuska void
363271171e0SMartin Matuska zil_sums_init(zil_sums_t *zs)
364271171e0SMartin Matuska {
365271171e0SMartin Matuska 	wmsum_init(&zs->zil_commit_count, 0);
366271171e0SMartin Matuska 	wmsum_init(&zs->zil_commit_writer_count, 0);
367ce4dcb97SMartin Matuska 	wmsum_init(&zs->zil_commit_error_count, 0);
368ce4dcb97SMartin Matuska 	wmsum_init(&zs->zil_commit_stall_count, 0);
369ce4dcb97SMartin Matuska 	wmsum_init(&zs->zil_commit_suspend_count, 0);
370271171e0SMartin Matuska 	wmsum_init(&zs->zil_itx_count, 0);
371271171e0SMartin Matuska 	wmsum_init(&zs->zil_itx_indirect_count, 0);
372271171e0SMartin Matuska 	wmsum_init(&zs->zil_itx_indirect_bytes, 0);
373271171e0SMartin Matuska 	wmsum_init(&zs->zil_itx_copied_count, 0);
374271171e0SMartin Matuska 	wmsum_init(&zs->zil_itx_copied_bytes, 0);
375271171e0SMartin Matuska 	wmsum_init(&zs->zil_itx_needcopy_count, 0);
376271171e0SMartin Matuska 	wmsum_init(&zs->zil_itx_needcopy_bytes, 0);
377271171e0SMartin Matuska 	wmsum_init(&zs->zil_itx_metaslab_normal_count, 0);
378271171e0SMartin Matuska 	wmsum_init(&zs->zil_itx_metaslab_normal_bytes, 0);
3794e8d558cSMartin Matuska 	wmsum_init(&zs->zil_itx_metaslab_normal_write, 0);
3804e8d558cSMartin Matuska 	wmsum_init(&zs->zil_itx_metaslab_normal_alloc, 0);
381271171e0SMartin Matuska 	wmsum_init(&zs->zil_itx_metaslab_slog_count, 0);
382271171e0SMartin Matuska 	wmsum_init(&zs->zil_itx_metaslab_slog_bytes, 0);
3834e8d558cSMartin Matuska 	wmsum_init(&zs->zil_itx_metaslab_slog_write, 0);
3844e8d558cSMartin Matuska 	wmsum_init(&zs->zil_itx_metaslab_slog_alloc, 0);
385271171e0SMartin Matuska }
386271171e0SMartin Matuska 
387271171e0SMartin Matuska void
388271171e0SMartin Matuska zil_sums_fini(zil_sums_t *zs)
389271171e0SMartin Matuska {
390271171e0SMartin Matuska 	wmsum_fini(&zs->zil_commit_count);
391271171e0SMartin Matuska 	wmsum_fini(&zs->zil_commit_writer_count);
392ce4dcb97SMartin Matuska 	wmsum_fini(&zs->zil_commit_error_count);
393ce4dcb97SMartin Matuska 	wmsum_fini(&zs->zil_commit_stall_count);
394ce4dcb97SMartin Matuska 	wmsum_fini(&zs->zil_commit_suspend_count);
395271171e0SMartin Matuska 	wmsum_fini(&zs->zil_itx_count);
396271171e0SMartin Matuska 	wmsum_fini(&zs->zil_itx_indirect_count);
397271171e0SMartin Matuska 	wmsum_fini(&zs->zil_itx_indirect_bytes);
398271171e0SMartin Matuska 	wmsum_fini(&zs->zil_itx_copied_count);
399271171e0SMartin Matuska 	wmsum_fini(&zs->zil_itx_copied_bytes);
400271171e0SMartin Matuska 	wmsum_fini(&zs->zil_itx_needcopy_count);
401271171e0SMartin Matuska 	wmsum_fini(&zs->zil_itx_needcopy_bytes);
402271171e0SMartin Matuska 	wmsum_fini(&zs->zil_itx_metaslab_normal_count);
403271171e0SMartin Matuska 	wmsum_fini(&zs->zil_itx_metaslab_normal_bytes);
4044e8d558cSMartin Matuska 	wmsum_fini(&zs->zil_itx_metaslab_normal_write);
4054e8d558cSMartin Matuska 	wmsum_fini(&zs->zil_itx_metaslab_normal_alloc);
406271171e0SMartin Matuska 	wmsum_fini(&zs->zil_itx_metaslab_slog_count);
407271171e0SMartin Matuska 	wmsum_fini(&zs->zil_itx_metaslab_slog_bytes);
4084e8d558cSMartin Matuska 	wmsum_fini(&zs->zil_itx_metaslab_slog_write);
4094e8d558cSMartin Matuska 	wmsum_fini(&zs->zil_itx_metaslab_slog_alloc);
410271171e0SMartin Matuska }
411271171e0SMartin Matuska 
412271171e0SMartin Matuska void
413271171e0SMartin Matuska zil_kstat_values_update(zil_kstat_values_t *zs, zil_sums_t *zil_sums)
414271171e0SMartin Matuska {
415271171e0SMartin Matuska 	zs->zil_commit_count.value.ui64 =
416271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_commit_count);
417271171e0SMartin Matuska 	zs->zil_commit_writer_count.value.ui64 =
418271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_commit_writer_count);
419ce4dcb97SMartin Matuska 	zs->zil_commit_error_count.value.ui64 =
420ce4dcb97SMartin Matuska 	    wmsum_value(&zil_sums->zil_commit_error_count);
421ce4dcb97SMartin Matuska 	zs->zil_commit_stall_count.value.ui64 =
422ce4dcb97SMartin Matuska 	    wmsum_value(&zil_sums->zil_commit_stall_count);
423ce4dcb97SMartin Matuska 	zs->zil_commit_suspend_count.value.ui64 =
424ce4dcb97SMartin Matuska 	    wmsum_value(&zil_sums->zil_commit_suspend_count);
425271171e0SMartin Matuska 	zs->zil_itx_count.value.ui64 =
426271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_count);
427271171e0SMartin Matuska 	zs->zil_itx_indirect_count.value.ui64 =
428271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_indirect_count);
429271171e0SMartin Matuska 	zs->zil_itx_indirect_bytes.value.ui64 =
430271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_indirect_bytes);
431271171e0SMartin Matuska 	zs->zil_itx_copied_count.value.ui64 =
432271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_copied_count);
433271171e0SMartin Matuska 	zs->zil_itx_copied_bytes.value.ui64 =
434271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_copied_bytes);
435271171e0SMartin Matuska 	zs->zil_itx_needcopy_count.value.ui64 =
436271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_needcopy_count);
437271171e0SMartin Matuska 	zs->zil_itx_needcopy_bytes.value.ui64 =
438271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_needcopy_bytes);
439271171e0SMartin Matuska 	zs->zil_itx_metaslab_normal_count.value.ui64 =
440271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_count);
441271171e0SMartin Matuska 	zs->zil_itx_metaslab_normal_bytes.value.ui64 =
442271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_bytes);
4434e8d558cSMartin Matuska 	zs->zil_itx_metaslab_normal_write.value.ui64 =
4444e8d558cSMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_write);
4454e8d558cSMartin Matuska 	zs->zil_itx_metaslab_normal_alloc.value.ui64 =
4464e8d558cSMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_alloc);
447271171e0SMartin Matuska 	zs->zil_itx_metaslab_slog_count.value.ui64 =
448271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_count);
449271171e0SMartin Matuska 	zs->zil_itx_metaslab_slog_bytes.value.ui64 =
450271171e0SMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_bytes);
4514e8d558cSMartin Matuska 	zs->zil_itx_metaslab_slog_write.value.ui64 =
4524e8d558cSMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_write);
4534e8d558cSMartin Matuska 	zs->zil_itx_metaslab_slog_alloc.value.ui64 =
4544e8d558cSMartin Matuska 	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_alloc);
455271171e0SMartin Matuska }
456271171e0SMartin Matuska 
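/*
 * A hedged sketch of the wmsum pattern the helpers above assume
 * (sketch_note_commit is hypothetical): writers call wmsum_add()
 * cheaply on hot paths, and only the kstat read side, i.e.
 * zil_kstat_values_update(), pays to aggregate the per-CPU buckets
 * via wmsum_value().
 */
static void __maybe_unused
sketch_note_commit(zil_sums_t *zs)
{
	wmsum_add(&zs->zil_commit_count, 1);
}
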
457eda14cbcSMatt Macy /*
458eda14cbcSMatt Macy  * Parse the intent log, and call parse_func for each valid record within.
459eda14cbcSMatt Macy  */
460eda14cbcSMatt Macy int
461eda14cbcSMatt Macy zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
462eda14cbcSMatt Macy     zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
463eda14cbcSMatt Macy     boolean_t decrypt)
464eda14cbcSMatt Macy {
465eda14cbcSMatt Macy 	const zil_header_t *zh = zilog->zl_header;
466eda14cbcSMatt Macy 	boolean_t claimed = !!zh->zh_claim_txg;
467eda14cbcSMatt Macy 	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
468eda14cbcSMatt Macy 	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
469eda14cbcSMatt Macy 	uint64_t max_blk_seq = 0;
470eda14cbcSMatt Macy 	uint64_t max_lr_seq = 0;
471eda14cbcSMatt Macy 	uint64_t blk_count = 0;
472eda14cbcSMatt Macy 	uint64_t lr_count = 0;
473da5137abSMartin Matuska 	blkptr_t blk, next_blk = {{{{0}}}};
474eda14cbcSMatt Macy 	int error = 0;
475eda14cbcSMatt Macy 
476eda14cbcSMatt Macy 	/*
477eda14cbcSMatt Macy 	 * Old logs didn't record the maximum zh_claim_lr_seq.
478eda14cbcSMatt Macy 	 */
479eda14cbcSMatt Macy 	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
480eda14cbcSMatt Macy 		claim_lr_seq = UINT64_MAX;
481eda14cbcSMatt Macy 
482eda14cbcSMatt Macy 	/*
483eda14cbcSMatt Macy 	 * Starting at the block pointed to by zh_log we read the log chain.
484eda14cbcSMatt Macy 	 * For each block in the chain we strongly check that block to
485eda14cbcSMatt Macy 	 * ensure its validity.  We stop when an invalid block is found.
486eda14cbcSMatt Macy 	 * For each block pointer in the chain we call parse_blk_func().
487eda14cbcSMatt Macy 	 * For each record in each valid block we call parse_lr_func().
488eda14cbcSMatt Macy 	 * If the log has been claimed, stop if we encounter a sequence
489eda14cbcSMatt Macy 	 * number greater than the highest claimed sequence number.
490eda14cbcSMatt Macy 	 */
491eda14cbcSMatt Macy 	zil_bp_tree_init(zilog);
492eda14cbcSMatt Macy 
493eda14cbcSMatt Macy 	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
494eda14cbcSMatt Macy 		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
495eda14cbcSMatt Macy 		int reclen;
4964e8d558cSMartin Matuska 		char *lrp, *end;
4974e8d558cSMartin Matuska 		arc_buf_t *abuf = NULL;
498eda14cbcSMatt Macy 
499eda14cbcSMatt Macy 		if (blk_seq > claim_blk_seq)
500eda14cbcSMatt Macy 			break;
501eda14cbcSMatt Macy 
502eda14cbcSMatt Macy 		error = parse_blk_func(zilog, &blk, arg, txg);
503eda14cbcSMatt Macy 		if (error != 0)
504eda14cbcSMatt Macy 			break;
505eda14cbcSMatt Macy 		ASSERT3U(max_blk_seq, <, blk_seq);
506eda14cbcSMatt Macy 		max_blk_seq = blk_seq;
507eda14cbcSMatt Macy 		blk_count++;
508eda14cbcSMatt Macy 
509eda14cbcSMatt Macy 		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
510eda14cbcSMatt Macy 			break;
511eda14cbcSMatt Macy 
512eda14cbcSMatt Macy 		error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
5134e8d558cSMartin Matuska 		    &lrp, &end, &abuf);
514dbd5678dSMartin Matuska 		if (error != 0) {
5154e8d558cSMartin Matuska 			if (abuf)
5164e8d558cSMartin Matuska 				arc_buf_destroy(abuf, &abuf);
517dbd5678dSMartin Matuska 			if (claimed) {
518dbd5678dSMartin Matuska 				char name[ZFS_MAX_DATASET_NAME_LEN];
519dbd5678dSMartin Matuska 
520dbd5678dSMartin Matuska 				dmu_objset_name(zilog->zl_os, name);
521dbd5678dSMartin Matuska 
522dbd5678dSMartin Matuska 				cmn_err(CE_WARN, "ZFS read log block error %d, "
523dbd5678dSMartin Matuska 				    "dataset %s, seq 0x%llx\n", error, name,
524dbd5678dSMartin Matuska 				    (u_longlong_t)blk_seq);
525dbd5678dSMartin Matuska 			}
526eda14cbcSMatt Macy 			break;
527dbd5678dSMartin Matuska 		}
528eda14cbcSMatt Macy 
5294e8d558cSMartin Matuska 		for (; lrp < end; lrp += reclen) {
530eda14cbcSMatt Macy 			lr_t *lr = (lr_t *)lrp;
531ce4dcb97SMartin Matuska 
532ce4dcb97SMartin Matuska 			/*
533ce4dcb97SMartin Matuska 			 * Are the remaining bytes large enough to hold a
534ce4dcb97SMartin Matuska 			 * log record?
535ce4dcb97SMartin Matuska 			 */
536ce4dcb97SMartin Matuska 			if ((char *)(lr + 1) > end) {
537ce4dcb97SMartin Matuska 				cmn_err(CE_WARN, "zil_parse: lr_t overrun");
538ce4dcb97SMartin Matuska 				error = SET_ERROR(ECKSUM);
539ce4dcb97SMartin Matuska 				arc_buf_destroy(abuf, &abuf);
540ce4dcb97SMartin Matuska 				goto done;
541ce4dcb97SMartin Matuska 			}
542eda14cbcSMatt Macy 			reclen = lr->lrc_reclen;
543ce4dcb97SMartin Matuska 			if (reclen < sizeof (lr_t) || reclen > end - lrp) {
544ce4dcb97SMartin Matuska 				cmn_err(CE_WARN,
545ce4dcb97SMartin Matuska 				    "zil_parse: lr_t has an invalid reclen");
546ce4dcb97SMartin Matuska 				error = SET_ERROR(ECKSUM);
547ce4dcb97SMartin Matuska 				arc_buf_destroy(abuf, &abuf);
548ce4dcb97SMartin Matuska 				goto done;
549ce4dcb97SMartin Matuska 			}
550ce4dcb97SMartin Matuska 
551f190c36bSMartin Matuska 			if (lr->lrc_seq > claim_lr_seq) {
552f190c36bSMartin Matuska 				arc_buf_destroy(abuf, &abuf);
553eda14cbcSMatt Macy 				goto done;
554f190c36bSMartin Matuska 			}
555eda14cbcSMatt Macy 
556eda14cbcSMatt Macy 			error = parse_lr_func(zilog, lr, arg, txg);
557f190c36bSMartin Matuska 			if (error != 0) {
558f190c36bSMartin Matuska 				arc_buf_destroy(abuf, &abuf);
559eda14cbcSMatt Macy 				goto done;
560f190c36bSMartin Matuska 			}
561eda14cbcSMatt Macy 			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
562eda14cbcSMatt Macy 			max_lr_seq = lr->lrc_seq;
563eda14cbcSMatt Macy 			lr_count++;
564eda14cbcSMatt Macy 		}
5654e8d558cSMartin Matuska 		arc_buf_destroy(abuf, &abuf);
566eda14cbcSMatt Macy 	}
567eda14cbcSMatt Macy done:
568eda14cbcSMatt Macy 	zilog->zl_parse_error = error;
569eda14cbcSMatt Macy 	zilog->zl_parse_blk_seq = max_blk_seq;
570eda14cbcSMatt Macy 	zilog->zl_parse_lr_seq = max_lr_seq;
571eda14cbcSMatt Macy 	zilog->zl_parse_blk_count = blk_count;
572eda14cbcSMatt Macy 	zilog->zl_parse_lr_count = lr_count;
573eda14cbcSMatt Macy 
574eda14cbcSMatt Macy 	zil_bp_tree_fini(zilog);
575eda14cbcSMatt Macy 
576eda14cbcSMatt Macy 	return (error);
577eda14cbcSMatt Macy }
578eda14cbcSMatt Macy 
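/*
 * A hedged usage sketch for zil_parse(); these sketch_* callbacks are
 * hypothetical, the real callers being the claim and free passes
 * below.  A pass that only wants the counts and sequence numbers left
 * in zl_parse_* can supply no-op callbacks.
 */
static int
sketch_parse_blk(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t txg)
{
	(void) zilog, (void) bp, (void) arg, (void) txg;
	return (0);
}

static int
sketch_parse_lr(zilog_t *zilog, const lr_t *lrc, void *arg, uint64_t txg)
{
	(void) zilog, (void) lrc, (void) arg, (void) txg;
	return (0);
}

static int __maybe_unused
sketch_zil_walk(zilog_t *zilog, uint64_t first_txg)
{
	return (zil_parse(zilog, sketch_parse_blk, sketch_parse_lr,
	    NULL, first_txg, B_FALSE));
}
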
579eda14cbcSMatt Macy static int
580180f8225SMatt Macy zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
581180f8225SMatt Macy     uint64_t first_txg)
582eda14cbcSMatt Macy {
583e92ffd9bSMartin Matuska 	(void) tx;
584eda14cbcSMatt Macy 	ASSERT(!BP_IS_HOLE(bp));
585eda14cbcSMatt Macy 
586eda14cbcSMatt Macy 	/*
587eda14cbcSMatt Macy 	 * As we call this function from the context of a rewind to a
588eda14cbcSMatt Macy 	 * checkpoint, each ZIL block whose txg is later than the txg
589eda14cbcSMatt Macy 	 * that we rewind to is invalid. Thus, we return -1 so
590eda14cbcSMatt Macy 	 * zil_parse() doesn't attempt to read it.
591eda14cbcSMatt Macy 	 */
592783d3ff6SMartin Matuska 	if (BP_GET_LOGICAL_BIRTH(bp) >= first_txg)
593eda14cbcSMatt Macy 		return (-1);
594eda14cbcSMatt Macy 
595eda14cbcSMatt Macy 	if (zil_bp_tree_add(zilog, bp) != 0)
596eda14cbcSMatt Macy 		return (0);
597eda14cbcSMatt Macy 
598eda14cbcSMatt Macy 	zio_free(zilog->zl_spa, first_txg, bp);
599eda14cbcSMatt Macy 	return (0);
600eda14cbcSMatt Macy }
601eda14cbcSMatt Macy 
602eda14cbcSMatt Macy static int
603180f8225SMatt Macy zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
604180f8225SMatt Macy     uint64_t first_txg)
605eda14cbcSMatt Macy {
606e92ffd9bSMartin Matuska 	(void) zilog, (void) lrc, (void) tx, (void) first_txg;
607eda14cbcSMatt Macy 	return (0);
608eda14cbcSMatt Macy }
609eda14cbcSMatt Macy 
610eda14cbcSMatt Macy static int
611180f8225SMatt Macy zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
612180f8225SMatt Macy     uint64_t first_txg)
613eda14cbcSMatt Macy {
614eda14cbcSMatt Macy 	/*
615eda14cbcSMatt Macy 	 * Claim log block if not already committed and not already claimed.
616eda14cbcSMatt Macy 	 * If tx == NULL, just verify that the block is claimable.
617eda14cbcSMatt Macy 	 */
618783d3ff6SMartin Matuska 	if (BP_IS_HOLE(bp) || BP_GET_LOGICAL_BIRTH(bp) < first_txg ||
619eda14cbcSMatt Macy 	    zil_bp_tree_add(zilog, bp) != 0)
620eda14cbcSMatt Macy 		return (0);
621eda14cbcSMatt Macy 
622eda14cbcSMatt Macy 	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
623eda14cbcSMatt Macy 	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
624eda14cbcSMatt Macy 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
625eda14cbcSMatt Macy }
626eda14cbcSMatt Macy 
627eda14cbcSMatt Macy static int
6282a58b312SMartin Matuska zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg)
629eda14cbcSMatt Macy {
630eda14cbcSMatt Macy 	lr_write_t *lr = (lr_write_t *)lrc;
631eda14cbcSMatt Macy 	int error;
632eda14cbcSMatt Macy 
633525fe93dSMartin Matuska 	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
634eda14cbcSMatt Macy 
635eda14cbcSMatt Macy 	/*
636eda14cbcSMatt Macy 	 * If the block is not readable, don't claim it.  This can happen
637eda14cbcSMatt Macy 	 * in normal operation when a log block is written to disk before
638eda14cbcSMatt Macy 	 * some of the dmu_sync() blocks it points to.  In this case, the
639eda14cbcSMatt Macy 	 * transaction cannot have been committed to anyone (we would have
640eda14cbcSMatt Macy 	 * waited for all writes to be stable first), so it is semantically
641eda14cbcSMatt Macy 	 * correct to declare this the end of the log.
642eda14cbcSMatt Macy 	 */
643783d3ff6SMartin Matuska 	if (BP_GET_LOGICAL_BIRTH(&lr->lr_blkptr) >= first_txg) {
644eda14cbcSMatt Macy 		error = zil_read_log_data(zilog, lr, NULL);
645eda14cbcSMatt Macy 		if (error != 0)
646eda14cbcSMatt Macy 			return (error);
647eda14cbcSMatt Macy 	}
648eda14cbcSMatt Macy 
649eda14cbcSMatt Macy 	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
650eda14cbcSMatt Macy }
651eda14cbcSMatt Macy 
652eda14cbcSMatt Macy static int
6533494f7c0SMartin Matuska zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx,
6543494f7c0SMartin Matuska     uint64_t first_txg)
6552a58b312SMartin Matuska {
6562a58b312SMartin Matuska 	const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
6572a58b312SMartin Matuska 	const blkptr_t *bp;
6583494f7c0SMartin Matuska 	spa_t *spa = zilog->zl_spa;
6592a58b312SMartin Matuska 	uint_t ii;
6602a58b312SMartin Matuska 
661525fe93dSMartin Matuska 	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
662525fe93dSMartin Matuska 	ASSERT3U(lrc->lrc_reclen, >=, offsetof(lr_clone_range_t,
663525fe93dSMartin Matuska 	    lr_bps[lr->lr_nbps]));
6642a58b312SMartin Matuska 
6652a58b312SMartin Matuska 	if (tx == NULL) {
6662a58b312SMartin Matuska 		return (0);
6672a58b312SMartin Matuska 	}
6682a58b312SMartin Matuska 
6692a58b312SMartin Matuska 	/*
6702a58b312SMartin Matuska 	 * XXX: Do we need to byteswap lr?
6712a58b312SMartin Matuska 	 */
6722a58b312SMartin Matuska 
6732a58b312SMartin Matuska 	for (ii = 0; ii < lr->lr_nbps; ii++) {
6742a58b312SMartin Matuska 		bp = &lr->lr_bps[ii];
6752a58b312SMartin Matuska 
6762a58b312SMartin Matuska 		/*
6773494f7c0SMartin Matuska 		 * When data is embedded into the BP there is no need to create
6783494f7c0SMartin Matuska 		 * a BRT entry, as there is no data block.  Just copy the BP as
6793494f7c0SMartin Matuska 		 * it contains the data.
6793494f7c0SMartin Matuska 		 * contains the data.
6802a58b312SMartin Matuska 		 */
6813494f7c0SMartin Matuska 		if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
6823494f7c0SMartin Matuska 			continue;
6833494f7c0SMartin Matuska 
6843494f7c0SMartin Matuska 		/*
6853494f7c0SMartin Matuska 		 * We cannot handle block pointers from the future, since they
6863494f7c0SMartin Matuska 		 * are not yet allocated.  This should not normally happen,
6873494f7c0SMartin Matuska 		 * but just in case let's be safe and stop here instead of
6883494f7c0SMartin Matuska 		 * corrupting the pool.
6893494f7c0SMartin Matuska 		 */
690783d3ff6SMartin Matuska 		if (BP_GET_BIRTH(bp) >= first_txg)
6913494f7c0SMartin Matuska 			return (SET_ERROR(ENOENT));
6923494f7c0SMartin Matuska 
6933494f7c0SMartin Matuska 		/*
6943494f7c0SMartin Matuska 		 * Assert the block is really allocated before we reference it.
6953494f7c0SMartin Matuska 		 */
6963494f7c0SMartin Matuska 		metaslab_check_free(spa, bp);
6972a58b312SMartin Matuska 	}
6983494f7c0SMartin Matuska 
6993494f7c0SMartin Matuska 	for (ii = 0; ii < lr->lr_nbps; ii++) {
7003494f7c0SMartin Matuska 		bp = &lr->lr_bps[ii];
7013494f7c0SMartin Matuska 		if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp))
7023494f7c0SMartin Matuska 			brt_pending_add(spa, bp, tx);
7032a58b312SMartin Matuska 	}
7042a58b312SMartin Matuska 
7052a58b312SMartin Matuska 	return (0);
7062a58b312SMartin Matuska }
7072a58b312SMartin Matuska 
7082a58b312SMartin Matuska static int
7092a58b312SMartin Matuska zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
7102a58b312SMartin Matuska     uint64_t first_txg)
7112a58b312SMartin Matuska {
7122a58b312SMartin Matuska 
7132a58b312SMartin Matuska 	switch (lrc->lrc_txtype) {
7142a58b312SMartin Matuska 	case TX_WRITE:
7152a58b312SMartin Matuska 		return (zil_claim_write(zilog, lrc, tx, first_txg));
7162a58b312SMartin Matuska 	case TX_CLONE_RANGE:
7173494f7c0SMartin Matuska 		return (zil_claim_clone_range(zilog, lrc, tx, first_txg));
7182a58b312SMartin Matuska 	default:
7192a58b312SMartin Matuska 		return (0);
7202a58b312SMartin Matuska 	}
7212a58b312SMartin Matuska }
7222a58b312SMartin Matuska 
7232a58b312SMartin Matuska static int
724180f8225SMatt Macy zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
725180f8225SMatt Macy     uint64_t claim_txg)
726eda14cbcSMatt Macy {
727e92ffd9bSMartin Matuska 	(void) claim_txg;
728e92ffd9bSMartin Matuska 
729eda14cbcSMatt Macy 	zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
730eda14cbcSMatt Macy 
731eda14cbcSMatt Macy 	return (0);
732eda14cbcSMatt Macy }
733eda14cbcSMatt Macy 
734eda14cbcSMatt Macy static int
7352a58b312SMartin Matuska zil_free_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t claim_txg)
736eda14cbcSMatt Macy {
737eda14cbcSMatt Macy 	lr_write_t *lr = (lr_write_t *)lrc;
738eda14cbcSMatt Macy 	blkptr_t *bp = &lr->lr_blkptr;
739eda14cbcSMatt Macy 
740525fe93dSMartin Matuska 	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
7412a58b312SMartin Matuska 
742eda14cbcSMatt Macy 	/*
743eda14cbcSMatt Macy 	 * If we previously claimed it, we need to free it.
744eda14cbcSMatt Macy 	 */
745783d3ff6SMartin Matuska 	if (BP_GET_LOGICAL_BIRTH(bp) >= claim_txg &&
746783d3ff6SMartin Matuska 	    zil_bp_tree_add(zilog, bp) == 0 && !BP_IS_HOLE(bp)) {
747eda14cbcSMatt Macy 		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
7482a58b312SMartin Matuska 	}
749eda14cbcSMatt Macy 
750eda14cbcSMatt Macy 	return (0);
751eda14cbcSMatt Macy }
752eda14cbcSMatt Macy 
753eda14cbcSMatt Macy static int
7542a58b312SMartin Matuska zil_free_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
7552a58b312SMartin Matuska {
7562a58b312SMartin Matuska 	const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
7572a58b312SMartin Matuska 	const blkptr_t *bp;
7582a58b312SMartin Matuska 	spa_t *spa;
7592a58b312SMartin Matuska 	uint_t ii;
7602a58b312SMartin Matuska 
761525fe93dSMartin Matuska 	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
762525fe93dSMartin Matuska 	ASSERT3U(lrc->lrc_reclen, >=, offsetof(lr_clone_range_t,
763525fe93dSMartin Matuska 	    lr_bps[lr->lr_nbps]));
7642a58b312SMartin Matuska 
7652a58b312SMartin Matuska 	if (tx == NULL) {
7662a58b312SMartin Matuska 		return (0);
7672a58b312SMartin Matuska 	}
7682a58b312SMartin Matuska 
7692a58b312SMartin Matuska 	spa = zilog->zl_spa;
7702a58b312SMartin Matuska 
7712a58b312SMartin Matuska 	for (ii = 0; ii < lr->lr_nbps; ii++) {
7722a58b312SMartin Matuska 		bp = &lr->lr_bps[ii];
7732a58b312SMartin Matuska 
7742a58b312SMartin Matuska 		if (!BP_IS_HOLE(bp)) {
7752a58b312SMartin Matuska 			zio_free(spa, dmu_tx_get_txg(tx), bp);
7762a58b312SMartin Matuska 		}
7772a58b312SMartin Matuska 	}
7782a58b312SMartin Matuska 
7792a58b312SMartin Matuska 	return (0);
7802a58b312SMartin Matuska }
7812a58b312SMartin Matuska 
7822a58b312SMartin Matuska static int
7832a58b312SMartin Matuska zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
7842a58b312SMartin Matuska     uint64_t claim_txg)
7852a58b312SMartin Matuska {
7862a58b312SMartin Matuska 
7872a58b312SMartin Matuska 	if (claim_txg == 0) {
7882a58b312SMartin Matuska 		return (0);
7892a58b312SMartin Matuska 	}
7902a58b312SMartin Matuska 
7912a58b312SMartin Matuska 	switch (lrc->lrc_txtype) {
7922a58b312SMartin Matuska 	case TX_WRITE:
7932a58b312SMartin Matuska 		return (zil_free_write(zilog, lrc, tx, claim_txg));
7942a58b312SMartin Matuska 	case TX_CLONE_RANGE:
7952a58b312SMartin Matuska 		return (zil_free_clone_range(zilog, lrc, tx));
7962a58b312SMartin Matuska 	default:
7972a58b312SMartin Matuska 		return (0);
7982a58b312SMartin Matuska 	}
7992a58b312SMartin Matuska }
8002a58b312SMartin Matuska 
8012a58b312SMartin Matuska static int
802eda14cbcSMatt Macy zil_lwb_vdev_compare(const void *x1, const void *x2)
803eda14cbcSMatt Macy {
804eda14cbcSMatt Macy 	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
805eda14cbcSMatt Macy 	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
806eda14cbcSMatt Macy 
807eda14cbcSMatt Macy 	return (TREE_CMP(v1, v2));
808eda14cbcSMatt Macy }
809eda14cbcSMatt Macy 
810315ee00fSMartin Matuska /*
811315ee00fSMartin Matuska  * Allocate a new lwb.  We may already have a block pointer for it, in which
812315ee00fSMartin Matuska  * case we get size and version from there.  Or we may not yet, in which case
813315ee00fSMartin Matuska  * we choose them here and later make the block allocation match.
814315ee00fSMartin Matuska  */
815eda14cbcSMatt Macy static lwb_t *
816315ee00fSMartin Matuska zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
817315ee00fSMartin Matuska     uint64_t txg, lwb_state_t state)
818eda14cbcSMatt Macy {
819eda14cbcSMatt Macy 	lwb_t *lwb;
820eda14cbcSMatt Macy 
821eda14cbcSMatt Macy 	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
822eda14cbcSMatt Macy 	lwb->lwb_zilog = zilog;
823315ee00fSMartin Matuska 	if (bp) {
824eda14cbcSMatt Macy 		lwb->lwb_blk = *bp;
825315ee00fSMartin Matuska 		lwb->lwb_slim = (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2);
826315ee00fSMartin Matuska 		sz = BP_GET_LSIZE(bp);
8274e8d558cSMartin Matuska 	} else {
828315ee00fSMartin Matuska 		BP_ZERO(&lwb->lwb_blk);
829315ee00fSMartin Matuska 		lwb->lwb_slim = (spa_version(zilog->zl_spa) >=
830315ee00fSMartin Matuska 		    SPA_VERSION_SLIM_ZIL);
8314e8d558cSMartin Matuska 	}
832315ee00fSMartin Matuska 	lwb->lwb_slog = slog;
833315ee00fSMartin Matuska 	lwb->lwb_error = 0;
834315ee00fSMartin Matuska 	if (lwb->lwb_slim) {
835315ee00fSMartin Matuska 		lwb->lwb_nmax = sz;
836315ee00fSMartin Matuska 		lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
837315ee00fSMartin Matuska 	} else {
838315ee00fSMartin Matuska 		lwb->lwb_nmax = sz - sizeof (zil_chain_t);
839315ee00fSMartin Matuska 		lwb->lwb_nused = lwb->lwb_nfilled = 0;
840315ee00fSMartin Matuska 	}
841315ee00fSMartin Matuska 	lwb->lwb_sz = sz;
842315ee00fSMartin Matuska 	lwb->lwb_state = state;
843315ee00fSMartin Matuska 	lwb->lwb_buf = zio_buf_alloc(sz);
844315ee00fSMartin Matuska 	lwb->lwb_child_zio = NULL;
845eda14cbcSMatt Macy 	lwb->lwb_write_zio = NULL;
846eda14cbcSMatt Macy 	lwb->lwb_root_zio = NULL;
847eda14cbcSMatt Macy 	lwb->lwb_issued_timestamp = 0;
848e3aa18adSMartin Matuska 	lwb->lwb_issued_txg = 0;
849315ee00fSMartin Matuska 	lwb->lwb_alloc_txg = txg;
850315ee00fSMartin Matuska 	lwb->lwb_max_txg = 0;
851eda14cbcSMatt Macy 
852eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_lock);
853eda14cbcSMatt Macy 	list_insert_tail(&zilog->zl_lwb_list, lwb);
854315ee00fSMartin Matuska 	if (state != LWB_STATE_NEW)
855315ee00fSMartin Matuska 		zilog->zl_last_lwb_opened = lwb;
856eda14cbcSMatt Macy 	mutex_exit(&zilog->zl_lock);
857eda14cbcSMatt Macy 
858eda14cbcSMatt Macy 	return (lwb);
859eda14cbcSMatt Macy }
860eda14cbcSMatt Macy 
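/*
 * A hedged restatement of the sizing above (sketch_lwb_usable_bytes is
 * illustrative and unused): a slim (ZILOG2) lwb embeds its zil_chain_t
 * at the front of the buffer while a classic lwb reserves it at the
 * tail, so for a given block size both formats offer the same usable
 * record space.
 */
static uint64_t __maybe_unused
sketch_lwb_usable_bytes(const lwb_t *lwb)
{
	if (lwb->lwb_slim)
		return (lwb->lwb_nmax - sizeof (zil_chain_t));
	return (lwb->lwb_nmax);
}
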
861eda14cbcSMatt Macy static void
862eda14cbcSMatt Macy zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
863eda14cbcSMatt Macy {
864eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&zilog->zl_lock));
8652ad756a6SMartin Matuska 	ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
8662ad756a6SMartin Matuska 	    lwb->lwb_state == LWB_STATE_FLUSH_DONE);
867315ee00fSMartin Matuska 	ASSERT3P(lwb->lwb_child_zio, ==, NULL);
868eda14cbcSMatt Macy 	ASSERT3P(lwb->lwb_write_zio, ==, NULL);
869eda14cbcSMatt Macy 	ASSERT3P(lwb->lwb_root_zio, ==, NULL);
870315ee00fSMartin Matuska 	ASSERT3U(lwb->lwb_alloc_txg, <=, spa_syncing_txg(zilog->zl_spa));
871eda14cbcSMatt Macy 	ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
8722ad756a6SMartin Matuska 	VERIFY(list_is_empty(&lwb->lwb_itxs));
8732ad756a6SMartin Matuska 	VERIFY(list_is_empty(&lwb->lwb_waiters));
8742ad756a6SMartin Matuska 	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
8752ad756a6SMartin Matuska 	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
876eda14cbcSMatt Macy 
877eda14cbcSMatt Macy 	/*
878eda14cbcSMatt Macy 	 * Clear the zilog's field to indicate this lwb is no longer
879eda14cbcSMatt Macy 	 * valid, and prevent use-after-free errors.
880eda14cbcSMatt Macy 	 */
881eda14cbcSMatt Macy 	if (zilog->zl_last_lwb_opened == lwb)
882eda14cbcSMatt Macy 		zilog->zl_last_lwb_opened = NULL;
883eda14cbcSMatt Macy 
884eda14cbcSMatt Macy 	kmem_cache_free(zil_lwb_cache, lwb);
885eda14cbcSMatt Macy }
886eda14cbcSMatt Macy 
887eda14cbcSMatt Macy /*
888eda14cbcSMatt Macy  * Called when we create in-memory log transactions so that we know
889eda14cbcSMatt Macy  * to clean up the itxs at the end of spa_sync().
890eda14cbcSMatt Macy  */
891eda14cbcSMatt Macy static void
892eda14cbcSMatt Macy zilog_dirty(zilog_t *zilog, uint64_t txg)
893eda14cbcSMatt Macy {
894eda14cbcSMatt Macy 	dsl_pool_t *dp = zilog->zl_dmu_pool;
895eda14cbcSMatt Macy 	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
896eda14cbcSMatt Macy 
897eda14cbcSMatt Macy 	ASSERT(spa_writeable(zilog->zl_spa));
898eda14cbcSMatt Macy 
899eda14cbcSMatt Macy 	if (ds->ds_is_snapshot)
900eda14cbcSMatt Macy 		panic("dirtying snapshot!");
901eda14cbcSMatt Macy 
902eda14cbcSMatt Macy 	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
903eda14cbcSMatt Macy 		/* up the hold count until we can be written out */
904eda14cbcSMatt Macy 		dmu_buf_add_ref(ds->ds_dbuf, zilog);
905eda14cbcSMatt Macy 
906eda14cbcSMatt Macy 		zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
907eda14cbcSMatt Macy 	}
908eda14cbcSMatt Macy }
909eda14cbcSMatt Macy 
910eda14cbcSMatt Macy /*
911eda14cbcSMatt Macy  * Determine if the zil is dirty in the specified txg. Callers wanting to
912eda14cbcSMatt Macy  * ensure that the dirty state does not change must hold the itxg_lock for
913eda14cbcSMatt Macy  * the specified txg. Holding the lock will ensure that the zil cannot be
914eda14cbcSMatt Macy  * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
915eda14cbcSMatt Macy  * state.
916eda14cbcSMatt Macy  */
917eda14cbcSMatt Macy static boolean_t __maybe_unused
918eda14cbcSMatt Macy zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
919eda14cbcSMatt Macy {
920eda14cbcSMatt Macy 	dsl_pool_t *dp = zilog->zl_dmu_pool;
921eda14cbcSMatt Macy 
922eda14cbcSMatt Macy 	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
923eda14cbcSMatt Macy 		return (B_TRUE);
924eda14cbcSMatt Macy 	return (B_FALSE);
925eda14cbcSMatt Macy }
926eda14cbcSMatt Macy 
927eda14cbcSMatt Macy /*
928eda14cbcSMatt Macy  * Determine if the zil is dirty. The zil is considered dirty if it has
929eda14cbcSMatt Macy  * any pending itx records that have not been cleaned by zil_clean().
930eda14cbcSMatt Macy  */
931eda14cbcSMatt Macy static boolean_t
932eda14cbcSMatt Macy zilog_is_dirty(zilog_t *zilog)
933eda14cbcSMatt Macy {
934eda14cbcSMatt Macy 	dsl_pool_t *dp = zilog->zl_dmu_pool;
935eda14cbcSMatt Macy 
936eda14cbcSMatt Macy 	for (int t = 0; t < TXG_SIZE; t++) {
937eda14cbcSMatt Macy 		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
938eda14cbcSMatt Macy 			return (B_TRUE);
939eda14cbcSMatt Macy 	}
940eda14cbcSMatt Macy 	return (B_FALSE);
941eda14cbcSMatt Macy }
942eda14cbcSMatt Macy 
943eda14cbcSMatt Macy /*
944c03c5b1cSMartin Matuska  * It's called in zil_commit context (zil_process_commit_list()/zil_create()).
945c03c5b1cSMartin Matuska  * It activates the SPA_FEATURE_ZILSAXATTR feature, if it's enabled.
946c03c5b1cSMartin Matuska  * Check dsl_dataset_feature_is_active to avoid txg_wait_synced() on every
947c03c5b1cSMartin Matuska  * zil_commit.
948c03c5b1cSMartin Matuska  */
949c03c5b1cSMartin Matuska static void
950c03c5b1cSMartin Matuska zil_commit_activate_saxattr_feature(zilog_t *zilog)
951c03c5b1cSMartin Matuska {
952c03c5b1cSMartin Matuska 	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
953c03c5b1cSMartin Matuska 	uint64_t txg = 0;
954c03c5b1cSMartin Matuska 	dmu_tx_t *tx = NULL;
955c03c5b1cSMartin Matuska 
956dbd5678dSMartin Matuska 	if (spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
957c03c5b1cSMartin Matuska 	    dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL &&
958dbd5678dSMartin Matuska 	    !dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)) {
959c03c5b1cSMartin Matuska 		tx = dmu_tx_create(zilog->zl_os);
960*61145dc2SMartin Matuska 		VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
961c03c5b1cSMartin Matuska 		dsl_dataset_dirty(ds, tx);
962c03c5b1cSMartin Matuska 		txg = dmu_tx_get_txg(tx);
963c03c5b1cSMartin Matuska 
964c03c5b1cSMartin Matuska 		mutex_enter(&ds->ds_lock);
965c03c5b1cSMartin Matuska 		ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
966c03c5b1cSMartin Matuska 		    (void *)B_TRUE;
967c03c5b1cSMartin Matuska 		mutex_exit(&ds->ds_lock);
968c03c5b1cSMartin Matuska 		dmu_tx_commit(tx);
969c03c5b1cSMartin Matuska 		txg_wait_synced(zilog->zl_dmu_pool, txg);
970c03c5b1cSMartin Matuska 	}
971c03c5b1cSMartin Matuska }
972c03c5b1cSMartin Matuska 
973c03c5b1cSMartin Matuska /*
974eda14cbcSMatt Macy  * Create an on-disk intent log.
975eda14cbcSMatt Macy  */
976eda14cbcSMatt Macy static lwb_t *
977eda14cbcSMatt Macy zil_create(zilog_t *zilog)
978eda14cbcSMatt Macy {
979eda14cbcSMatt Macy 	const zil_header_t *zh = zilog->zl_header;
980eda14cbcSMatt Macy 	lwb_t *lwb = NULL;
981eda14cbcSMatt Macy 	uint64_t txg = 0;
982eda14cbcSMatt Macy 	dmu_tx_t *tx = NULL;
983eda14cbcSMatt Macy 	blkptr_t blk;
984eda14cbcSMatt Macy 	int error = 0;
985eda14cbcSMatt Macy 	boolean_t slog = FALSE;
986c03c5b1cSMartin Matuska 	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
987c03c5b1cSMartin Matuska 
988eda14cbcSMatt Macy 
989eda14cbcSMatt Macy 	/*
990eda14cbcSMatt Macy 	 * Wait for any previous destroy to complete.
991eda14cbcSMatt Macy 	 */
992eda14cbcSMatt Macy 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
993eda14cbcSMatt Macy 
994eda14cbcSMatt Macy 	ASSERT(zh->zh_claim_txg == 0);
995eda14cbcSMatt Macy 	ASSERT(zh->zh_replay_seq == 0);
996eda14cbcSMatt Macy 
997eda14cbcSMatt Macy 	blk = zh->zh_log;
998eda14cbcSMatt Macy 
999eda14cbcSMatt Macy 	/*
1000eda14cbcSMatt Macy 	 * Allocate an initial log block if:
1001eda14cbcSMatt Macy 	 *    - there isn't one already
1002eda14cbcSMatt Macy 	 *    - the existing block is the wrong endianness
1003eda14cbcSMatt Macy 	 */
1004eda14cbcSMatt Macy 	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
1005eda14cbcSMatt Macy 		tx = dmu_tx_create(zilog->zl_os);
1006*61145dc2SMartin Matuska 		VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
1007eda14cbcSMatt Macy 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1008eda14cbcSMatt Macy 		txg = dmu_tx_get_txg(tx);
1009eda14cbcSMatt Macy 
1010eda14cbcSMatt Macy 		if (!BP_IS_HOLE(&blk)) {
1011eda14cbcSMatt Macy 			zio_free(zilog->zl_spa, txg, &blk);
1012eda14cbcSMatt Macy 			BP_ZERO(&blk);
1013eda14cbcSMatt Macy 		}
1014eda14cbcSMatt Macy 
1015eda14cbcSMatt Macy 		error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
1016eda14cbcSMatt Macy 		    ZIL_MIN_BLKSZ, &slog);
1017eda14cbcSMatt Macy 		if (error == 0)
1018eda14cbcSMatt Macy 			zil_init_log_chain(zilog, &blk);
1019eda14cbcSMatt Macy 	}
1020eda14cbcSMatt Macy 
1021eda14cbcSMatt Macy 	/*
1022eda14cbcSMatt Macy 	 * Allocate a log write block (lwb) for the first log block.
1023eda14cbcSMatt Macy 	 */
1024eda14cbcSMatt Macy 	if (error == 0)
1025315ee00fSMartin Matuska 		lwb = zil_alloc_lwb(zilog, 0, &blk, slog, txg, LWB_STATE_NEW);
1026eda14cbcSMatt Macy 
1027eda14cbcSMatt Macy 	/*
1028eda14cbcSMatt Macy 	 * If we just allocated the first log block, commit our transaction
1029eda14cbcSMatt Macy 	 * and wait for zil_sync() to stuff the block pointer into zh_log.
1030eda14cbcSMatt Macy 	 * (zh is part of the MOS, so we cannot modify it in open context.)
1031eda14cbcSMatt Macy 	 */
1032eda14cbcSMatt Macy 	if (tx != NULL) {
1033c03c5b1cSMartin Matuska 		/*
1034c03c5b1cSMartin Matuska 		 * If "zilsaxattr" feature is enabled on zpool, then activate
1035c03c5b1cSMartin Matuska 		 * it now when we're creating the ZIL chain. We can't wait with
1036c03c5b1cSMartin Matuska 		 * this until we write the first xattr log record because we
1037c03c5b1cSMartin Matuska 		 * need to wait for the feature activation to sync out.
1038c03c5b1cSMartin Matuska 		 */
1039c03c5b1cSMartin Matuska 		if (spa_feature_is_enabled(zilog->zl_spa,
1040c03c5b1cSMartin Matuska 		    SPA_FEATURE_ZILSAXATTR) && dmu_objset_type(zilog->zl_os) !=
1041c03c5b1cSMartin Matuska 		    DMU_OST_ZVOL) {
1042c03c5b1cSMartin Matuska 			mutex_enter(&ds->ds_lock);
1043c03c5b1cSMartin Matuska 			ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
1044c03c5b1cSMartin Matuska 			    (void *)B_TRUE;
1045c03c5b1cSMartin Matuska 			mutex_exit(&ds->ds_lock);
1046c03c5b1cSMartin Matuska 		}
1047c03c5b1cSMartin Matuska 
1048eda14cbcSMatt Macy 		dmu_tx_commit(tx);
1049eda14cbcSMatt Macy 		txg_wait_synced(zilog->zl_dmu_pool, txg);
1050c03c5b1cSMartin Matuska 	} else {
1051c03c5b1cSMartin Matuska 		/*
1052c03c5b1cSMartin Matuska 		 * This branch covers the case where we enable the feature on a
1053c03c5b1cSMartin Matuska 		 * zpool that has existing ZIL headers.
1054c03c5b1cSMartin Matuska 		 */
1055c03c5b1cSMartin Matuska 		zil_commit_activate_saxattr_feature(zilog);
1056eda14cbcSMatt Macy 	}
1057c03c5b1cSMartin Matuska 	IMPLY(spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
1058c03c5b1cSMartin Matuska 	    dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL,
1059c03c5b1cSMartin Matuska 	    dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR));
1060eda14cbcSMatt Macy 
1061da5137abSMartin Matuska 	ASSERT(error != 0 || memcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
1062eda14cbcSMatt Macy 	IMPLY(error == 0, lwb != NULL);
1063eda14cbcSMatt Macy 
1064eda14cbcSMatt Macy 	return (lwb);
1065eda14cbcSMatt Macy }
1066eda14cbcSMatt Macy 
1067eda14cbcSMatt Macy /*
1068eda14cbcSMatt Macy  * In one tx, free all log blocks and clear the log header. If keep_first
1069eda14cbcSMatt Macy  * is set, then we're replaying a log with no content. We want to keep the
1070eda14cbcSMatt Macy  * first block, however, so that the first synchronous transaction doesn't
1071eda14cbcSMatt Macy  * require a txg_wait_synced() in zil_create(). We don't need to
1072eda14cbcSMatt Macy  * txg_wait_synced() here either when keep_first is set, because both
1073eda14cbcSMatt Macy  * zil_create() and zil_destroy() will wait for any in-progress destroys
1074eda14cbcSMatt Macy  * to complete.
1075dbd5678dSMartin Matuska  * Return B_TRUE if there were any entries to replay.
1076eda14cbcSMatt Macy  */
1077dbd5678dSMartin Matuska boolean_t
1078eda14cbcSMatt Macy zil_destroy(zilog_t *zilog, boolean_t keep_first)
1079eda14cbcSMatt Macy {
1080eda14cbcSMatt Macy 	const zil_header_t *zh = zilog->zl_header;
1081eda14cbcSMatt Macy 	lwb_t *lwb;
1082eda14cbcSMatt Macy 	dmu_tx_t *tx;
1083eda14cbcSMatt Macy 	uint64_t txg;
1084eda14cbcSMatt Macy 
1085eda14cbcSMatt Macy 	/*
1086eda14cbcSMatt Macy 	 * Wait for any previous destroy to complete.
1087eda14cbcSMatt Macy 	 */
1088eda14cbcSMatt Macy 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
1089eda14cbcSMatt Macy 
1090eda14cbcSMatt Macy 	zilog->zl_old_header = *zh;		/* debugging aid */
1091eda14cbcSMatt Macy 
1092eda14cbcSMatt Macy 	if (BP_IS_HOLE(&zh->zh_log))
1093dbd5678dSMartin Matuska 		return (B_FALSE);
1094eda14cbcSMatt Macy 
1095eda14cbcSMatt Macy 	tx = dmu_tx_create(zilog->zl_os);
1096*61145dc2SMartin Matuska 	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
1097eda14cbcSMatt Macy 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1098eda14cbcSMatt Macy 	txg = dmu_tx_get_txg(tx);
1099eda14cbcSMatt Macy 
1100eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_lock);
1101eda14cbcSMatt Macy 
1102eda14cbcSMatt Macy 	ASSERT3U(zilog->zl_destroy_txg, <, txg);
1103eda14cbcSMatt Macy 	zilog->zl_destroy_txg = txg;
1104eda14cbcSMatt Macy 	zilog->zl_keep_first = keep_first;
1105eda14cbcSMatt Macy 
1106eda14cbcSMatt Macy 	if (!list_is_empty(&zilog->zl_lwb_list)) {
1107eda14cbcSMatt Macy 		ASSERT(zh->zh_claim_txg == 0);
1108eda14cbcSMatt Macy 		VERIFY(!keep_first);
1109c0a83fe0SMartin Matuska 		while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) {
1110eda14cbcSMatt Macy 			if (lwb->lwb_buf != NULL)
1111eda14cbcSMatt Macy 				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1112315ee00fSMartin Matuska 			if (!BP_IS_HOLE(&lwb->lwb_blk))
1113eda14cbcSMatt Macy 				zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
1114eda14cbcSMatt Macy 			zil_free_lwb(zilog, lwb);
1115eda14cbcSMatt Macy 		}
1116eda14cbcSMatt Macy 	} else if (!keep_first) {
1117eda14cbcSMatt Macy 		zil_destroy_sync(zilog, tx);
1118eda14cbcSMatt Macy 	}
1119eda14cbcSMatt Macy 	mutex_exit(&zilog->zl_lock);
1120eda14cbcSMatt Macy 
1121eda14cbcSMatt Macy 	dmu_tx_commit(tx);
1122dbd5678dSMartin Matuska 
1123dbd5678dSMartin Matuska 	return (B_TRUE);
1124eda14cbcSMatt Macy }
1125eda14cbcSMatt Macy 
1126eda14cbcSMatt Macy void
1127eda14cbcSMatt Macy zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
1128eda14cbcSMatt Macy {
1129eda14cbcSMatt Macy 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1130eda14cbcSMatt Macy 	(void) zil_parse(zilog, zil_free_log_block,
1131eda14cbcSMatt Macy 	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
1132eda14cbcSMatt Macy }
1133eda14cbcSMatt Macy 
1134eda14cbcSMatt Macy int
1135eda14cbcSMatt Macy zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
1136eda14cbcSMatt Macy {
1137eda14cbcSMatt Macy 	dmu_tx_t *tx = txarg;
1138eda14cbcSMatt Macy 	zilog_t *zilog;
1139eda14cbcSMatt Macy 	uint64_t first_txg;
1140eda14cbcSMatt Macy 	zil_header_t *zh;
1141eda14cbcSMatt Macy 	objset_t *os;
1142eda14cbcSMatt Macy 	int error;
1143eda14cbcSMatt Macy 
1144eda14cbcSMatt Macy 	error = dmu_objset_own_obj(dp, ds->ds_object,
1145eda14cbcSMatt Macy 	    DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
1146eda14cbcSMatt Macy 	if (error != 0) {
1147eda14cbcSMatt Macy 		/*
1148eda14cbcSMatt Macy 		 * EBUSY indicates that the objset is inconsistent, in which
1149eda14cbcSMatt Macy 		 * case it can not have a ZIL.
1150eda14cbcSMatt Macy 		 */
1151eda14cbcSMatt Macy 		if (error != EBUSY) {
1152eda14cbcSMatt Macy 			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
1153eda14cbcSMatt Macy 			    (unsigned long long)ds->ds_object, error);
1154eda14cbcSMatt Macy 		}
1155eda14cbcSMatt Macy 
1156eda14cbcSMatt Macy 		return (0);
1157eda14cbcSMatt Macy 	}
1158eda14cbcSMatt Macy 
1159eda14cbcSMatt Macy 	zilog = dmu_objset_zil(os);
1160eda14cbcSMatt Macy 	zh = zil_header_in_syncing_context(zilog);
1161eda14cbcSMatt Macy 	ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
1162eda14cbcSMatt Macy 	first_txg = spa_min_claim_txg(zilog->zl_spa);
1163eda14cbcSMatt Macy 
1164eda14cbcSMatt Macy 	/*
1165eda14cbcSMatt Macy 	 * If the spa_log_state is not set to be cleared, check whether
1166eda14cbcSMatt Macy 	 * the current uberblock is a checkpoint one and if the current
1167eda14cbcSMatt Macy 	 * header has been claimed before moving on.
1168eda14cbcSMatt Macy 	 *
1169eda14cbcSMatt Macy 	 * If the current uberblock is a checkpointed uberblock then
1170eda14cbcSMatt Macy 	 * one of the following scenarios took place:
1171eda14cbcSMatt Macy 	 *
1172eda14cbcSMatt Macy 	 * 1] We are currently rewinding to the checkpoint of the pool.
1173eda14cbcSMatt Macy 	 * 2] We crashed in the middle of a checkpoint rewind but we
1174eda14cbcSMatt Macy 	 *    did manage to write the checkpointed uberblock to the
1175eda14cbcSMatt Macy 	 *    vdev labels, so when we tried to import the pool again
1176eda14cbcSMatt Macy 	 *    the checkpointed uberblock was selected from the import
1177eda14cbcSMatt Macy 	 *    procedure.
1178eda14cbcSMatt Macy 	 *
1179eda14cbcSMatt Macy 	 * In both cases we want to zero out all the ZIL blocks, except
1180eda14cbcSMatt Macy 	 * the ones that have been claimed at the time of the checkpoint
1181eda14cbcSMatt Macy 	 * (their zh_claim_txg != 0). The reason is that these blocks
1182eda14cbcSMatt Macy 	 * may be corrupted since we may have reused their locations on
1183eda14cbcSMatt Macy 	 * disk after we took the checkpoint.
1184eda14cbcSMatt Macy 	 *
1185eda14cbcSMatt Macy 	 * We could try to set spa_log_state to SPA_LOG_CLEAR earlier
1186eda14cbcSMatt Macy 	 * when we first figure out whether the current uberblock is
1187eda14cbcSMatt Macy 	 * checkpointed or not. Unfortunately, that would discard all
1188eda14cbcSMatt Macy 	 * the logs, including the ones that are claimed, and we would
1189eda14cbcSMatt Macy 	 * leak space.
1190eda14cbcSMatt Macy 	 */
1191eda14cbcSMatt Macy 	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
1192eda14cbcSMatt Macy 	    (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
1193eda14cbcSMatt Macy 	    zh->zh_claim_txg == 0)) {
1194eda14cbcSMatt Macy 		if (!BP_IS_HOLE(&zh->zh_log)) {
1195eda14cbcSMatt Macy 			(void) zil_parse(zilog, zil_clear_log_block,
1196eda14cbcSMatt Macy 			    zil_noop_log_record, tx, first_txg, B_FALSE);
1197eda14cbcSMatt Macy 		}
1198eda14cbcSMatt Macy 		BP_ZERO(&zh->zh_log);
1199eda14cbcSMatt Macy 		if (os->os_encrypted)
1200eda14cbcSMatt Macy 			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
1201eda14cbcSMatt Macy 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
1202eda14cbcSMatt Macy 		dmu_objset_disown(os, B_FALSE, FTAG);
1203eda14cbcSMatt Macy 		return (0);
1204eda14cbcSMatt Macy 	}
1205eda14cbcSMatt Macy 
1206eda14cbcSMatt Macy 	/*
1207eda14cbcSMatt Macy 	 * If we are not rewinding and opening the pool normally, then
1208eda14cbcSMatt Macy 	 * the min_claim_txg should be equal to the first txg of the pool.
1209eda14cbcSMatt Macy 	 */
1210eda14cbcSMatt Macy 	ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));
1211eda14cbcSMatt Macy 
1212eda14cbcSMatt Macy 	/*
1213eda14cbcSMatt Macy 	 * Claim all log blocks if we haven't already done so, and remember
1214eda14cbcSMatt Macy 	 * the highest claimed sequence number.  This ensures that if we can
1215eda14cbcSMatt Macy 	 * read only part of the log now (e.g. due to a missing device),
1216eda14cbcSMatt Macy 	 * but we can read the entire log later, we will not try to replay
1217eda14cbcSMatt Macy 	 * or destroy beyond the last block we successfully claimed.
1218eda14cbcSMatt Macy 	 */
1219eda14cbcSMatt Macy 	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
1220eda14cbcSMatt Macy 	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
1221eda14cbcSMatt Macy 		(void) zil_parse(zilog, zil_claim_log_block,
1222eda14cbcSMatt Macy 		    zil_claim_log_record, tx, first_txg, B_FALSE);
1223eda14cbcSMatt Macy 		zh->zh_claim_txg = first_txg;
1224eda14cbcSMatt Macy 		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
1225eda14cbcSMatt Macy 		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
1226eda14cbcSMatt Macy 		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
1227eda14cbcSMatt Macy 			zh->zh_flags |= ZIL_REPLAY_NEEDED;
1228eda14cbcSMatt Macy 		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
1229eda14cbcSMatt Macy 		if (os->os_encrypted)
1230eda14cbcSMatt Macy 			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
1231eda14cbcSMatt Macy 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
1232eda14cbcSMatt Macy 	}
1233eda14cbcSMatt Macy 
1234eda14cbcSMatt Macy 	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
1235eda14cbcSMatt Macy 	dmu_objset_disown(os, B_FALSE, FTAG);
1236eda14cbcSMatt Macy 	return (0);
1237eda14cbcSMatt Macy }
1238eda14cbcSMatt Macy 
1239eda14cbcSMatt Macy /*
1240eda14cbcSMatt Macy  * Check the log by walking the log chain.
1241eda14cbcSMatt Macy  * Checksum errors are ok as they indicate the end of the chain.
1242eda14cbcSMatt Macy  * Any other error (no device or read failure) returns an error.
1243eda14cbcSMatt Macy  */
1244eda14cbcSMatt Macy int
1245eda14cbcSMatt Macy zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
1246eda14cbcSMatt Macy {
1247e92ffd9bSMartin Matuska 	(void) dp;
1248eda14cbcSMatt Macy 	zilog_t *zilog;
1249eda14cbcSMatt Macy 	objset_t *os;
1250eda14cbcSMatt Macy 	blkptr_t *bp;
1251eda14cbcSMatt Macy 	int error;
1252eda14cbcSMatt Macy 
1253eda14cbcSMatt Macy 	ASSERT(tx == NULL);
1254eda14cbcSMatt Macy 
1255eda14cbcSMatt Macy 	error = dmu_objset_from_ds(ds, &os);
1256eda14cbcSMatt Macy 	if (error != 0) {
1257eda14cbcSMatt Macy 		cmn_err(CE_WARN, "can't open objset %llu, error %d",
1258eda14cbcSMatt Macy 		    (unsigned long long)ds->ds_object, error);
1259eda14cbcSMatt Macy 		return (0);
1260eda14cbcSMatt Macy 	}
1261eda14cbcSMatt Macy 
1262eda14cbcSMatt Macy 	zilog = dmu_objset_zil(os);
1263eda14cbcSMatt Macy 	bp = (blkptr_t *)&zilog->zl_header->zh_log;
1264eda14cbcSMatt Macy 
1265eda14cbcSMatt Macy 	if (!BP_IS_HOLE(bp)) {
1266eda14cbcSMatt Macy 		vdev_t *vd;
1267eda14cbcSMatt Macy 		boolean_t valid = B_TRUE;
1268eda14cbcSMatt Macy 
1269eda14cbcSMatt Macy 		/*
1270eda14cbcSMatt Macy 		 * Check the first block and determine if it's on a log device
1271eda14cbcSMatt Macy 		 * which may have been removed or faulted prior to loading this
1272eda14cbcSMatt Macy 		 * pool.  If so, there's no point in checking the rest of the
1273eda14cbcSMatt Macy 		 * log as its content should have already been synced to the
1274eda14cbcSMatt Macy 		 * pool.
1275eda14cbcSMatt Macy 		 */
1276eda14cbcSMatt Macy 		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
1277eda14cbcSMatt Macy 		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
1278eda14cbcSMatt Macy 		if (vd->vdev_islog && vdev_is_dead(vd))
1279eda14cbcSMatt Macy 			valid = vdev_log_state_valid(vd);
1280eda14cbcSMatt Macy 		spa_config_exit(os->os_spa, SCL_STATE, FTAG);
1281eda14cbcSMatt Macy 
1282eda14cbcSMatt Macy 		if (!valid)
1283eda14cbcSMatt Macy 			return (0);
1284eda14cbcSMatt Macy 
1285eda14cbcSMatt Macy 		/*
1286eda14cbcSMatt Macy 		 * Check whether the current uberblock is checkpointed (e.g.
1287eda14cbcSMatt Macy 		 * we are rewinding) and whether the current header has been
1288eda14cbcSMatt Macy 		 * claimed or not. If it hasn't then skip verifying it. We
1289eda14cbcSMatt Macy 		 * do this because its ZIL blocks may be part of the pool's
1290eda14cbcSMatt Macy 		 * state before the rewind, which is no longer valid.
1291eda14cbcSMatt Macy 		 */
1292eda14cbcSMatt Macy 		zil_header_t *zh = zil_header_in_syncing_context(zilog);
1293eda14cbcSMatt Macy 		if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
1294eda14cbcSMatt Macy 		    zh->zh_claim_txg == 0)
1295eda14cbcSMatt Macy 			return (0);
1296eda14cbcSMatt Macy 	}
1297eda14cbcSMatt Macy 
1298eda14cbcSMatt Macy 	/*
1299eda14cbcSMatt Macy 	 * Because tx == NULL, zil_claim_log_block() will not actually claim
1300eda14cbcSMatt Macy 	 * any blocks, but just determine whether it is possible to do so.
1301eda14cbcSMatt Macy 	 * In addition to checking the log chain, zil_claim_log_block()
1302eda14cbcSMatt Macy 	 * will invoke zio_claim() with a done func of spa_claim_notify(),
1303eda14cbcSMatt Macy 	 * which will update spa_max_claim_txg.  See spa_load() for details.
1304eda14cbcSMatt Macy 	 */
1305eda14cbcSMatt Macy 	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
1306eda14cbcSMatt Macy 	    zilog->zl_header->zh_claim_txg ? -1ULL :
1307eda14cbcSMatt Macy 	    spa_min_claim_txg(os->os_spa), B_FALSE);
1308eda14cbcSMatt Macy 
1309eda14cbcSMatt Macy 	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
1310eda14cbcSMatt Macy }
1311eda14cbcSMatt Macy 
1312eda14cbcSMatt Macy /*
1313eda14cbcSMatt Macy  * When an itx is "skipped", this function is used to properly mark the
1314eda14cbcSMatt Macy  * waiter as "done", and signal any thread(s) waiting on it. An itx can
1315eda14cbcSMatt Macy  * be skipped (and not committed to an lwb) for a variety of reasons,
1316eda14cbcSMatt Macy  * one of them being that the itx was committed via spa_sync(), prior to
1317eda14cbcSMatt Macy  * it being committed to an lwb; this can happen if a thread calling
1318eda14cbcSMatt Macy  * zil_commit() is racing with spa_sync().
1319eda14cbcSMatt Macy  */
1320eda14cbcSMatt Macy static void
1321eda14cbcSMatt Macy zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
1322eda14cbcSMatt Macy {
1323eda14cbcSMatt Macy 	mutex_enter(&zcw->zcw_lock);
1324eda14cbcSMatt Macy 	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
1325eda14cbcSMatt Macy 	zcw->zcw_done = B_TRUE;
1326eda14cbcSMatt Macy 	cv_broadcast(&zcw->zcw_cv);
1327eda14cbcSMatt Macy 	mutex_exit(&zcw->zcw_lock);
1328eda14cbcSMatt Macy }
1329eda14cbcSMatt Macy 
1330eda14cbcSMatt Macy /*
1331eda14cbcSMatt Macy  * This function is used when the given waiter is to be linked into an
1332eda14cbcSMatt Macy  * lwb's "lwb_waiters" list; i.e. when the itx is committed to the lwb.
1333eda14cbcSMatt Macy  * At this point, the waiter will no longer be referenced by the itx,
1334eda14cbcSMatt Macy  * and instead, will be referenced by the lwb.
1335eda14cbcSMatt Macy  */
1336eda14cbcSMatt Macy static void
1337eda14cbcSMatt Macy zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
1338eda14cbcSMatt Macy {
1339eda14cbcSMatt Macy 	/*
1340eda14cbcSMatt Macy 	 * The lwb_waiters field of the lwb is protected by the zilog's
1341315ee00fSMartin Matuska 	 * zl_issuer_lock while the lwb is open and zl_lock otherwise.
1342315ee00fSMartin Matuska 	 * zl_issuer_lock also protects leaving the open state.
1343315ee00fSMartin Matuska 	 * Setting zcw_lwb is protected by zl_issuer_lock while the lwb
1344315ee00fSMartin Matuska 	 * state is not flush_done; that transition is protected by zl_lock.
1345eda14cbcSMatt Macy 	 */
1346315ee00fSMartin Matuska 	ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_issuer_lock));
1347315ee00fSMartin Matuska 	IMPLY(lwb->lwb_state != LWB_STATE_OPENED,
1348315ee00fSMartin Matuska 	    MUTEX_HELD(&lwb->lwb_zilog->zl_lock));
1349315ee00fSMartin Matuska 	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW);
1350315ee00fSMartin Matuska 	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
1351eda14cbcSMatt Macy 
1352eda14cbcSMatt Macy 	ASSERT(!list_link_active(&zcw->zcw_node));
1353eda14cbcSMatt Macy 	list_insert_tail(&lwb->lwb_waiters, zcw);
1354315ee00fSMartin Matuska 	ASSERT3P(zcw->zcw_lwb, ==, NULL);
1355eda14cbcSMatt Macy 	zcw->zcw_lwb = lwb;
1356eda14cbcSMatt Macy }
1357eda14cbcSMatt Macy 
1358eda14cbcSMatt Macy /*
1359eda14cbcSMatt Macy  * This function is used when zio_alloc_zil() fails to allocate a ZIL
1360eda14cbcSMatt Macy  * block, and the given waiter must be linked to the "nolwb waiters"
1361eda14cbcSMatt Macy  * list inside of zil_process_commit_list().
1362eda14cbcSMatt Macy  */
1363eda14cbcSMatt Macy static void
1364eda14cbcSMatt Macy zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
1365eda14cbcSMatt Macy {
1366eda14cbcSMatt Macy 	ASSERT(!list_link_active(&zcw->zcw_node));
1367eda14cbcSMatt Macy 	list_insert_tail(nolwb, zcw);
1368315ee00fSMartin Matuska 	ASSERT3P(zcw->zcw_lwb, ==, NULL);
1369eda14cbcSMatt Macy }
1370eda14cbcSMatt Macy 
1371eda14cbcSMatt Macy void
1372eda14cbcSMatt Macy zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
1373eda14cbcSMatt Macy {
1374eda14cbcSMatt Macy 	avl_tree_t *t = &lwb->lwb_vdev_tree;
1375eda14cbcSMatt Macy 	avl_index_t where;
1376eda14cbcSMatt Macy 	zil_vdev_node_t *zv, zvsearch;
1377eda14cbcSMatt Macy 	int ndvas = BP_GET_NDVAS(bp);
1378eda14cbcSMatt Macy 	int i;
1379eda14cbcSMatt Macy 
13802ad756a6SMartin Matuska 	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
13812ad756a6SMartin Matuska 	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
13822ad756a6SMartin Matuska 
1383eda14cbcSMatt Macy 	if (zil_nocacheflush)
1384eda14cbcSMatt Macy 		return;
1385eda14cbcSMatt Macy 
1386eda14cbcSMatt Macy 	mutex_enter(&lwb->lwb_vdev_lock);
1387eda14cbcSMatt Macy 	for (i = 0; i < ndvas; i++) {
1388eda14cbcSMatt Macy 		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
1389eda14cbcSMatt Macy 		if (avl_find(t, &zvsearch, &where) == NULL) {
1390eda14cbcSMatt Macy 			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
1391eda14cbcSMatt Macy 			zv->zv_vdev = zvsearch.zv_vdev;
1392eda14cbcSMatt Macy 			avl_insert(t, zv, where);
1393eda14cbcSMatt Macy 		}
1394eda14cbcSMatt Macy 	}
1395eda14cbcSMatt Macy 	mutex_exit(&lwb->lwb_vdev_lock);
1396eda14cbcSMatt Macy }
1397eda14cbcSMatt Macy 
1398eda14cbcSMatt Macy static void
1399eda14cbcSMatt Macy zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
1400eda14cbcSMatt Macy {
1401eda14cbcSMatt Macy 	avl_tree_t *src = &lwb->lwb_vdev_tree;
1402eda14cbcSMatt Macy 	avl_tree_t *dst = &nlwb->lwb_vdev_tree;
1403eda14cbcSMatt Macy 	void *cookie = NULL;
1404eda14cbcSMatt Macy 	zil_vdev_node_t *zv;
1405eda14cbcSMatt Macy 
1406eda14cbcSMatt Macy 	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
1407eda14cbcSMatt Macy 	ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
1408eda14cbcSMatt Macy 	ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
1409eda14cbcSMatt Macy 
1410eda14cbcSMatt Macy 	/*
1411eda14cbcSMatt Macy 	 * 'lwb' is at a point in its lifetime where lwb_vdev_tree does
1412eda14cbcSMatt Macy 	 * not need the protection of lwb_vdev_lock (it will only be modified
1413eda14cbcSMatt Macy 	 * while holding zilog->zl_lock), as its writes and those of its
1414eda14cbcSMatt Macy 	 * children have all completed.  The younger 'nlwb' may be waiting on
1415eda14cbcSMatt Macy 	 * future writes to additional vdevs.
1416eda14cbcSMatt Macy 	 */
1417eda14cbcSMatt Macy 	mutex_enter(&nlwb->lwb_vdev_lock);
1418eda14cbcSMatt Macy 	/*
1419eda14cbcSMatt Macy 	 * Tear down the 'lwb' vdev tree, ensuring that entries which do not
1420eda14cbcSMatt Macy 	 * exist in 'nlwb' are moved to it, freeing any would-be duplicates.
1421eda14cbcSMatt Macy 	 */
1422eda14cbcSMatt Macy 	while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) {
1423eda14cbcSMatt Macy 		avl_index_t where;
1424eda14cbcSMatt Macy 
1425eda14cbcSMatt Macy 		if (avl_find(dst, zv, &where) == NULL) {
1426eda14cbcSMatt Macy 			avl_insert(dst, zv, where);
1427eda14cbcSMatt Macy 		} else {
1428eda14cbcSMatt Macy 			kmem_free(zv, sizeof (*zv));
1429eda14cbcSMatt Macy 		}
1430eda14cbcSMatt Macy 	}
1431eda14cbcSMatt Macy 	mutex_exit(&nlwb->lwb_vdev_lock);
1432eda14cbcSMatt Macy }
1433eda14cbcSMatt Macy 
1434eda14cbcSMatt Macy void
1435eda14cbcSMatt Macy zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
1436eda14cbcSMatt Macy {
1437eda14cbcSMatt Macy 	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
1438eda14cbcSMatt Macy }
1439eda14cbcSMatt Macy 
1440eda14cbcSMatt Macy /*
14411719886fSMartin Matuska  * This function is called after all vdevs associated with a given lwb write
14421719886fSMartin Matuska  * have completed their flush command; or as soon as the lwb write completes,
14431719886fSMartin Matuska  * if "zil_nocacheflush" is set. Further, all "previous" lwbs will have
14441719886fSMartin Matuska  * completed before this function is called; i.e. this function is called for
14451719886fSMartin Matuska  * all previous lwbs before it's called for "this" lwb (enforced via the zio
14461719886fSMartin Matuska  * dependencies configured in zil_lwb_set_zio_dependency()).
1447eda14cbcSMatt Macy  *
14481719886fSMartin Matuska  * The intention is for this function to be called as soon as the contents of
14491719886fSMartin Matuska  * an lwb are considered "stable" on disk, and will survive any sudden loss of
14501719886fSMartin Matuska  * power. At this point, any threads waiting for the lwb to reach this state
14511719886fSMartin Matuska  * are signalled, and the "waiter" structures are marked "done".
1452eda14cbcSMatt Macy  */
1453eda14cbcSMatt Macy static void
1454eda14cbcSMatt Macy zil_lwb_flush_vdevs_done(zio_t *zio)
1455eda14cbcSMatt Macy {
1456eda14cbcSMatt Macy 	lwb_t *lwb = zio->io_private;
1457eda14cbcSMatt Macy 	zilog_t *zilog = lwb->lwb_zilog;
1458eda14cbcSMatt Macy 	zil_commit_waiter_t *zcw;
1459eda14cbcSMatt Macy 	itx_t *itx;
1460eda14cbcSMatt Macy 
1461eda14cbcSMatt Macy 	spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
1462eda14cbcSMatt Macy 
1463c0a83fe0SMartin Matuska 	hrtime_t t = gethrtime() - lwb->lwb_issued_timestamp;
1464eda14cbcSMatt Macy 
1465eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_lock);
1466eda14cbcSMatt Macy 
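	/*
	 * Added note: the update below is an exponential moving average with
	 * a weight of 1/8, so each new sample 't' contributes 12.5% to the
	 * running latency estimate, smoothing out transient spikes.
	 */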
1467c0a83fe0SMartin Matuska 	zilog->zl_last_lwb_latency = (zilog->zl_last_lwb_latency * 7 + t) / 8;
1468eda14cbcSMatt Macy 
1469eda14cbcSMatt Macy 	lwb->lwb_root_zio = NULL;
1470eda14cbcSMatt Macy 
14712ad756a6SMartin Matuska 	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
14722ad756a6SMartin Matuska 	lwb->lwb_state = LWB_STATE_FLUSH_DONE;
14732ad756a6SMartin Matuska 
1474eda14cbcSMatt Macy 	if (zilog->zl_last_lwb_opened == lwb) {
1475eda14cbcSMatt Macy 		/*
1476eda14cbcSMatt Macy 		 * Remember the highest committed log sequence number
1477eda14cbcSMatt Macy 		 * for ztest. We only update this value when all the log
1478eda14cbcSMatt Macy 		 * writes succeeded, because ztest wants to ASSERT that
1479eda14cbcSMatt Macy 		 * it got the whole log chain.
1480eda14cbcSMatt Macy 		 */
1481eda14cbcSMatt Macy 		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1482eda14cbcSMatt Macy 	}
1483eda14cbcSMatt Macy 
14842ad756a6SMartin Matuska 	while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
14854e8d558cSMartin Matuska 		zil_itx_destroy(itx);
14864e8d558cSMartin Matuska 
14872ad756a6SMartin Matuska 	while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
1488eda14cbcSMatt Macy 		mutex_enter(&zcw->zcw_lock);
1489eda14cbcSMatt Macy 
14902ad756a6SMartin Matuska 		ASSERT3P(zcw->zcw_lwb, ==, lwb);
1491eda14cbcSMatt Macy 		zcw->zcw_lwb = NULL;
149253b70c86SMartin Matuska 		/*
149353b70c86SMartin Matuska 		 * We expect any ZIO errors from child ZIOs to have been
149453b70c86SMartin Matuska 		 * propagated "up" to this specific LWB's root ZIO, in
149553b70c86SMartin Matuska 		 * order for this error handling to work correctly. This
149653b70c86SMartin Matuska 		 * includes ZIO errors from either this LWB's write or
149753b70c86SMartin Matuska 		 * flush, as well as any errors from other dependent LWBs
149853b70c86SMartin Matuska 		 * (e.g. a root LWB ZIO that might be a child of this LWB).
149953b70c86SMartin Matuska 		 *
150053b70c86SMartin Matuska 		 * With that said, it's important to note that LWB flush
150153b70c86SMartin Matuska 		 * errors are not propagated up to the LWB root ZIO.
150253b70c86SMartin Matuska 		 * This is incorrect behavior, and results in VDEV flush
150353b70c86SMartin Matuska 		 * errors not being handled correctly here. See the
150453b70c86SMartin Matuska 		 * comment above the call to "zio_flush" for details.
150553b70c86SMartin Matuska 		 */
1506eda14cbcSMatt Macy 
1507eda14cbcSMatt Macy 		zcw->zcw_zio_error = zio->io_error;
1508eda14cbcSMatt Macy 
1509eda14cbcSMatt Macy 		ASSERT3B(zcw->zcw_done, ==, B_FALSE);
1510eda14cbcSMatt Macy 		zcw->zcw_done = B_TRUE;
1511eda14cbcSMatt Macy 		cv_broadcast(&zcw->zcw_cv);
1512eda14cbcSMatt Macy 
1513eda14cbcSMatt Macy 		mutex_exit(&zcw->zcw_lock);
1514eda14cbcSMatt Macy 	}
15152ad756a6SMartin Matuska 
15162ad756a6SMartin Matuska 	uint64_t txg = lwb->lwb_issued_txg;
15172ad756a6SMartin Matuska 
15182ad756a6SMartin Matuska 	/* Once we drop the lock, lwb may be freed by zil_sync(). */
15192ad756a6SMartin Matuska 	mutex_exit(&zilog->zl_lock);
1520eda14cbcSMatt Macy 
1521e3aa18adSMartin Matuska 	mutex_enter(&zilog->zl_lwb_io_lock);
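	/*
	 * Added note: zl_lwb_inflight[] is a small ring with one counter per
	 * open/syncing txg, indexed by txg & TXG_MASK.  Its counterpart
	 * zil_lwb_flush_wait_all() blocks until the counter for the syncing
	 * txg drains to zero.
	 */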
1522e3aa18adSMartin Matuska 	ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0);
1523e3aa18adSMartin Matuska 	zilog->zl_lwb_inflight[txg & TXG_MASK]--;
1524e3aa18adSMartin Matuska 	if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0)
1525e3aa18adSMartin Matuska 		cv_broadcast(&zilog->zl_lwb_io_cv);
1526e3aa18adSMartin Matuska 	mutex_exit(&zilog->zl_lwb_io_lock);
1527e3aa18adSMartin Matuska }
1528e3aa18adSMartin Matuska 
1529eda14cbcSMatt Macy /*
1530e3aa18adSMartin Matuska  * Wait for the completion of all issued writes/flushes for the given txg.
1531e3aa18adSMartin Matuska  * It guarantees that zil_lwb_flush_vdevs_done() has been called and returned.
1532eda14cbcSMatt Macy  */
1533e3aa18adSMartin Matuska static void
1534e3aa18adSMartin Matuska zil_lwb_flush_wait_all(zilog_t *zilog, uint64_t txg)
1535e3aa18adSMartin Matuska {
1536e3aa18adSMartin Matuska 	ASSERT3U(txg, ==, spa_syncing_txg(zilog->zl_spa));
1537e3aa18adSMartin Matuska 
1538e3aa18adSMartin Matuska 	mutex_enter(&zilog->zl_lwb_io_lock);
1539e3aa18adSMartin Matuska 	while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0)
1540e3aa18adSMartin Matuska 		cv_wait(&zilog->zl_lwb_io_cv, &zilog->zl_lwb_io_lock);
1541e3aa18adSMartin Matuska 	mutex_exit(&zilog->zl_lwb_io_lock);
1542e3aa18adSMartin Matuska 
1543e3aa18adSMartin Matuska #ifdef ZFS_DEBUG
1544e3aa18adSMartin Matuska 	mutex_enter(&zilog->zl_lock);
1545e3aa18adSMartin Matuska 	mutex_enter(&zilog->zl_lwb_io_lock);
1546e3aa18adSMartin Matuska 	lwb_t *lwb = list_head(&zilog->zl_lwb_list);
1547315ee00fSMartin Matuska 	while (lwb != NULL) {
1548e3aa18adSMartin Matuska 		if (lwb->lwb_issued_txg <= txg) {
1549e3aa18adSMartin Matuska 			ASSERT(lwb->lwb_state != LWB_STATE_ISSUED);
1550e3aa18adSMartin Matuska 			ASSERT(lwb->lwb_state != LWB_STATE_WRITE_DONE);
1551e3aa18adSMartin Matuska 			IMPLY(lwb->lwb_issued_txg > 0,
1552e3aa18adSMartin Matuska 			    lwb->lwb_state == LWB_STATE_FLUSH_DONE);
1553e3aa18adSMartin Matuska 		}
1554c0a83fe0SMartin Matuska 		IMPLY(lwb->lwb_state == LWB_STATE_WRITE_DONE ||
1555c0a83fe0SMartin Matuska 		    lwb->lwb_state == LWB_STATE_FLUSH_DONE,
1556e3aa18adSMartin Matuska 		    lwb->lwb_buf == NULL);
1557e3aa18adSMartin Matuska 		lwb = list_next(&zilog->zl_lwb_list, lwb);
1558e3aa18adSMartin Matuska 	}
1559e3aa18adSMartin Matuska 	mutex_exit(&zilog->zl_lwb_io_lock);
1560e3aa18adSMartin Matuska 	mutex_exit(&zilog->zl_lock);
1561e3aa18adSMartin Matuska #endif
1562eda14cbcSMatt Macy }
1563eda14cbcSMatt Macy 
1564eda14cbcSMatt Macy /*
15651719886fSMartin Matuska  * This is called when an lwb's write zio completes. The callback's purpose is
15661719886fSMartin Matuska  * to issue the flush commands for the vdevs in the lwb's lwb_vdev_tree. The
15671719886fSMartin Matuska  * tree will contain the vdevs involved in writing out this specific lwb's
15681719886fSMartin Matuska  * data, and in the case that cache flushes have been deferred, vdevs involved
15691719886fSMartin Matuska  * in writing the data for previous lwbs. The writes corresponding to all the
15701719886fSMartin Matuska  * vdevs in the lwb_vdev_tree will have completed by the time this is called,
15711719886fSMartin Matuska  * due to the zio dependencies configured in zil_lwb_set_zio_dependency(),
15721719886fSMartin Matuska  * which takes deferred flushes into account. The lwb will be "done" once
15731719886fSMartin Matuska  * zil_lwb_flush_vdevs_done() is called, which occurs in the zio completion
15741719886fSMartin Matuska  * callback for the lwb's root zio.
1575eda14cbcSMatt Macy  */
1576eda14cbcSMatt Macy static void
1577eda14cbcSMatt Macy zil_lwb_write_done(zio_t *zio)
1578eda14cbcSMatt Macy {
1579eda14cbcSMatt Macy 	lwb_t *lwb = zio->io_private;
1580eda14cbcSMatt Macy 	spa_t *spa = zio->io_spa;
1581eda14cbcSMatt Macy 	zilog_t *zilog = lwb->lwb_zilog;
1582eda14cbcSMatt Macy 	avl_tree_t *t = &lwb->lwb_vdev_tree;
1583eda14cbcSMatt Macy 	void *cookie = NULL;
1584eda14cbcSMatt Macy 	zil_vdev_node_t *zv;
1585eda14cbcSMatt Macy 	lwb_t *nlwb;
1586eda14cbcSMatt Macy 
1587eda14cbcSMatt Macy 	ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
1588eda14cbcSMatt Macy 
1589184c1b94SMartin Matuska 	abd_free(zio->io_abd);
1590c0a83fe0SMartin Matuska 	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1591c0a83fe0SMartin Matuska 	lwb->lwb_buf = NULL;
1592eda14cbcSMatt Macy 
1593eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_lock);
1594eda14cbcSMatt Macy 	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
1595eda14cbcSMatt Macy 	lwb->lwb_state = LWB_STATE_WRITE_DONE;
1596315ee00fSMartin Matuska 	lwb->lwb_child_zio = NULL;
1597eda14cbcSMatt Macy 	lwb->lwb_write_zio = NULL;
1598cbfe9975SMartin Matuska 
1599cbfe9975SMartin Matuska 	/*
1600cbfe9975SMartin Matuska 	 * If nlwb is not yet issued, zil_lwb_set_zio_dependency() has not
1601cbfe9975SMartin Matuska 	 * been called for it yet, and when it is, it won't be able to make
1602cbfe9975SMartin Matuska 	 * its write ZIO a parent of this ZIO.  In such a case we cannot defer
1603cbfe9975SMartin Matuska 	 * our flushes, or there may be a race between the done callbacks.
1604cbfe9975SMartin Matuska 	 */
1605eda14cbcSMatt Macy 	nlwb = list_next(&zilog->zl_lwb_list, lwb);
1606cbfe9975SMartin Matuska 	if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED)
1607cbfe9975SMartin Matuska 		nlwb = NULL;
1608eda14cbcSMatt Macy 	mutex_exit(&zilog->zl_lock);
1609eda14cbcSMatt Macy 
1610eda14cbcSMatt Macy 	if (avl_numnodes(t) == 0)
1611eda14cbcSMatt Macy 		return;
1612eda14cbcSMatt Macy 
1613eda14cbcSMatt Macy 	/*
1614eda14cbcSMatt Macy 	 * If there was an IO error, we're not going to call zio_flush()
1615eda14cbcSMatt Macy 	 * on these vdevs, so we simply empty the tree and free the
1616eda14cbcSMatt Macy 	 * nodes. We avoid calling zio_flush() since there isn't any
1617eda14cbcSMatt Macy 	 * good reason for doing so, after the lwb block failed to be
1618eda14cbcSMatt Macy 	 * written out.
161953b70c86SMartin Matuska 	 *
162053b70c86SMartin Matuska 	 * Additionally, we don't perform any further error handling at
162153b70c86SMartin Matuska 	 * this point (e.g. setting "zcw_zio_error" appropriately), as
162253b70c86SMartin Matuska 	 * we expect that to occur in "zil_lwb_flush_vdevs_done" (thus,
162353b70c86SMartin Matuska 	 * we expect any error seen here, to have been propagated to
162453b70c86SMartin Matuska 	 * that function).
1625eda14cbcSMatt Macy 	 */
1626eda14cbcSMatt Macy 	if (zio->io_error != 0) {
1627eda14cbcSMatt Macy 		while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
1628eda14cbcSMatt Macy 			kmem_free(zv, sizeof (*zv));
1629eda14cbcSMatt Macy 		return;
1630eda14cbcSMatt Macy 	}
1631eda14cbcSMatt Macy 
1632eda14cbcSMatt Macy 	/*
16331719886fSMartin Matuska 	 * If this lwb does not have any threads waiting for it to complete, we
16341719886fSMartin Matuska 	 * want to defer issuing the flush command to the vdevs written to by
16351719886fSMartin Matuska 	 * "this" lwb, and instead rely on the "next" lwb to handle the flush
16361719886fSMartin Matuska 	 * command for those vdevs. Thus, we merge the vdev tree of "this" lwb
16371719886fSMartin Matuska 	 * with the vdev tree of the "next" lwb in the list, and assume the
16381719886fSMartin Matuska 	 * "next" lwb will handle flushing the vdevs (or deferring the flush(es)
16391719886fSMartin Matuska 	 * again).
1640eda14cbcSMatt Macy 	 *
16411719886fSMartin Matuska 	 * This is a useful performance optimization, especially for workloads
16421719886fSMartin Matuska 	 * with lots of async write activity and few sync write and/or fsync
16431719886fSMartin Matuska 	 * activity, as it has the potential to coalesce multiple flush
16441719886fSMartin Matuska 	 * commands to a vdev into one.
1645eda14cbcSMatt Macy 	 */
1646c0a83fe0SMartin Matuska 	if (list_is_empty(&lwb->lwb_waiters) && nlwb != NULL) {
1647eda14cbcSMatt Macy 		zil_lwb_flush_defer(lwb, nlwb);
1648eda14cbcSMatt Macy 		ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
1649eda14cbcSMatt Macy 		return;
1650eda14cbcSMatt Macy 	}
1651eda14cbcSMatt Macy 
1652eda14cbcSMatt Macy 	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
1653eda14cbcSMatt Macy 		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
1654a2b560ccSMartin Matuska 		if (vd != NULL) {
165553b70c86SMartin Matuska 			/*
165653b70c86SMartin Matuska 			 * The "ZIO_FLAG_DONT_PROPAGATE" is currently
165753b70c86SMartin Matuska 			 * always used within "zio_flush". This means,
165853b70c86SMartin Matuska 			 * any errors when flushing the vdev(s), will
165953b70c86SMartin Matuska 			 * (unfortunately) not be handled correctly,
166053b70c86SMartin Matuska 			 * since these "zio_flush" errors will not be
166153b70c86SMartin Matuska 			 * propagated up to "zil_lwb_flush_vdevs_done".
166253b70c86SMartin Matuska 			 */
1663eda14cbcSMatt Macy 			zio_flush(lwb->lwb_root_zio, vd);
166453b70c86SMartin Matuska 		}
1665eda14cbcSMatt Macy 		kmem_free(zv, sizeof (*zv));
1666eda14cbcSMatt Macy 	}
1667eda14cbcSMatt Macy }
1668eda14cbcSMatt Macy 
1669315ee00fSMartin Matuska /*
1670315ee00fSMartin Matuska  * Build the zio dependency chain, which is used to preserve the ordering of
1671315ee00fSMartin Matuska  * lwb completions that is required by the semantics of the ZIL. Each new lwb
1672315ee00fSMartin Matuska  * zio becomes a parent of the previous lwb zio, such that the new lwb's zio
1673315ee00fSMartin Matuska  * cannot complete until the previous lwb's zio completes.
1674315ee00fSMartin Matuska  *
1675315ee00fSMartin Matuska  * This is required by the semantics of zil_commit(): the commit waiters
1676315ee00fSMartin Matuska  * attached to the lwbs will be woken in the lwb zio's completion callback,
1677315ee00fSMartin Matuska  * so this zio dependency graph ensures the waiters are woken in the correct
1678315ee00fSMartin Matuska  * order (the same order the lwbs were created).
1679315ee00fSMartin Matuska  */
1680eda14cbcSMatt Macy static void
1681eda14cbcSMatt Macy zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb)
1682eda14cbcSMatt Macy {
1683eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&zilog->zl_lock));
1684eda14cbcSMatt Macy 
1685315ee00fSMartin Matuska 	lwb_t *prev_lwb = list_prev(&zilog->zl_lwb_list, lwb);
1686315ee00fSMartin Matuska 	if (prev_lwb == NULL ||
1687315ee00fSMartin Matuska 	    prev_lwb->lwb_state == LWB_STATE_FLUSH_DONE)
1688315ee00fSMartin Matuska 		return;
1689eda14cbcSMatt Macy 
1690eda14cbcSMatt Macy 	/*
1691315ee00fSMartin Matuska 	 * If the previous lwb's write hasn't already completed, we also want
1692315ee00fSMartin Matuska 	 * to order the completion of the lwb write zios (above, we only order
1693315ee00fSMartin Matuska 	 * the completion of the lwb root zios). This is required because of
16941719886fSMartin Matuska 	 * how we can defer the flush commands for each lwb.
1695eda14cbcSMatt Macy 	 *
16961719886fSMartin Matuska 	 * When the flush commands are deferred, the previous lwb will rely on
16971719886fSMartin Matuska 	 * this lwb to flush the vdevs written to by that previous lwb. Thus,
16981719886fSMartin Matuska 	 * we need to ensure this lwb doesn't issue the flush until after the
16991719886fSMartin Matuska 	 * previous lwb's write completes. We ensure this ordering by setting
17001719886fSMartin Matuska 	 * the zio parent/child relationship here.
1701eda14cbcSMatt Macy 	 *
17021719886fSMartin Matuska 	 * Without this relationship on the lwb's write zio, it's possible for
17031719886fSMartin Matuska 	 * this lwb's write to complete prior to the previous lwb's write
1704315ee00fSMartin Matuska 	 * completing; and thus, the vdevs for the previous lwb would be
1705315ee00fSMartin Matuska 	 * flushed prior to that lwb's data being written to those vdevs (the
1706315ee00fSMartin Matuska 	 * vdevs are flushed in the lwb write zio's completion handler,
1707315ee00fSMartin Matuska 	 * zil_lwb_write_done()).
1708eda14cbcSMatt Macy 	 */
1709315ee00fSMartin Matuska 	if (prev_lwb->lwb_state == LWB_STATE_ISSUED) {
1710315ee00fSMartin Matuska 		ASSERT3P(prev_lwb->lwb_write_zio, !=, NULL);
1711315ee00fSMartin Matuska 		zio_add_child(lwb->lwb_write_zio, prev_lwb->lwb_write_zio);
1712315ee00fSMartin Matuska 	} else {
1713315ee00fSMartin Matuska 		ASSERT3S(prev_lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
1714315ee00fSMartin Matuska 	}
1715eda14cbcSMatt Macy 
1716315ee00fSMartin Matuska 	ASSERT3P(prev_lwb->lwb_root_zio, !=, NULL);
1717315ee00fSMartin Matuska 	zio_add_child(lwb->lwb_root_zio, prev_lwb->lwb_root_zio);
1718eda14cbcSMatt Macy }
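
/*
 * Added note (informal, not from the original source): for lwbs issued in
 * the order A, B, C, the code above makes root(B) a parent of root(A) and
 * root(C) a parent of root(B), and likewise for the write zios while the
 * older write is still in flight.  A parent zio cannot complete before all
 * of its children, so lwb completions, and therefore commit waiter wakeups,
 * always occur in lwb creation order.
 */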
1719eda14cbcSMatt Macy 
1720eda14cbcSMatt Macy 
1721eda14cbcSMatt Macy /*
1722eda14cbcSMatt Macy  * This function's purpose is to "open" an lwb such that it is ready to
1723315ee00fSMartin Matuska  * accept new itxs being committed to it. This function is idempotent; if
1724315ee00fSMartin Matuska  * the passed in lwb has already been opened, it is essentially a no-op.
1725eda14cbcSMatt Macy  */
1726eda14cbcSMatt Macy static void
1727eda14cbcSMatt Macy zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
1728eda14cbcSMatt Macy {
1729eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
1730eda14cbcSMatt Macy 
1731315ee00fSMartin Matuska 	if (lwb->lwb_state != LWB_STATE_NEW) {
1732315ee00fSMartin Matuska 		ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
17334e8d558cSMartin Matuska 		return;
17344e8d558cSMartin Matuska 	}
17354e8d558cSMartin Matuska 
1736315ee00fSMartin Matuska 	mutex_enter(&zilog->zl_lock);
1737eda14cbcSMatt Macy 	lwb->lwb_state = LWB_STATE_OPENED;
1738eda14cbcSMatt Macy 	zilog->zl_last_lwb_opened = lwb;
1739eda14cbcSMatt Macy 	mutex_exit(&zilog->zl_lock);
1740eda14cbcSMatt Macy }
1741eda14cbcSMatt Macy 
1742eda14cbcSMatt Macy /*
1743eda14cbcSMatt Macy  * Maximum block size used by the ZIL.  This is picked up when the ZIL is
1744eda14cbcSMatt Macy  * initialized.  Otherwise this should not be used directly; see
1745eda14cbcSMatt Macy  * zl_max_block_size instead.
1746eda14cbcSMatt Macy  */
1747be181ee2SMartin Matuska static uint_t zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
1748eda14cbcSMatt Macy 
1749eda14cbcSMatt Macy /*
1750b356da80SMartin Matuska  * Plan how to split the provided burst size between several blocks.
1751b356da80SMartin Matuska  */
1752b356da80SMartin Matuska static uint_t
1753b356da80SMartin Matuska zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
1754b356da80SMartin Matuska {
1755b356da80SMartin Matuska 	uint_t md = zilog->zl_max_block_size - sizeof (zil_chain_t);
1756b356da80SMartin Matuska 
1757b356da80SMartin Matuska 	if (size <= md) {
1758b356da80SMartin Matuska 		/*
1759b356da80SMartin Matuska 		 * Small bursts are written as-is in one block.
1760b356da80SMartin Matuska 		 */
1761b356da80SMartin Matuska 		*minsize = size;
1762b356da80SMartin Matuska 		return (size);
1763b356da80SMartin Matuska 	} else if (size > 8 * md) {
1764b356da80SMartin Matuska 		/*
1765b356da80SMartin Matuska 		 * Big bursts use maximum blocks.  The first block size
1766b356da80SMartin Matuska 		 * is hard to predict, but it does not really matter.
1767b356da80SMartin Matuska 		 */
1768b356da80SMartin Matuska 		*minsize = 0;
1769b356da80SMartin Matuska 		return (md);
1770b356da80SMartin Matuska 	}
1771b356da80SMartin Matuska 
1772b356da80SMartin Matuska 	/*
1773b356da80SMartin Matuska 	 * Medium bursts are divided evenly to better utilize several SLOG
1774b356da80SMartin Matuska 	 * VDEVs.  We predict the first block size assuming the worst case of
1775b356da80SMartin Matuska 	 * the others maxing out.  Fall back to using maximum blocks if, due
1776b356da80SMartin Matuska 	 * to large records or wasted space, we cannot predict anything better.
1777b356da80SMartin Matuska 	 */
1778b356da80SMartin Matuska 	uint_t s = size;
1779b356da80SMartin Matuska 	uint_t n = DIV_ROUND_UP(s, md - sizeof (lr_write_t));
1780b356da80SMartin Matuska 	uint_t chunk = DIV_ROUND_UP(s, n);
1781b356da80SMartin Matuska 	uint_t waste = zil_max_waste_space(zilog);
1782b356da80SMartin Matuska 	waste = MAX(waste, zilog->zl_cur_max);
1783b356da80SMartin Matuska 	if (chunk <= md - waste) {
1784b356da80SMartin Matuska 		*minsize = MAX(s - (md - waste) * (n - 1), waste);
1785b356da80SMartin Matuska 		return (chunk);
1786b356da80SMartin Matuska 	} else {
1787b356da80SMartin Matuska 		*minsize = 0;
1788b356da80SMartin Matuska 		return (md);
1789b356da80SMartin Matuska 	}
1790b356da80SMartin Matuska }
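
/*
 * Added worked example (illustrative, using assumed round numbers rather
 * than the real sizeof() values): let md = 130000 and the per-record
 * overhead be 200 bytes.  A medium burst of size = 300000 gives
 * n = DIV_ROUND_UP(300000, 129800) = 3 and chunk = DIV_ROUND_UP(300000, 3)
 * = 100000.  With waste = 16000, chunk <= md - waste = 114000 holds, so the
 * plan returns 100000 with *minsize = MAX(300000 - 114000 * 2, 16000) =
 * 72000.
 */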
1791b356da80SMartin Matuska 
1792b356da80SMartin Matuska /*
1793b356da80SMartin Matuska  * Try to predict the next block size based on previous history.  Make the
1794b356da80SMartin Matuska  * prediction sufficient for 7 of 8 previous bursts.  Don't try to save if
1795b356da80SMartin Matuska  * the saving is less than 50%: extra writes may cost more, but we don't want
1796b356da80SMartin Matuska  * a single spike to badly affect our predictions.
1797b356da80SMartin Matuska  */
1798b356da80SMartin Matuska static uint_t
zil_lwb_predict(zilog_t * zilog)1799b356da80SMartin Matuska zil_lwb_predict(zilog_t *zilog)
1800b356da80SMartin Matuska {
1801b356da80SMartin Matuska 	uint_t m, o;
1802b356da80SMartin Matuska 
1803b356da80SMartin Matuska 	/* If we are in the middle of a burst, take it into account also. */
1804b356da80SMartin Matuska 	if (zilog->zl_cur_size > 0) {
1805b356da80SMartin Matuska 		o = zil_lwb_plan(zilog, zilog->zl_cur_size, &m);
1806b356da80SMartin Matuska 	} else {
1807b356da80SMartin Matuska 		o = UINT_MAX;
1808b356da80SMartin Matuska 		m = 0;
1809b356da80SMartin Matuska 	}
1810b356da80SMartin Matuska 
1811b356da80SMartin Matuska 	/* Find minimum optimal size.  We don't need to go below that. */
1812b356da80SMartin Matuska 	for (int i = 0; i < ZIL_BURSTS; i++)
1813b356da80SMartin Matuska 		o = MIN(o, zilog->zl_prev_opt[i]);
1814b356da80SMartin Matuska 
1815b356da80SMartin Matuska 	/* Find the two biggest minimal first block sizes above the optimal. */
1816b356da80SMartin Matuska 	uint_t m1 = MAX(m, o), m2 = o;
1817b356da80SMartin Matuska 	for (int i = 0; i < ZIL_BURSTS; i++) {
1818b356da80SMartin Matuska 		m = zilog->zl_prev_min[i];
1819b356da80SMartin Matuska 		if (m >= m1) {
1820b356da80SMartin Matuska 			m2 = m1;
1821b356da80SMartin Matuska 			m1 = m;
1822b356da80SMartin Matuska 		} else if (m > m2) {
1823b356da80SMartin Matuska 			m2 = m;
1824b356da80SMartin Matuska 		}
1825b356da80SMartin Matuska 	}
1826b356da80SMartin Matuska 
1827b356da80SMartin Matuska 	/*
1828b356da80SMartin Matuska 	 * If the second minimum size gives a 50% saving, use it.  It may cost us
1829b356da80SMartin Matuska 	 * one additional write later, but the space saving is just too big.
1830b356da80SMartin Matuska 	 */
1831b356da80SMartin Matuska 	return ((m1 < m2 * 2) ? m1 : m2);
1832b356da80SMartin Matuska }
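
/*
 * Added worked example (illustrative sizes): if the two biggest minimal
 * first-block sizes found above are m1 = 96K and m2 = 64K, then m1 < 2 * m2,
 * so we predict 96K and cover the larger bursts.  If instead m1 = 200K and
 * m2 = 64K, then m1 >= 2 * m2: the 200K burst is treated as a spike and we
 * predict 64K, accepting a possible extra write for the >50% space saving.
 */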
1833b356da80SMartin Matuska 
1834b356da80SMartin Matuska /*
18354e8d558cSMartin Matuska  * Close the log block so it can be issued, and allocate the next one.
18364e8d558cSMartin Matuska  * Must be called under zl_issuer_lock to chain more lwbs.
1837eda14cbcSMatt Macy  */
1838eda14cbcSMatt Macy static lwb_t *
1839315ee00fSMartin Matuska zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
1840eda14cbcSMatt Macy {
1841b356da80SMartin Matuska 	uint64_t blksz, plan, plan2;
1842eda14cbcSMatt Macy 
1843eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
1844eda14cbcSMatt Macy 	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
1845315ee00fSMartin Matuska 	lwb->lwb_state = LWB_STATE_CLOSED;
1846eda14cbcSMatt Macy 
18474e8d558cSMartin Matuska 	/*
1848315ee00fSMartin Matuska 	 * If there was an allocation failure, then the returned NULL will
1849315ee00fSMartin Matuska 	 * trigger zil_commit_writer_stall() at the caller.  This is inherently
1850315ee00fSMartin Matuska 	 * racy, since the allocation may not have happened yet.
18514e8d558cSMartin Matuska 	 */
1852315ee00fSMartin Matuska 	if (lwb->lwb_error != 0)
1853315ee00fSMartin Matuska 		return (NULL);
1854eda14cbcSMatt Macy 
1855eda14cbcSMatt Macy 	/*
1856eda14cbcSMatt Macy 	 * Log blocks are pre-allocated.  Here we select the size of the next
1857b356da80SMartin Matuska 	 * block, based on what's left of this burst and the previous history.
1858b356da80SMartin Matuska 	 * While we try to write only the used part of the block, we can't just
1859b356da80SMartin Matuska 	 * always allocate the maximum block size because we can exhaust all
1860b356da80SMartin Matuska 	 * available pool log space, so we try to be reasonable.
1861eda14cbcSMatt Macy 	 */
1862b356da80SMartin Matuska 	if (zilog->zl_cur_left > 0) {
1863b356da80SMartin Matuska 		/*
1864b356da80SMartin Matuska 		 * We are in the middle of a burst and know how much is left.
1865b356da80SMartin Matuska 		 * But if the workload is multi-threaded, there may be more soon.
1866b356da80SMartin Matuska 		 * Try to predict what it may be and plan for the worst case.
1867b356da80SMartin Matuska 		 */
1868b356da80SMartin Matuska 		uint_t m;
1869b356da80SMartin Matuska 		plan = zil_lwb_plan(zilog, zilog->zl_cur_left, &m);
1870b356da80SMartin Matuska 		if (zilog->zl_parallel) {
1871b356da80SMartin Matuska 			plan2 = zil_lwb_plan(zilog, zilog->zl_cur_left +
1872b356da80SMartin Matuska 			    zil_lwb_predict(zilog), &m);
1873b356da80SMartin Matuska 			if (plan < plan2)
1874b356da80SMartin Matuska 				plan = plan2;
1875b356da80SMartin Matuska 		}
1876b356da80SMartin Matuska 	} else {
1877b356da80SMartin Matuska 		/*
1878b356da80SMartin Matuska 		 * The previous burst is done and we can only predict what
1879b356da80SMartin Matuska 		 * will come next.
1880b356da80SMartin Matuska 		 */
1881b356da80SMartin Matuska 		plan = zil_lwb_predict(zilog);
1882b356da80SMartin Matuska 	}
1883b356da80SMartin Matuska 	blksz = plan + sizeof (zil_chain_t);
1884b356da80SMartin Matuska 	blksz = P2ROUNDUP_TYPED(blksz, ZIL_MIN_BLKSZ, uint64_t);
1885b356da80SMartin Matuska 	blksz = MIN(blksz, zilog->zl_max_block_size);
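	/*
	 * Added illustration (assumed values): with plan = 100000, a chain
	 * header of 184 bytes, and ZIL_MIN_BLKSZ = 4096, blksz becomes
	 * 100184, rounds up to 102400 (25 * 4096), and is then capped by
	 * zl_max_block_size.
	 */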
1886b356da80SMartin Matuska 	DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, uint64_t, blksz,
1887b356da80SMartin Matuska 	    uint64_t, plan);
1888eda14cbcSMatt Macy 
1889b356da80SMartin Matuska 	return (zil_alloc_lwb(zilog, blksz, NULL, 0, 0, state));
18904e8d558cSMartin Matuska }
18914e8d558cSMartin Matuska 
18924e8d558cSMartin Matuska /*
18934e8d558cSMartin Matuska  * Finalize previously closed block and issue the write zio.
18944e8d558cSMartin Matuska  */
18954e8d558cSMartin Matuska static void
18964e8d558cSMartin Matuska zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
18974e8d558cSMartin Matuska {
1898315ee00fSMartin Matuska 	spa_t *spa = zilog->zl_spa;
18994e8d558cSMartin Matuska 	zil_chain_t *zilc;
1900315ee00fSMartin Matuska 	boolean_t slog;
1901315ee00fSMartin Matuska 	zbookmark_phys_t zb;
1902315ee00fSMartin Matuska 	zio_priority_t prio;
1903315ee00fSMartin Matuska 	int error;
19044e8d558cSMartin Matuska 
1905315ee00fSMartin Matuska 	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);
1906315ee00fSMartin Matuska 
1907315ee00fSMartin Matuska 	/* Actually fill the lwb with the data. */
19084e8d558cSMartin Matuska 	for (itx_t *itx = list_head(&lwb->lwb_itxs); itx;
19094e8d558cSMartin Matuska 	    itx = list_next(&lwb->lwb_itxs, itx))
19104e8d558cSMartin Matuska 		zil_lwb_commit(zilog, lwb, itx);
19114e8d558cSMartin Matuska 	lwb->lwb_nused = lwb->lwb_nfilled;
1912525fe93dSMartin Matuska 	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax);
19134e8d558cSMartin Matuska 
1914315ee00fSMartin Matuska 	lwb->lwb_root_zio = zio_root(spa, zil_lwb_flush_vdevs_done, lwb,
1915315ee00fSMartin Matuska 	    ZIO_FLAG_CANFAIL);
1916315ee00fSMartin Matuska 
1917315ee00fSMartin Matuska 	/*
1918315ee00fSMartin Matuska 	 * The lwb is now ready to be issued, but only if it has already had
1919315ee00fSMartin Matuska 	 * its block pointer allocated or the allocation has failed.
1920315ee00fSMartin Matuska 	 * Otherwise leave it as-is, relying on some other thread to issue it
1921315ee00fSMartin Matuska 	 * after allocating its block pointer by calling zil_lwb_write_issue()
1922315ee00fSMartin Matuska 	 * for the previous lwb(s) in the chain.
1923315ee00fSMartin Matuska 	 */
1924315ee00fSMartin Matuska 	mutex_enter(&zilog->zl_lock);
1925315ee00fSMartin Matuska 	lwb->lwb_state = LWB_STATE_READY;
1926315ee00fSMartin Matuska 	if (BP_IS_HOLE(&lwb->lwb_blk) && lwb->lwb_error == 0) {
1927315ee00fSMartin Matuska 		mutex_exit(&zilog->zl_lock);
1928315ee00fSMartin Matuska 		return;
1929315ee00fSMartin Matuska 	}
1930315ee00fSMartin Matuska 	mutex_exit(&zilog->zl_lock);
1931315ee00fSMartin Matuska 
1932315ee00fSMartin Matuska next_lwb:
1933315ee00fSMartin Matuska 	if (lwb->lwb_slim)
1934315ee00fSMartin Matuska 		zilc = (zil_chain_t *)lwb->lwb_buf;
1935315ee00fSMartin Matuska 	else
1936315ee00fSMartin Matuska 		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_nmax);
1937315ee00fSMartin Matuska 	int wsz = lwb->lwb_sz;
1938315ee00fSMartin Matuska 	if (lwb->lwb_error == 0) {
1939315ee00fSMartin Matuska 		abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, lwb->lwb_sz);
1940b356da80SMartin Matuska 		if (!lwb->lwb_slog || zilog->zl_cur_size <= zil_slog_bulk)
1941315ee00fSMartin Matuska 			prio = ZIO_PRIORITY_SYNC_WRITE;
1942315ee00fSMartin Matuska 		else
1943315ee00fSMartin Matuska 			prio = ZIO_PRIORITY_ASYNC_WRITE;
1944315ee00fSMartin Matuska 		SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1945315ee00fSMartin Matuska 		    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
1946315ee00fSMartin Matuska 		    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
1947315ee00fSMartin Matuska 		lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, spa, 0,
1948315ee00fSMartin Matuska 		    &lwb->lwb_blk, lwb_abd, lwb->lwb_sz, zil_lwb_write_done,
1949315ee00fSMartin Matuska 		    lwb, prio, ZIO_FLAG_CANFAIL, &zb);
1950315ee00fSMartin Matuska 		zil_lwb_add_block(lwb, &lwb->lwb_blk);
1951315ee00fSMartin Matuska 
1952315ee00fSMartin Matuska 		if (lwb->lwb_slim) {
1953eda14cbcSMatt Macy 			/* For Slim ZIL only write what is used. */
1954315ee00fSMartin Matuska 			wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ,
1955315ee00fSMartin Matuska 			    int);
19564e8d558cSMartin Matuska 			ASSERT3S(wsz, <=, lwb->lwb_sz);
1957eda14cbcSMatt Macy 			zio_shrink(lwb->lwb_write_zio, wsz);
1958e639e0d2SMartin Matuska 			wsz = lwb->lwb_write_zio->io_size;
1959eda14cbcSMatt Macy 		}
1960315ee00fSMartin Matuska 		memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused);
1961eda14cbcSMatt Macy 		zilc->zc_pad = 0;
1962eda14cbcSMatt Macy 		zilc->zc_nused = lwb->lwb_nused;
1963eda14cbcSMatt Macy 		zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
1964315ee00fSMartin Matuska 	} else {
1965315ee00fSMartin Matuska 		/*
1966315ee00fSMartin Matuska 		 * We can't write the lwb if there was an allocation failure,
1967315ee00fSMartin Matuska 		 * so create a null zio instead just to maintain dependencies.
1968315ee00fSMartin Matuska 		 */
1969315ee00fSMartin Matuska 		lwb->lwb_write_zio = zio_null(lwb->lwb_root_zio, spa, NULL,
1970315ee00fSMartin Matuska 		    zil_lwb_write_done, lwb, ZIO_FLAG_CANFAIL);
1971315ee00fSMartin Matuska 		lwb->lwb_write_zio->io_error = lwb->lwb_error;
1972315ee00fSMartin Matuska 	}
1973315ee00fSMartin Matuska 	if (lwb->lwb_child_zio)
1974315ee00fSMartin Matuska 		zio_add_child(lwb->lwb_write_zio, lwb->lwb_child_zio);
1975eda14cbcSMatt Macy 
1976eda14cbcSMatt Macy 	/*
1977315ee00fSMartin Matuska 	 * Open a transaction to allocate the next block pointer.
1978eda14cbcSMatt Macy 	 */
1979315ee00fSMartin Matuska 	dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
1980*61145dc2SMartin Matuska 	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_NOTHROTTLE));
1981315ee00fSMartin Matuska 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1982315ee00fSMartin Matuska 	uint64_t txg = dmu_tx_get_txg(tx);
1983315ee00fSMartin Matuska 
1984315ee00fSMartin Matuska 	/*
1985315ee00fSMartin Matuska 	 * Allocate the next block pointer unless we are already in error.
1986315ee00fSMartin Matuska 	 */
1987315ee00fSMartin Matuska 	lwb_t *nlwb = list_next(&zilog->zl_lwb_list, lwb);
1988315ee00fSMartin Matuska 	blkptr_t *bp = &zilc->zc_next_blk;
1989315ee00fSMartin Matuska 	BP_ZERO(bp);
1990315ee00fSMartin Matuska 	error = lwb->lwb_error;
1991315ee00fSMartin Matuska 	if (error == 0) {
1992315ee00fSMartin Matuska 		error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, nlwb->lwb_sz,
1993315ee00fSMartin Matuska 		    &slog);
1994315ee00fSMartin Matuska 	}
1995315ee00fSMartin Matuska 	if (error == 0) {
1996783d3ff6SMartin Matuska 		ASSERT3U(BP_GET_LOGICAL_BIRTH(bp), ==, txg);
1997315ee00fSMartin Matuska 		BP_SET_CHECKSUM(bp, nlwb->lwb_slim ? ZIO_CHECKSUM_ZILOG2 :
1998315ee00fSMartin Matuska 		    ZIO_CHECKSUM_ZILOG);
1999315ee00fSMartin Matuska 		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
2000315ee00fSMartin Matuska 		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
2001315ee00fSMartin Matuska 	}
2002315ee00fSMartin Matuska 
2003315ee00fSMartin Matuska 	/*
2004315ee00fSMartin Matuska 	 * Reduce TXG open time by incrementing the inflight counter and
2005315ee00fSMartin Matuska 	 * committing the transaction.  zil_sync() will wait for it to reach zero.
2006315ee00fSMartin Matuska 	 */
2007315ee00fSMartin Matuska 	mutex_enter(&zilog->zl_lwb_io_lock);
2008315ee00fSMartin Matuska 	lwb->lwb_issued_txg = txg;
2009315ee00fSMartin Matuska 	zilog->zl_lwb_inflight[txg & TXG_MASK]++;
2010315ee00fSMartin Matuska 	zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg);
2011315ee00fSMartin Matuska 	mutex_exit(&zilog->zl_lwb_io_lock);
2012315ee00fSMartin Matuska 	dmu_tx_commit(tx);
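	/*
	 * The matching decrement is expected to happen in the lwb
	 * completion path (zil_lwb_flush_vdevs_done()), which is what
	 * lets zil_sync() proceed for this txg.
	 */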
2013315ee00fSMartin Matuska 
2014315ee00fSMartin Matuska 	spa_config_enter(spa, SCL_STATE, lwb, RW_READER);
2015315ee00fSMartin Matuska 
2016315ee00fSMartin Matuska 	/*
2017315ee00fSMartin Matuska 	 * We've completed all potentially blocking operations.  Update the
2018315ee00fSMartin Matuska 	 * nlwb and allow it to proceed without possible lock order reversals.
2019315ee00fSMartin Matuska 	 */
2020315ee00fSMartin Matuska 	mutex_enter(&zilog->zl_lock);
2021315ee00fSMartin Matuska 	zil_lwb_set_zio_dependency(zilog, lwb);
2022315ee00fSMartin Matuska 	lwb->lwb_state = LWB_STATE_ISSUED;
2023315ee00fSMartin Matuska 
2024315ee00fSMartin Matuska 	if (nlwb) {
2025315ee00fSMartin Matuska 		nlwb->lwb_blk = *bp;
2026315ee00fSMartin Matuska 		nlwb->lwb_error = error;
2027315ee00fSMartin Matuska 		nlwb->lwb_slog = slog;
2028315ee00fSMartin Matuska 		nlwb->lwb_alloc_txg = txg;
2029315ee00fSMartin Matuska 		if (nlwb->lwb_state != LWB_STATE_READY)
2030315ee00fSMartin Matuska 			nlwb = NULL;
2031315ee00fSMartin Matuska 	}
2032315ee00fSMartin Matuska 	mutex_exit(&zilog->zl_lock);
2033eda14cbcSMatt Macy 
20344e8d558cSMartin Matuska 	if (lwb->lwb_slog) {
20354e8d558cSMartin Matuska 		ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count);
20364e8d558cSMartin Matuska 		ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes,
20374e8d558cSMartin Matuska 		    lwb->lwb_nused);
20384e8d558cSMartin Matuska 		ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_write,
20394e8d558cSMartin Matuska 		    wsz);
20404e8d558cSMartin Matuska 		ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_alloc,
20414e8d558cSMartin Matuska 		    BP_GET_LSIZE(&lwb->lwb_blk));
20424e8d558cSMartin Matuska 	} else {
20434e8d558cSMartin Matuska 		ZIL_STAT_BUMP(zilog, zil_itx_metaslab_normal_count);
20444e8d558cSMartin Matuska 		ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_bytes,
20454e8d558cSMartin Matuska 		    lwb->lwb_nused);
20464e8d558cSMartin Matuska 		ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_write,
20474e8d558cSMartin Matuska 		    wsz);
20484e8d558cSMartin Matuska 		ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_alloc,
20494e8d558cSMartin Matuska 		    BP_GET_LSIZE(&lwb->lwb_blk));
20504e8d558cSMartin Matuska 	}
2051eda14cbcSMatt Macy 	lwb->lwb_issued_timestamp = gethrtime();
2052315ee00fSMartin Matuska 	if (lwb->lwb_child_zio)
2053315ee00fSMartin Matuska 		zio_nowait(lwb->lwb_child_zio);
20542ad756a6SMartin Matuska 	zio_nowait(lwb->lwb_write_zio);
20552ad756a6SMartin Matuska 	zio_nowait(lwb->lwb_root_zio);
2056315ee00fSMartin Matuska 
2057315ee00fSMartin Matuska 	/*
2058315ee00fSMartin Matuska 	 * If nlwb was ready when we gave it the block pointer,
2059315ee00fSMartin Matuska 	 * it is on us to issue it, and possibly the following ones.
2060315ee00fSMartin Matuska 	 */
2061315ee00fSMartin Matuska 	lwb = nlwb;
2062315ee00fSMartin Matuska 	if (lwb)
2063315ee00fSMartin Matuska 		goto next_lwb;
2064eda14cbcSMatt Macy }
2065eda14cbcSMatt Macy 
2066eda14cbcSMatt Macy /*
20672a58b312SMartin Matuska  * Maximum amount of data that can be put into a single log block.
2068eda14cbcSMatt Macy  */
2069eda14cbcSMatt Macy uint64_t
20702a58b312SMartin Matuska zil_max_log_data(zilog_t *zilog, size_t hdrsize)
2071eda14cbcSMatt Macy {
20722a58b312SMartin Matuska 	return (zilog->zl_max_block_size - sizeof (zil_chain_t) - hdrsize);
2073eda14cbcSMatt Macy }
2074eda14cbcSMatt Macy 
2075eda14cbcSMatt Macy /*
2076eda14cbcSMatt Macy  * Maximum amount of log space we agree to waste in order to reduce the
2077b2526e8bSMartin Matuska  * number of WR_NEED_COPY chunks and thus the zl_get_data() overhead (~6%).
2078eda14cbcSMatt Macy  */
2079eda14cbcSMatt Macy static inline uint64_t
2080eda14cbcSMatt Macy zil_max_waste_space(zilog_t *zilog)
2081eda14cbcSMatt Macy {
2082b2526e8bSMartin Matuska 	return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 16);
2083eda14cbcSMatt Macy }
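/*
 * For example, with the default 128 KiB maximum log block size, this allows
 * roughly (128 KiB - headers) / 16, i.e. about 8 KiB, to stay unused at the
 * end of a block rather than splitting off another WR_NEED_COPY chunk.
 */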
2084eda14cbcSMatt Macy 
2085eda14cbcSMatt Macy /*
2086eda14cbcSMatt Macy  * Maximum amount of write data for WR_COPIED.  For correctness, consumers
2087eda14cbcSMatt Macy  * must fall back to WR_NEED_COPY if we can't fit the entire record into one
2088eda14cbcSMatt Macy  * maximum sized log block, because each WR_COPIED record must fit in a
2089b2526e8bSMartin Matuska  * single log block.  Below that it is a tradeoff between an additional
2090b2526e8bSMartin Matuska  * memory copy and possibly worse log space efficiency vs an additional
 * range lock/unlock.
2091eda14cbcSMatt Macy  */
2092b2526e8bSMartin Matuska static uint_t zil_maxcopied = 7680;
2093b2526e8bSMartin Matuska 
2094eda14cbcSMatt Macy uint64_t
2095eda14cbcSMatt Macy zil_max_copied_data(zilog_t *zilog)
2096eda14cbcSMatt Macy {
2097b2526e8bSMartin Matuska 	uint64_t max_data = zil_max_log_data(zilog, sizeof (lr_write_t));
2098b2526e8bSMartin Matuska 	return (MIN(max_data, zil_maxcopied));
2099eda14cbcSMatt Macy }
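/*
 * Illustrative sketch (an assumption about callers, not code in this file):
 * a log writer such as zfs_log_write() typically demotes an inline write
 * that exceeds this limit, roughly:
 *
 *	if (wr_state == WR_COPIED && len > zil_max_copied_data(zilog))
 *		wr_state = WR_NEED_COPY;
 */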
2100eda14cbcSMatt Macy 
2101b356da80SMartin Matuska static uint64_t
2102b356da80SMartin Matuska zil_itx_record_size(itx_t *itx)
2103b356da80SMartin Matuska {
2104b356da80SMartin Matuska 	lr_t *lr = &itx->itx_lr;
2105b356da80SMartin Matuska 
2106b356da80SMartin Matuska 	if (lr->lrc_txtype == TX_COMMIT)
2107b356da80SMartin Matuska 		return (0);
2108b356da80SMartin Matuska 	ASSERT3U(lr->lrc_reclen, >=, sizeof (lr_t));
2109b356da80SMartin Matuska 	return (lr->lrc_reclen);
2110b356da80SMartin Matuska }
2111b356da80SMartin Matuska 
2112b356da80SMartin Matuska static uint64_t
2113b356da80SMartin Matuska zil_itx_data_size(itx_t *itx)
2114b356da80SMartin Matuska {
2115b356da80SMartin Matuska 	lr_t *lr = &itx->itx_lr;
2116b356da80SMartin Matuska 	lr_write_t *lrw = (lr_write_t *)lr;
2117b356da80SMartin Matuska 
2118b356da80SMartin Matuska 	if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
2119b356da80SMartin Matuska 		ASSERT3U(lr->lrc_reclen, ==, sizeof (lr_write_t));
2120b356da80SMartin Matuska 		return (P2ROUNDUP_TYPED(lrw->lr_length, sizeof (uint64_t),
2121b356da80SMartin Matuska 		    uint64_t));
2122b356da80SMartin Matuska 	}
2123b356da80SMartin Matuska 	return (0);
2124b356da80SMartin Matuska }
2125b356da80SMartin Matuska 
2126b356da80SMartin Matuska static uint64_t
2127b356da80SMartin Matuska zil_itx_full_size(itx_t *itx)
2128b356da80SMartin Matuska {
2129b356da80SMartin Matuska 	lr_t *lr = &itx->itx_lr;
2130b356da80SMartin Matuska 
2131b356da80SMartin Matuska 	if (lr->lrc_txtype == TX_COMMIT)
2132b356da80SMartin Matuska 		return (0);
2133b356da80SMartin Matuska 	ASSERT3U(lr->lrc_reclen, >=, sizeof (lr_t));
2134b356da80SMartin Matuska 	return (lr->lrc_reclen + zil_itx_data_size(itx));
2135b356da80SMartin Matuska }
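/*
 * Worked example: for a TX_WRITE itx in WR_NEED_COPY state with an
 * lr_length of 5003 bytes, zil_itx_record_size() returns lrc_reclen
 * (sizeof (lr_write_t)), zil_itx_data_size() returns
 * P2ROUNDUP(5003, 8) == 5008, and zil_itx_full_size() returns their sum.
 * A TX_COMMIT itx contributes zero to all three.
 */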
2136b356da80SMartin Matuska 
21374e8d558cSMartin Matuska /*
21384e8d558cSMartin Matuska  * Estimate space needed in the lwb for the itx.  Allocate more lwbs or
21394e8d558cSMartin Matuska  * split the itx as needed, but don't touch the actual transaction data.
21404e8d558cSMartin Matuska  * Must be called under zl_issuer_lock, since it may call
21414e8d558cSMartin Matuska  * zil_lwb_write_close() to chain more lwbs.
21424e8d558cSMartin Matuska  */
2143eda14cbcSMatt Macy static lwb_t *
21444e8d558cSMartin Matuska zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs)
2145eda14cbcSMatt Macy {
21464e8d558cSMartin Matuska 	itx_t *citx;
21474e8d558cSMartin Matuska 	lr_t *lr, *clr;
21484e8d558cSMartin Matuska 	lr_write_t *lrw;
21494e8d558cSMartin Matuska 	uint64_t dlen, dnow, lwb_sp, reclen, max_log_data;
2150eda14cbcSMatt Macy 
2151eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2152eda14cbcSMatt Macy 	ASSERT3P(lwb, !=, NULL);
2153eda14cbcSMatt Macy 	ASSERT3P(lwb->lwb_buf, !=, NULL);
2154eda14cbcSMatt Macy 
2155eda14cbcSMatt Macy 	zil_lwb_write_open(zilog, lwb);
2156eda14cbcSMatt Macy 
21574e8d558cSMartin Matuska 	lr = &itx->itx_lr;
21584e8d558cSMartin Matuska 	lrw = (lr_write_t *)lr;
2159eda14cbcSMatt Macy 
2160eda14cbcSMatt Macy 	/*
2161eda14cbcSMatt Macy 	 * A commit itx doesn't represent any on-disk state; instead
2162eda14cbcSMatt Macy 	 * it's simply used as a place holder on the commit list, and
2163eda14cbcSMatt Macy 	 * provides a mechanism for attaching a "commit waiter" onto the
2164eda14cbcSMatt Macy 	 * correct lwb (such that the waiter can be signalled upon
2165eda14cbcSMatt Macy 	 * completion of that lwb). Thus, we don't process this itx's
2166eda14cbcSMatt Macy 	 * log record if it's a commit itx (these itx's don't have log
2167eda14cbcSMatt Macy 	 * records), and instead link the itx's waiter onto the lwb's
2168eda14cbcSMatt Macy 	 * list of waiters.
2169eda14cbcSMatt Macy 	 *
2170eda14cbcSMatt Macy 	 * For more details, see the comment above zil_commit().
2171eda14cbcSMatt Macy 	 */
21724e8d558cSMartin Matuska 	if (lr->lrc_txtype == TX_COMMIT) {
2173eda14cbcSMatt Macy 		zil_commit_waiter_link_lwb(itx->itx_private, lwb);
21744e8d558cSMartin Matuska 		list_insert_tail(&lwb->lwb_itxs, itx);
2175eda14cbcSMatt Macy 		return (lwb);
2176eda14cbcSMatt Macy 	}
2177eda14cbcSMatt Macy 
2178525fe93dSMartin Matuska 	reclen = lr->lrc_reclen;
2179525fe93dSMartin Matuska 	ASSERT3U(reclen, >=, sizeof (lr_t));
2180525fe93dSMartin Matuska 	ASSERT3U(reclen, <=, zil_max_log_data(zilog, 0));
2181b356da80SMartin Matuska 	dlen = zil_itx_data_size(itx);
2182eda14cbcSMatt Macy 
2183eda14cbcSMatt Macy cont:
2184eda14cbcSMatt Macy 	/*
2185eda14cbcSMatt Macy 	 * If this record won't fit in the current log block, start a new one.
2186eda14cbcSMatt Macy 	 * For WR_NEED_COPY optimize layout for minimal number of chunks.
2187eda14cbcSMatt Macy 	 */
2188315ee00fSMartin Matuska 	lwb_sp = lwb->lwb_nmax - lwb->lwb_nused;
21892a58b312SMartin Matuska 	max_log_data = zil_max_log_data(zilog, sizeof (lr_write_t));
2190eda14cbcSMatt Macy 	if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
2191eda14cbcSMatt Macy 	    lwb_sp < zil_max_waste_space(zilog) &&
2192eda14cbcSMatt Macy 	    (dlen % max_log_data == 0 ||
2193eda14cbcSMatt Macy 	    lwb_sp < reclen + dlen % max_log_data))) {
2194315ee00fSMartin Matuska 		list_insert_tail(ilwbs, lwb);
2195315ee00fSMartin Matuska 		lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_OPENED);
2196eda14cbcSMatt Macy 		if (lwb == NULL)
2197eda14cbcSMatt Macy 			return (NULL);
2198315ee00fSMartin Matuska 		lwb_sp = lwb->lwb_nmax - lwb->lwb_nused;
2199525fe93dSMartin Matuska 	}
2200eda14cbcSMatt Macy 
2201eda14cbcSMatt Macy 	/*
2202525fe93dSMartin Matuska 	 * There must be enough space in the log block to hold reclen.
2203525fe93dSMartin Matuska 	 * For WR_COPIED, we need to fit the whole record in one block,
2204525fe93dSMartin Matuska 	 * and reclen is the write record header size + the data size.
2205525fe93dSMartin Matuska 	 * For WR_NEED_COPY, we can create multiple records, splitting
2206525fe93dSMartin Matuska 	 * the data into multiple blocks, so we only need to fit one
2207525fe93dSMartin Matuska 	 * word of data per block; in this case reclen is just the header
2208525fe93dSMartin Matuska 	 * size (no data).
2209eda14cbcSMatt Macy 	 */
2210eda14cbcSMatt Macy 	ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
2211eda14cbcSMatt Macy 
2212eda14cbcSMatt Macy 	dnow = MIN(dlen, lwb_sp - reclen);
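	/*
	 * Example: with lwb_sp of 1024 bytes and a WR_NEED_COPY itx whose
	 * dlen is 4096, dnow becomes 1024 - reclen.  The clone created
	 * below carries that first chunk, the original itx's offset and
	 * length are advanced past it, and we loop back to "cont" to place
	 * the remainder into the following lwb(s).
	 */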
22134e8d558cSMartin Matuska 	if (dlen > dnow) {
22144e8d558cSMartin Matuska 		ASSERT3U(lr->lrc_txtype, ==, TX_WRITE);
22154e8d558cSMartin Matuska 		ASSERT3U(itx->itx_wr_state, ==, WR_NEED_COPY);
22164e8d558cSMartin Matuska 		citx = zil_itx_clone(itx);
22174e8d558cSMartin Matuska 		clr = &citx->itx_lr;
22184e8d558cSMartin Matuska 		lr_write_t *clrw = (lr_write_t *)clr;
22194e8d558cSMartin Matuska 		clrw->lr_length = dnow;
22204e8d558cSMartin Matuska 		lrw->lr_offset += dnow;
22214e8d558cSMartin Matuska 		lrw->lr_length -= dnow;
2222b356da80SMartin Matuska 		zilog->zl_cur_left -= dnow;
22234e8d558cSMartin Matuska 	} else {
22244e8d558cSMartin Matuska 		citx = itx;
22254e8d558cSMartin Matuska 		clr = lr;
22264e8d558cSMartin Matuska 	}
22274e8d558cSMartin Matuska 
22284e8d558cSMartin Matuska 	/*
22294e8d558cSMartin Matuska 	 * We're actually making an entry, so update lrc_seq to be the
22304e8d558cSMartin Matuska 	 * log record sequence number.  Note that this is generally not
22314e8d558cSMartin Matuska 	 * equal to the itx sequence number because not all transactions
22324e8d558cSMartin Matuska 	 * are synchronous, and sometimes spa_sync() gets there first.
22334e8d558cSMartin Matuska 	 */
22344e8d558cSMartin Matuska 	clr->lrc_seq = ++zilog->zl_lr_seq;
22354e8d558cSMartin Matuska 
22364e8d558cSMartin Matuska 	lwb->lwb_nused += reclen + dnow;
2237315ee00fSMartin Matuska 	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax);
22384e8d558cSMartin Matuska 	ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
22394e8d558cSMartin Matuska 
22404e8d558cSMartin Matuska 	zil_lwb_add_txg(lwb, lr->lrc_txg);
22414e8d558cSMartin Matuska 	list_insert_tail(&lwb->lwb_itxs, citx);
22424e8d558cSMartin Matuska 
22434e8d558cSMartin Matuska 	dlen -= dnow;
2244b356da80SMartin Matuska 	if (dlen > 0)
22454e8d558cSMartin Matuska 		goto cont;
22464e8d558cSMartin Matuska 
2247315ee00fSMartin Matuska 	if (lr->lrc_txtype == TX_WRITE &&
2248315ee00fSMartin Matuska 	    lr->lrc_txg > spa_freeze_txg(zilog->zl_spa))
22494e8d558cSMartin Matuska 		txg_wait_synced(zilog->zl_dmu_pool, lr->lrc_txg);
22504e8d558cSMartin Matuska 
22514e8d558cSMartin Matuska 	return (lwb);
22524e8d558cSMartin Matuska }
22534e8d558cSMartin Matuska 
22544e8d558cSMartin Matuska /*
22554e8d558cSMartin Matuska  * Fill the actual transaction data into the lwb, following zil_lwb_assign().
22564e8d558cSMartin Matuska  * Does not require locking.
22574e8d558cSMartin Matuska  */
22584e8d558cSMartin Matuska static void
22594e8d558cSMartin Matuska zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
22604e8d558cSMartin Matuska {
22614e8d558cSMartin Matuska 	lr_t *lr, *lrb;
22624e8d558cSMartin Matuska 	lr_write_t *lrw, *lrwb;
22634e8d558cSMartin Matuska 	char *lr_buf;
22644e8d558cSMartin Matuska 	uint64_t dlen, reclen;
22654e8d558cSMartin Matuska 
22664e8d558cSMartin Matuska 	lr = &itx->itx_lr;
22674e8d558cSMartin Matuska 	lrw = (lr_write_t *)lr;
22684e8d558cSMartin Matuska 
22694e8d558cSMartin Matuska 	if (lr->lrc_txtype == TX_COMMIT)
22704e8d558cSMartin Matuska 		return;
22714e8d558cSMartin Matuska 
22724e8d558cSMartin Matuska 	reclen = lr->lrc_reclen;
2273b356da80SMartin Matuska 	dlen = zil_itx_data_size(itx);
22744e8d558cSMartin Matuska 	ASSERT3U(reclen + dlen, <=, lwb->lwb_nused - lwb->lwb_nfilled);
22754e8d558cSMartin Matuska 
22764e8d558cSMartin Matuska 	lr_buf = lwb->lwb_buf + lwb->lwb_nfilled;
22774e8d558cSMartin Matuska 	memcpy(lr_buf, lr, reclen);
22784e8d558cSMartin Matuska 	lrb = (lr_t *)lr_buf;		/* Like lr, but inside lwb. */
22794e8d558cSMartin Matuska 	lrwb = (lr_write_t *)lrb;	/* Like lrw, but inside lwb. */
2280eda14cbcSMatt Macy 
2281271171e0SMartin Matuska 	ZIL_STAT_BUMP(zilog, zil_itx_count);
2282eda14cbcSMatt Macy 
2283eda14cbcSMatt Macy 	/*
2284eda14cbcSMatt Macy 	 * If it's a write, fetch the data or get its blkptr as appropriate.
2285eda14cbcSMatt Macy 	 */
22864e8d558cSMartin Matuska 	if (lr->lrc_txtype == TX_WRITE) {
2287eda14cbcSMatt Macy 		if (itx->itx_wr_state == WR_COPIED) {
2288271171e0SMartin Matuska 			ZIL_STAT_BUMP(zilog, zil_itx_copied_count);
2289271171e0SMartin Matuska 			ZIL_STAT_INCR(zilog, zil_itx_copied_bytes,
2290271171e0SMartin Matuska 			    lrw->lr_length);
2291eda14cbcSMatt Macy 		} else {
2292eda14cbcSMatt Macy 			char *dbuf;
2293eda14cbcSMatt Macy 			int error;
2294eda14cbcSMatt Macy 
2295eda14cbcSMatt Macy 			if (itx->itx_wr_state == WR_NEED_COPY) {
2296eda14cbcSMatt Macy 				dbuf = lr_buf + reclen;
22974e8d558cSMartin Matuska 				lrb->lrc_reclen += dlen;
2298271171e0SMartin Matuska 				ZIL_STAT_BUMP(zilog, zil_itx_needcopy_count);
2299271171e0SMartin Matuska 				ZIL_STAT_INCR(zilog, zil_itx_needcopy_bytes,
23004e8d558cSMartin Matuska 				    dlen);
2301eda14cbcSMatt Macy 			} else {
2302eda14cbcSMatt Macy 				ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT);
2303eda14cbcSMatt Macy 				dbuf = NULL;
2304271171e0SMartin Matuska 				ZIL_STAT_BUMP(zilog, zil_itx_indirect_count);
2305271171e0SMartin Matuska 				ZIL_STAT_INCR(zilog, zil_itx_indirect_bytes,
2306eda14cbcSMatt Macy 				    lrw->lr_length);
2307315ee00fSMartin Matuska 				if (lwb->lwb_child_zio == NULL) {
23086c1e79dfSMartin Matuska 					lwb->lwb_child_zio = zio_null(NULL,
23096c1e79dfSMartin Matuska 					    zilog->zl_spa, NULL, NULL, NULL,
2310315ee00fSMartin Matuska 					    ZIO_FLAG_CANFAIL);
2311315ee00fSMartin Matuska 				}
2312eda14cbcSMatt Macy 			}
2313eda14cbcSMatt Macy 
2314eda14cbcSMatt Macy 			/*
2315315ee00fSMartin Matuska 			 * The "lwb_child_zio" we pass in will become a child of
2316315ee00fSMartin Matuska 			 * "lwb_write_zio" once the latter is created, and thus a
2317315ee00fSMartin Matuska 			 * parent of any zios created by "zl_get_data".  This way
2318315ee00fSMartin Matuska 			 * "lwb_write_zio" will first wait for the children's block
2319315ee00fSMartin Matuska 			 * pointers before doing its own write, and then for their
2320315ee00fSMartin Matuska 			 * write completion before flushing the vdev caches.
2321eda14cbcSMatt Macy 			 */
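			/*
			 * A rough sketch of the resulting dependency
			 * tree (parent <- child):
			 *
			 *   lwb_root_zio <- lwb_write_zio <- lwb_child_zio
			 *                                    <- dmu_sync() zios
			 */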
2322eda14cbcSMatt Macy 			error = zilog->zl_get_data(itx->itx_private,
2323f9693befSMartin Matuska 			    itx->itx_gen, lrwb, dbuf, lwb,
2324315ee00fSMartin Matuska 			    lwb->lwb_child_zio);
23254e8d558cSMartin Matuska 			if (dbuf != NULL && error == 0) {
23261f88aa09SMartin Matuska 				/* Zero any padding bytes in the last block. */
23274e8d558cSMartin Matuska 				memset((char *)dbuf + lrwb->lr_length, 0,
23284e8d558cSMartin Matuska 				    dlen - lrwb->lr_length);
23294e8d558cSMartin Matuska 			}
2330eda14cbcSMatt Macy 
23312a58b312SMartin Matuska 			/*
23322a58b312SMartin Matuska 			 * Typically, the only return values we should see from
23332a58b312SMartin Matuska 			 * ->zl_get_data() are 0, EIO, ENOENT, EEXIST or
23342a58b312SMartin Matuska 			 * EALREADY. However, it is also possible to see other
23352a58b312SMartin Matuska 			 * error values such as ENOSPC or EINVAL from
23362a58b312SMartin Matuska 			 * dmu_read() -> dnode_hold() -> dnode_hold_impl() or
23372a58b312SMartin Matuska 			 * ENXIO as well as a multitude of others from the
23382a58b312SMartin Matuska 			 * block layer through dmu_buf_hold() -> dbuf_read()
23392a58b312SMartin Matuska 			 * -> zio_wait(), as well as through dmu_read() ->
23402a58b312SMartin Matuska 			 * dnode_hold() -> dnode_hold_impl() -> dbuf_read() ->
23412a58b312SMartin Matuska 			 * zio_wait(). When these errors happen, we can assume
23422a58b312SMartin Matuska 			 * that neither an immediate write nor an indirect
23432a58b312SMartin Matuska 			 * write occurred, so we need to fall back to
23442a58b312SMartin Matuska 			 * txg_wait_synced(). This is unusual, so we print to
23452a58b312SMartin Matuska 			 * dmesg whenever one of these errors occurs.
23462a58b312SMartin Matuska 			 */
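			/*
			 * Note the case ordering below: unknown errors and
			 * EIO wait for the txg to sync before returning
			 * without filling the record, while ENOENT, EEXIST
			 * and EALREADY return immediately.
			 */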
23472a58b312SMartin Matuska 			switch (error) {
23482a58b312SMartin Matuska 			case 0:
23492a58b312SMartin Matuska 				break;
23502a58b312SMartin Matuska 			default:
23512a58b312SMartin Matuska 				cmn_err(CE_WARN, "zil_lwb_commit() received "
23522a58b312SMartin Matuska 				    "unexpected error %d from ->zl_get_data()"
23532a58b312SMartin Matuska 				    ". Falling back to txg_wait_synced().",
23542a58b312SMartin Matuska 				    error);
23552a58b312SMartin Matuska 				zfs_fallthrough;
23562a58b312SMartin Matuska 			case EIO:
23574e8d558cSMartin Matuska 				txg_wait_synced(zilog->zl_dmu_pool,
23584e8d558cSMartin Matuska 				    lr->lrc_txg);
23592a58b312SMartin Matuska 				zfs_fallthrough;
23602a58b312SMartin Matuska 			case ENOENT:
23612a58b312SMartin Matuska 				zfs_fallthrough;
23622a58b312SMartin Matuska 			case EEXIST:
23632a58b312SMartin Matuska 				zfs_fallthrough;
23642a58b312SMartin Matuska 			case EALREADY:
23654e8d558cSMartin Matuska 				return;
2366eda14cbcSMatt Macy 			}
2367eda14cbcSMatt Macy 		}
2368eda14cbcSMatt Macy 	}
2369eda14cbcSMatt Macy 
23704e8d558cSMartin Matuska 	lwb->lwb_nfilled += reclen + dlen;
23714e8d558cSMartin Matuska 	ASSERT3S(lwb->lwb_nfilled, <=, lwb->lwb_nused);
23724e8d558cSMartin Matuska 	ASSERT0(P2PHASE(lwb->lwb_nfilled, sizeof (uint64_t)));
2373eda14cbcSMatt Macy }
2374eda14cbcSMatt Macy 
2375eda14cbcSMatt Macy itx_t *
23761f88aa09SMartin Matuska zil_itx_create(uint64_t txtype, size_t olrsize)
2377eda14cbcSMatt Macy {
23781f88aa09SMartin Matuska 	size_t itxsize, lrsize;
2379eda14cbcSMatt Macy 	itx_t *itx;
2380eda14cbcSMatt Macy 
2381525fe93dSMartin Matuska 	ASSERT3U(olrsize, >=, sizeof (lr_t));
23821f88aa09SMartin Matuska 	lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t);
2383525fe93dSMartin Matuska 	ASSERT3U(lrsize, >=, olrsize);
2384eda14cbcSMatt Macy 	itxsize = offsetof(itx_t, itx_lr) + lrsize;
2385eda14cbcSMatt Macy 
2386eda14cbcSMatt Macy 	itx = zio_data_buf_alloc(itxsize);
2387eda14cbcSMatt Macy 	itx->itx_lr.lrc_txtype = txtype;
2388eda14cbcSMatt Macy 	itx->itx_lr.lrc_reclen = lrsize;
2389eda14cbcSMatt Macy 	itx->itx_lr.lrc_seq = 0;	/* defensive */
2390da5137abSMartin Matuska 	memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize);
2391eda14cbcSMatt Macy 	itx->itx_sync = B_TRUE;		/* default is synchronous */
2392eda14cbcSMatt Macy 	itx->itx_callback = NULL;
2393eda14cbcSMatt Macy 	itx->itx_callback_data = NULL;
2394eda14cbcSMatt Macy 	itx->itx_size = itxsize;
2395eda14cbcSMatt Macy 
2396eda14cbcSMatt Macy 	return (itx);
2397eda14cbcSMatt Macy }
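/*
 * Illustrative sketch (an assumption about typical callers, not code in
 * this file): a consumer such as zfs_log_write() builds and queues an itx
 * roughly as follows, here for a WR_COPIED write of len bytes:
 *
 *	itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t) + len);
 *	lr_write_t *lr = (lr_write_t *)&itx->itx_lr;
 *	lr->lr_foid = foid;
 *	lr->lr_offset = off;
 *	lr->lr_length = len;
 *	itx->itx_wr_state = WR_COPIED;
 *	(copy the write data to the buffer at lr + 1)
 *	zil_itx_assign(zilog, itx, tx);
 */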
2398eda14cbcSMatt Macy 
23994e8d558cSMartin Matuska static itx_t *
24004e8d558cSMartin Matuska zil_itx_clone(itx_t *oitx)
24014e8d558cSMartin Matuska {
2402525fe93dSMartin Matuska 	ASSERT3U(oitx->itx_size, >=, sizeof (itx_t));
2403525fe93dSMartin Matuska 	ASSERT3U(oitx->itx_size, ==,
2404525fe93dSMartin Matuska 	    offsetof(itx_t, itx_lr) + oitx->itx_lr.lrc_reclen);
2405525fe93dSMartin Matuska 
24064e8d558cSMartin Matuska 	itx_t *itx = zio_data_buf_alloc(oitx->itx_size);
24074e8d558cSMartin Matuska 	memcpy(itx, oitx, oitx->itx_size);
24084e8d558cSMartin Matuska 	itx->itx_callback = NULL;
24094e8d558cSMartin Matuska 	itx->itx_callback_data = NULL;
24104e8d558cSMartin Matuska 	return (itx);
24114e8d558cSMartin Matuska }
24124e8d558cSMartin Matuska 
2413eda14cbcSMatt Macy void
2414eda14cbcSMatt Macy zil_itx_destroy(itx_t *itx)
2415eda14cbcSMatt Macy {
2416525fe93dSMartin Matuska 	ASSERT3U(itx->itx_size, >=, sizeof (itx_t));
2417525fe93dSMartin Matuska 	ASSERT3U(itx->itx_lr.lrc_reclen, ==,
2418525fe93dSMartin Matuska 	    itx->itx_size - offsetof(itx_t, itx_lr));
2419eda14cbcSMatt Macy 	IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL);
2420eda14cbcSMatt Macy 	IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
2421eda14cbcSMatt Macy 
2422eda14cbcSMatt Macy 	if (itx->itx_callback != NULL)
2423eda14cbcSMatt Macy 		itx->itx_callback(itx->itx_callback_data);
2424eda14cbcSMatt Macy 
2425eda14cbcSMatt Macy 	zio_data_buf_free(itx, itx->itx_size);
2426eda14cbcSMatt Macy }
2427eda14cbcSMatt Macy 
2428eda14cbcSMatt Macy /*
2429eda14cbcSMatt Macy  * Free up the sync and async itxs. The itxs_t has already been detached
2430eda14cbcSMatt Macy  * so no locks are needed.
2431eda14cbcSMatt Macy  */
2432eda14cbcSMatt Macy static void
24333f9d360cSMartin Matuska zil_itxg_clean(void *arg)
2434eda14cbcSMatt Macy {
2435eda14cbcSMatt Macy 	itx_t *itx;
2436eda14cbcSMatt Macy 	list_t *list;
2437eda14cbcSMatt Macy 	avl_tree_t *t;
2438eda14cbcSMatt Macy 	void *cookie;
24393f9d360cSMartin Matuska 	itxs_t *itxs = arg;
2440eda14cbcSMatt Macy 	itx_async_node_t *ian;
2441eda14cbcSMatt Macy 
2442eda14cbcSMatt Macy 	list = &itxs->i_sync_list;
2443c0a83fe0SMartin Matuska 	while ((itx = list_remove_head(list)) != NULL) {
2444eda14cbcSMatt Macy 		/*
2445eda14cbcSMatt Macy 		 * In the general case, commit itxs will not be found
2446eda14cbcSMatt Macy 		 * here, as they'll be committed to an lwb via
24474e8d558cSMartin Matuska 		 * zil_lwb_assign(), and free'd in that function. Having
2448eda14cbcSMatt Macy 		 * said that, it is still possible for commit itxs to be
2449eda14cbcSMatt Macy 		 * found here, due to the following race:
2450eda14cbcSMatt Macy 		 *
2451eda14cbcSMatt Macy 		 *	- a thread calls zil_commit() which assigns the
2452eda14cbcSMatt Macy 		 *	  commit itx to a per-txg i_sync_list
2453eda14cbcSMatt Macy 		 *	- zil_itxg_clean() is called (e.g. via spa_sync())
2454eda14cbcSMatt Macy 		 *	  while the waiter is still on the i_sync_list
2455eda14cbcSMatt Macy 		 *
2456eda14cbcSMatt Macy 		 * There's nothing to prevent syncing the txg while the
2457eda14cbcSMatt Macy 		 * waiter is on the i_sync_list. This normally doesn't
2458eda14cbcSMatt Macy 		 * happen because spa_sync() is slower than zil_commit(),
2459eda14cbcSMatt Macy 		 * but if zil_commit() calls txg_wait_synced() (e.g.
2460eda14cbcSMatt Macy 		 * because zil_create() or zil_commit_writer_stall() is
2461eda14cbcSMatt Macy 		 * called) we will hit this case.
2462eda14cbcSMatt Macy 		 */
2463eda14cbcSMatt Macy 		if (itx->itx_lr.lrc_txtype == TX_COMMIT)
2464eda14cbcSMatt Macy 			zil_commit_waiter_skip(itx->itx_private);
2465eda14cbcSMatt Macy 
2466eda14cbcSMatt Macy 		zil_itx_destroy(itx);
2467eda14cbcSMatt Macy 	}
2468eda14cbcSMatt Macy 
2469eda14cbcSMatt Macy 	cookie = NULL;
2470eda14cbcSMatt Macy 	t = &itxs->i_async_tree;
2471eda14cbcSMatt Macy 	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
2472eda14cbcSMatt Macy 		list = &ian->ia_list;
2473c0a83fe0SMartin Matuska 		while ((itx = list_remove_head(list)) != NULL) {
2474eda14cbcSMatt Macy 			/* commit itxs should never be on the async lists. */
2475eda14cbcSMatt Macy 			ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
2476eda14cbcSMatt Macy 			zil_itx_destroy(itx);
2477eda14cbcSMatt Macy 		}
2478eda14cbcSMatt Macy 		list_destroy(list);
2479eda14cbcSMatt Macy 		kmem_free(ian, sizeof (itx_async_node_t));
2480eda14cbcSMatt Macy 	}
2481eda14cbcSMatt Macy 	avl_destroy(t);
2482eda14cbcSMatt Macy 
2483eda14cbcSMatt Macy 	kmem_free(itxs, sizeof (itxs_t));
2484eda14cbcSMatt Macy }
2485eda14cbcSMatt Macy 
2486eda14cbcSMatt Macy static int
2487eda14cbcSMatt Macy zil_aitx_compare(const void *x1, const void *x2)
2488eda14cbcSMatt Macy {
2489eda14cbcSMatt Macy 	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
2490eda14cbcSMatt Macy 	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
2491eda14cbcSMatt Macy 
2492eda14cbcSMatt Macy 	return (TREE_CMP(o1, o2));
2493eda14cbcSMatt Macy }
2494eda14cbcSMatt Macy 
2495eda14cbcSMatt Macy /*
2496eda14cbcSMatt Macy  * Remove all async itxs with the given oid.
2497eda14cbcSMatt Macy  */
2498eda14cbcSMatt Macy void
2499eda14cbcSMatt Macy zil_remove_async(zilog_t *zilog, uint64_t oid)
2500eda14cbcSMatt Macy {
2501eda14cbcSMatt Macy 	uint64_t otxg, txg;
2502525fe93dSMartin Matuska 	itx_async_node_t *ian, ian_search;
2503eda14cbcSMatt Macy 	avl_tree_t *t;
2504eda14cbcSMatt Macy 	avl_index_t where;
2505eda14cbcSMatt Macy 	list_t clean_list;
2506eda14cbcSMatt Macy 	itx_t *itx;
2507eda14cbcSMatt Macy 
2508eda14cbcSMatt Macy 	ASSERT(oid != 0);
2509eda14cbcSMatt Macy 	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
2510eda14cbcSMatt Macy 
2511eda14cbcSMatt Macy 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
2512eda14cbcSMatt Macy 		otxg = ZILTEST_TXG;
2513eda14cbcSMatt Macy 	else
2514eda14cbcSMatt Macy 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
2515eda14cbcSMatt Macy 
2516eda14cbcSMatt Macy 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
2517eda14cbcSMatt Macy 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
2518eda14cbcSMatt Macy 
2519eda14cbcSMatt Macy 		mutex_enter(&itxg->itxg_lock);
2520eda14cbcSMatt Macy 		if (itxg->itxg_txg != txg) {
2521eda14cbcSMatt Macy 			mutex_exit(&itxg->itxg_lock);
2522eda14cbcSMatt Macy 			continue;
2523eda14cbcSMatt Macy 		}
2524eda14cbcSMatt Macy 
2525eda14cbcSMatt Macy 		/*
2526eda14cbcSMatt Macy 		 * Locate the object node and append its list.
2527eda14cbcSMatt Macy 		 */
2528eda14cbcSMatt Macy 		t = &itxg->itxg_itxs->i_async_tree;
2529525fe93dSMartin Matuska 		ian_search.ia_foid = oid;
2530525fe93dSMartin Matuska 		ian = avl_find(t, &ian_search, &where);
2531eda14cbcSMatt Macy 		if (ian != NULL)
2532eda14cbcSMatt Macy 			list_move_tail(&clean_list, &ian->ia_list);
2533eda14cbcSMatt Macy 		mutex_exit(&itxg->itxg_lock);
2534eda14cbcSMatt Macy 	}
2535c0a83fe0SMartin Matuska 	while ((itx = list_remove_head(&clean_list)) != NULL) {
2536eda14cbcSMatt Macy 		/* commit itxs should never be on the async lists. */
2537eda14cbcSMatt Macy 		ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
2538eda14cbcSMatt Macy 		zil_itx_destroy(itx);
2539eda14cbcSMatt Macy 	}
2540eda14cbcSMatt Macy 	list_destroy(&clean_list);
2541eda14cbcSMatt Macy }
2542eda14cbcSMatt Macy 
2543eda14cbcSMatt Macy void
2544eda14cbcSMatt Macy zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
2545eda14cbcSMatt Macy {
2546eda14cbcSMatt Macy 	uint64_t txg;
2547eda14cbcSMatt Macy 	itxg_t *itxg;
2548eda14cbcSMatt Macy 	itxs_t *itxs, *clean = NULL;
2549eda14cbcSMatt Macy 
2550eda14cbcSMatt Macy 	/*
2551eda14cbcSMatt Macy 	 * Ensure the data of a renamed file is committed before the rename.
2552eda14cbcSMatt Macy 	 */
2553eda14cbcSMatt Macy 	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
2554eda14cbcSMatt Macy 		zil_async_to_sync(zilog, itx->itx_oid);
2555eda14cbcSMatt Macy 
2556eda14cbcSMatt Macy 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
2557eda14cbcSMatt Macy 		txg = ZILTEST_TXG;
2558eda14cbcSMatt Macy 	else
2559eda14cbcSMatt Macy 		txg = dmu_tx_get_txg(tx);
2560eda14cbcSMatt Macy 
2561eda14cbcSMatt Macy 	itxg = &zilog->zl_itxg[txg & TXG_MASK];
2562eda14cbcSMatt Macy 	mutex_enter(&itxg->itxg_lock);
2563eda14cbcSMatt Macy 	itxs = itxg->itxg_itxs;
2564eda14cbcSMatt Macy 	if (itxg->itxg_txg != txg) {
2565eda14cbcSMatt Macy 		if (itxs != NULL) {
2566eda14cbcSMatt Macy 			/*
2567eda14cbcSMatt Macy 			 * The zil_clean callback hasn't got around to cleaning
2568eda14cbcSMatt Macy 			 * this itxg. Save the itxs for release below.
2569eda14cbcSMatt Macy 			 * This should be rare.
2570eda14cbcSMatt Macy 			 */
2571eda14cbcSMatt Macy 			zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
257233b8c039SMartin Matuska 			    "txg %llu", (u_longlong_t)itxg->itxg_txg);
2573eda14cbcSMatt Macy 			clean = itxg->itxg_itxs;
2574eda14cbcSMatt Macy 		}
2575eda14cbcSMatt Macy 		itxg->itxg_txg = txg;
2576eda14cbcSMatt Macy 		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
2577eda14cbcSMatt Macy 		    KM_SLEEP);
2578eda14cbcSMatt Macy 
2579eda14cbcSMatt Macy 		list_create(&itxs->i_sync_list, sizeof (itx_t),
2580eda14cbcSMatt Macy 		    offsetof(itx_t, itx_node));
2581eda14cbcSMatt Macy 		avl_create(&itxs->i_async_tree, zil_aitx_compare,
2582eda14cbcSMatt Macy 		    sizeof (itx_async_node_t),
2583eda14cbcSMatt Macy 		    offsetof(itx_async_node_t, ia_node));
2584eda14cbcSMatt Macy 	}
2585eda14cbcSMatt Macy 	if (itx->itx_sync) {
2586eda14cbcSMatt Macy 		list_insert_tail(&itxs->i_sync_list, itx);
2587eda14cbcSMatt Macy 	} else {
2588eda14cbcSMatt Macy 		avl_tree_t *t = &itxs->i_async_tree;
2589eda14cbcSMatt Macy 		uint64_t foid =
2590eda14cbcSMatt Macy 		    LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid);
2591eda14cbcSMatt Macy 		itx_async_node_t *ian;
2592eda14cbcSMatt Macy 		avl_index_t where;
2593eda14cbcSMatt Macy 
2594eda14cbcSMatt Macy 		ian = avl_find(t, &foid, &where);
2595eda14cbcSMatt Macy 		if (ian == NULL) {
2596eda14cbcSMatt Macy 			ian = kmem_alloc(sizeof (itx_async_node_t),
2597eda14cbcSMatt Macy 			    KM_SLEEP);
2598eda14cbcSMatt Macy 			list_create(&ian->ia_list, sizeof (itx_t),
2599eda14cbcSMatt Macy 			    offsetof(itx_t, itx_node));
2600eda14cbcSMatt Macy 			ian->ia_foid = foid;
2601eda14cbcSMatt Macy 			avl_insert(t, ian, where);
2602eda14cbcSMatt Macy 		}
2603eda14cbcSMatt Macy 		list_insert_tail(&ian->ia_list, itx);
2604eda14cbcSMatt Macy 	}
2605eda14cbcSMatt Macy 
2606eda14cbcSMatt Macy 	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
2607eda14cbcSMatt Macy 
2608eda14cbcSMatt Macy 	/*
2609eda14cbcSMatt Macy 	 * We don't want to dirty the ZIL using ZILTEST_TXG, because
2610eda14cbcSMatt Macy 	 * zil_clean() will never be called using ZILTEST_TXG. Thus, we
2611eda14cbcSMatt Macy 	 * need to be careful to always dirty the ZIL using the "real"
2612eda14cbcSMatt Macy 	 * TXG (not itxg_txg) even when the SPA is frozen.
2613eda14cbcSMatt Macy 	 */
2614eda14cbcSMatt Macy 	zilog_dirty(zilog, dmu_tx_get_txg(tx));
2615eda14cbcSMatt Macy 	mutex_exit(&itxg->itxg_lock);
2616eda14cbcSMatt Macy 
2617eda14cbcSMatt Macy 	/* Release the old itxs now we've dropped the lock */
2618eda14cbcSMatt Macy 	if (clean != NULL)
2619eda14cbcSMatt Macy 		zil_itxg_clean(clean);
2620eda14cbcSMatt Macy }
2621eda14cbcSMatt Macy 
2622eda14cbcSMatt Macy /*
2623eda14cbcSMatt Macy  * If there are any in-memory intent log transactions which have now been
2624eda14cbcSMatt Macy  * synced then start up a taskq to free them. We should only do this after we
2625eda14cbcSMatt Macy  * have written out the uberblocks (i.e. txg has been committed) so that
2626eda14cbcSMatt Macy  * have written out the uberblocks (i.e. txg has been committed) so that we
2627eda14cbcSMatt Macy  * don't inadvertently clean out in-memory log records that would be required
2628eda14cbcSMatt Macy  */
2629eda14cbcSMatt Macy void
2630eda14cbcSMatt Macy zil_clean(zilog_t *zilog, uint64_t synced_txg)
2631eda14cbcSMatt Macy {
2632eda14cbcSMatt Macy 	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
2633eda14cbcSMatt Macy 	itxs_t *clean_me;
2634eda14cbcSMatt Macy 
2635eda14cbcSMatt Macy 	ASSERT3U(synced_txg, <, ZILTEST_TXG);
2636eda14cbcSMatt Macy 
2637eda14cbcSMatt Macy 	mutex_enter(&itxg->itxg_lock);
2638eda14cbcSMatt Macy 	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
2639eda14cbcSMatt Macy 		mutex_exit(&itxg->itxg_lock);
2640eda14cbcSMatt Macy 		return;
2641eda14cbcSMatt Macy 	}
2642eda14cbcSMatt Macy 	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
2643eda14cbcSMatt Macy 	ASSERT3U(itxg->itxg_txg, !=, 0);
2644eda14cbcSMatt Macy 	clean_me = itxg->itxg_itxs;
2645eda14cbcSMatt Macy 	itxg->itxg_itxs = NULL;
2646eda14cbcSMatt Macy 	itxg->itxg_txg = 0;
2647eda14cbcSMatt Macy 	mutex_exit(&itxg->itxg_lock);
2648eda14cbcSMatt Macy 	/*
2649eda14cbcSMatt Macy 	 * Preferably start a task queue to free up the old itxs, but
2650eda14cbcSMatt Macy 	 * if taskq_dispatch can't allocate resources to do that, then
2651eda14cbcSMatt Macy 	 * free them in-line. This should be rare. Note that using
2652eda14cbcSMatt Macy 	 * TQ_SLEEP here created a bad performance problem.
2653eda14cbcSMatt Macy 	 */
2654eda14cbcSMatt Macy 	ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
2655eda14cbcSMatt Macy 	ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
2656eda14cbcSMatt Macy 	taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
26573f9d360cSMartin Matuska 	    zil_itxg_clean, clean_me, TQ_NOSLEEP);
2658eda14cbcSMatt Macy 	if (id == TASKQID_INVALID)
2659eda14cbcSMatt Macy 		zil_itxg_clean(clean_me);
2660eda14cbcSMatt Macy }
2661eda14cbcSMatt Macy 
2662eda14cbcSMatt Macy /*
2663eda14cbcSMatt Macy  * This function will traverse the queue of itxs that need to be
2664eda14cbcSMatt Macy  * committed, and move them onto the ZIL's zl_itx_commit_list.
2665eda14cbcSMatt Macy  */
26660a97523dSMartin Matuska static uint64_t
2667eda14cbcSMatt Macy zil_get_commit_list(zilog_t *zilog)
2668eda14cbcSMatt Macy {
26690a97523dSMartin Matuska 	uint64_t otxg, txg, wtxg = 0;
2670eda14cbcSMatt Macy 	list_t *commit_list = &zilog->zl_itx_commit_list;
2671eda14cbcSMatt Macy 
2672eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2673eda14cbcSMatt Macy 
2674eda14cbcSMatt Macy 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
2675eda14cbcSMatt Macy 		otxg = ZILTEST_TXG;
2676eda14cbcSMatt Macy 	else
2677eda14cbcSMatt Macy 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
2678eda14cbcSMatt Macy 
2679eda14cbcSMatt Macy 	/*
2680eda14cbcSMatt Macy 	 * This is inherently racy, since there is nothing to prevent
2681eda14cbcSMatt Macy 	 * the last synced txg from changing. That's okay since we'll
2682eda14cbcSMatt Macy 	 * only commit things in the future.
2683eda14cbcSMatt Macy 	 */
2684eda14cbcSMatt Macy 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
2685eda14cbcSMatt Macy 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
2686eda14cbcSMatt Macy 
2687eda14cbcSMatt Macy 		mutex_enter(&itxg->itxg_lock);
2688eda14cbcSMatt Macy 		if (itxg->itxg_txg != txg) {
2689eda14cbcSMatt Macy 			mutex_exit(&itxg->itxg_lock);
2690eda14cbcSMatt Macy 			continue;
2691eda14cbcSMatt Macy 		}
2692eda14cbcSMatt Macy 
2693eda14cbcSMatt Macy 		/*
2694eda14cbcSMatt Macy 		 * If we're adding itx records to the zl_itx_commit_list,
2695eda14cbcSMatt Macy 		 * then the zil better be dirty in this "txg". We can assert
2696eda14cbcSMatt Macy 		 * that here since we're holding the itxg_lock which will
2697eda14cbcSMatt Macy 		 * prevent spa_sync from cleaning it. Once we add the itxs
2698eda14cbcSMatt Macy 		 * to the zl_itx_commit_list we must commit it to disk even
2699eda14cbcSMatt Macy 		 * if it's unnecessary (i.e. the txg was synced).
2700eda14cbcSMatt Macy 		 */
2701eda14cbcSMatt Macy 		ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
2702eda14cbcSMatt Macy 		    spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
27030a97523dSMartin Matuska 		list_t *sync_list = &itxg->itxg_itxs->i_sync_list;
2704b356da80SMartin Matuska 		itx_t *itx = NULL;
27050a97523dSMartin Matuska 		if (unlikely(zilog->zl_suspend > 0)) {
27060a97523dSMartin Matuska 			/*
27070a97523dSMartin Matuska 			 * ZIL was just suspended, but we lost the race.
27080a97523dSMartin Matuska 			 * Allow all earlier itxs to be committed, but ask
27090a97523dSMartin Matuska 			 * Allow all earlier itxs to be committed, but ask the
27100a97523dSMartin Matuska 			 * caller to do txg_wait_synced(txg) for any new ones.
27110a97523dSMartin Matuska 			if (!list_is_empty(sync_list))
27120a97523dSMartin Matuska 				wtxg = MAX(wtxg, txg);
27130a97523dSMartin Matuska 		} else {
2714b356da80SMartin Matuska 			itx = list_head(sync_list);
27150a97523dSMartin Matuska 			list_move_tail(commit_list, sync_list);
27160a97523dSMartin Matuska 		}
2717eda14cbcSMatt Macy 
2718eda14cbcSMatt Macy 		mutex_exit(&itxg->itxg_lock);
2719b356da80SMartin Matuska 
2720b356da80SMartin Matuska 		while (itx != NULL) {
2721b356da80SMartin Matuska 			uint64_t s = zil_itx_full_size(itx);
2722b356da80SMartin Matuska 			zilog->zl_cur_size += s;
2723b356da80SMartin Matuska 			zilog->zl_cur_left += s;
2724b356da80SMartin Matuska 			s = zil_itx_record_size(itx);
2725b356da80SMartin Matuska 			zilog->zl_cur_max = MAX(zilog->zl_cur_max, s);
2726b356da80SMartin Matuska 			itx = list_next(commit_list, itx);
2727b356da80SMartin Matuska 		}
2728eda14cbcSMatt Macy 	}
27290a97523dSMartin Matuska 	return (wtxg);
2730eda14cbcSMatt Macy }
2731eda14cbcSMatt Macy 
2732eda14cbcSMatt Macy /*
2733eda14cbcSMatt Macy  * Move the async itxs for a specified object to commit into sync lists.
2734eda14cbcSMatt Macy  */
2735eda14cbcSMatt Macy void
2736eda14cbcSMatt Macy zil_async_to_sync(zilog_t *zilog, uint64_t foid)
2737eda14cbcSMatt Macy {
2738eda14cbcSMatt Macy 	uint64_t otxg, txg;
2739525fe93dSMartin Matuska 	itx_async_node_t *ian, ian_search;
2740eda14cbcSMatt Macy 	avl_tree_t *t;
2741eda14cbcSMatt Macy 	avl_index_t where;
2742eda14cbcSMatt Macy 
2743eda14cbcSMatt Macy 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
2744eda14cbcSMatt Macy 		otxg = ZILTEST_TXG;
2745eda14cbcSMatt Macy 	else
2746eda14cbcSMatt Macy 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
2747eda14cbcSMatt Macy 
2748eda14cbcSMatt Macy 	/*
2749eda14cbcSMatt Macy 	 * This is inherently racy, since there is nothing to prevent
2750eda14cbcSMatt Macy 	 * the last synced txg from changing.
2751eda14cbcSMatt Macy 	 */
2752eda14cbcSMatt Macy 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
2753eda14cbcSMatt Macy 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
2754eda14cbcSMatt Macy 
2755eda14cbcSMatt Macy 		mutex_enter(&itxg->itxg_lock);
2756eda14cbcSMatt Macy 		if (itxg->itxg_txg != txg) {
2757eda14cbcSMatt Macy 			mutex_exit(&itxg->itxg_lock);
2758eda14cbcSMatt Macy 			continue;
2759eda14cbcSMatt Macy 		}
2760eda14cbcSMatt Macy 
2761eda14cbcSMatt Macy 		/*
2762eda14cbcSMatt Macy 		 * If a foid is specified then find that node and append its
2763eda14cbcSMatt Macy 		 * list. Otherwise walk the tree appending all the lists
2764eda14cbcSMatt Macy 		 * to the sync list. We add to the end rather than the
2765eda14cbcSMatt Macy 		 * beginning to ensure the create has happened.
2766eda14cbcSMatt Macy 		 */
2767eda14cbcSMatt Macy 		t = &itxg->itxg_itxs->i_async_tree;
2768eda14cbcSMatt Macy 		if (foid != 0) {
2769525fe93dSMartin Matuska 			ian_search.ia_foid = foid;
2770525fe93dSMartin Matuska 			ian = avl_find(t, &ian_search, &where);
2771eda14cbcSMatt Macy 			if (ian != NULL) {
2772eda14cbcSMatt Macy 				list_move_tail(&itxg->itxg_itxs->i_sync_list,
2773eda14cbcSMatt Macy 				    &ian->ia_list);
2774eda14cbcSMatt Macy 			}
2775eda14cbcSMatt Macy 		} else {
2776eda14cbcSMatt Macy 			void *cookie = NULL;
2777eda14cbcSMatt Macy 
2778eda14cbcSMatt Macy 			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
2779eda14cbcSMatt Macy 				list_move_tail(&itxg->itxg_itxs->i_sync_list,
2780eda14cbcSMatt Macy 				    &ian->ia_list);
2781eda14cbcSMatt Macy 				list_destroy(&ian->ia_list);
2782eda14cbcSMatt Macy 				kmem_free(ian, sizeof (itx_async_node_t));
2783eda14cbcSMatt Macy 			}
2784eda14cbcSMatt Macy 		}
2785eda14cbcSMatt Macy 		mutex_exit(&itxg->itxg_lock);
2786eda14cbcSMatt Macy 	}
2787eda14cbcSMatt Macy }
2788eda14cbcSMatt Macy 
2789eda14cbcSMatt Macy /*
2790eda14cbcSMatt Macy  * This function will prune commit itxs that are at the head of the
2791eda14cbcSMatt Macy  * commit list (it won't prune past the first non-commit itx), and
2792eda14cbcSMatt Macy  * either: a) attach them to the last lwb that's still pending
2793eda14cbcSMatt Macy  * completion, or b) skip them altogether.
2794eda14cbcSMatt Macy  *
2795eda14cbcSMatt Macy  * This is used as a performance optimization to prevent commit itxs
2796eda14cbcSMatt Macy  * from generating new lwbs when it's unnecessary to do so.
2797eda14cbcSMatt Macy  */
2798eda14cbcSMatt Macy static void
2799eda14cbcSMatt Macy zil_prune_commit_list(zilog_t *zilog)
2800eda14cbcSMatt Macy {
2801eda14cbcSMatt Macy 	itx_t *itx;
2802eda14cbcSMatt Macy 
2803eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2804eda14cbcSMatt Macy 
2805eda14cbcSMatt Macy 	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
2806eda14cbcSMatt Macy 		lr_t *lrc = &itx->itx_lr;
2807eda14cbcSMatt Macy 		if (lrc->lrc_txtype != TX_COMMIT)
2808eda14cbcSMatt Macy 			break;
2809eda14cbcSMatt Macy 
2810eda14cbcSMatt Macy 		mutex_enter(&zilog->zl_lock);
2811eda14cbcSMatt Macy 
2812eda14cbcSMatt Macy 		lwb_t *last_lwb = zilog->zl_last_lwb_opened;
2813eda14cbcSMatt Macy 		if (last_lwb == NULL ||
2814eda14cbcSMatt Macy 		    last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) {
2815eda14cbcSMatt Macy 			/*
2816eda14cbcSMatt Macy 			 * All of the itxs this waiter was waiting on
2817eda14cbcSMatt Macy 			 * must have already completed (or there were
2818eda14cbcSMatt Macy 			 * never any itx's for it to wait on), so it's
2819eda14cbcSMatt Macy 			 * safe to skip this waiter and mark it done.
2820eda14cbcSMatt Macy 			 */
2821eda14cbcSMatt Macy 			zil_commit_waiter_skip(itx->itx_private);
2822eda14cbcSMatt Macy 		} else {
2823eda14cbcSMatt Macy 			zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
2824eda14cbcSMatt Macy 		}
2825eda14cbcSMatt Macy 
2826eda14cbcSMatt Macy 		mutex_exit(&zilog->zl_lock);
2827eda14cbcSMatt Macy 
2828eda14cbcSMatt Macy 		list_remove(&zilog->zl_itx_commit_list, itx);
2829eda14cbcSMatt Macy 		zil_itx_destroy(itx);
2830eda14cbcSMatt Macy 	}
2831eda14cbcSMatt Macy 
2832eda14cbcSMatt Macy 	IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
2833eda14cbcSMatt Macy }
2834eda14cbcSMatt Macy 
2835eda14cbcSMatt Macy static void
2836eda14cbcSMatt Macy zil_commit_writer_stall(zilog_t *zilog)
2837eda14cbcSMatt Macy {
2838eda14cbcSMatt Macy 	/*
2839eda14cbcSMatt Macy 	 * When zio_alloc_zil() fails to allocate the next lwb block on
2840eda14cbcSMatt Macy 	 * disk, we must call txg_wait_synced() to ensure all of the
2841eda14cbcSMatt Macy 	 * lwbs in the zilog's zl_lwb_list are synced and then freed (in
2842eda14cbcSMatt Macy 	 * zil_sync()), such that any subsequent ZIL writer (i.e. a call
2843eda14cbcSMatt Macy 	 * to zil_process_commit_list()) will have to call zil_create(),
2844eda14cbcSMatt Macy 	 * and start a new ZIL chain.
2845eda14cbcSMatt Macy 	 *
2846eda14cbcSMatt Macy 	 * Since zio_alloc_zil() failed, the lwb that was previously
2847eda14cbcSMatt Macy 	 * issued does not have a pointer to the "next" lwb on disk.
2848eda14cbcSMatt Macy 	 * Thus, if another ZIL writer thread was to allocate the "next"
2849eda14cbcSMatt Macy 	 * on-disk lwb, that block could be leaked in the event of a
2850eda14cbcSMatt Macy 	 * crash (because the previous lwb on-disk would not point to
2851eda14cbcSMatt Macy 	 * it).
2852eda14cbcSMatt Macy 	 *
2853eda14cbcSMatt Macy 	 * We must hold the zilog's zl_issuer_lock while we do this, to
2854eda14cbcSMatt Macy 	 * ensure no new threads enter zil_process_commit_list() until
2855eda14cbcSMatt Macy 	 * all lwb's in the zl_lwb_list have been synced and freed
2856eda14cbcSMatt Macy 	 * (which is achieved via the txg_wait_synced() call).
2857eda14cbcSMatt Macy 	 */
2858eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2859ce4dcb97SMartin Matuska 	ZIL_STAT_BUMP(zilog, zil_commit_stall_count);
2860eda14cbcSMatt Macy 	txg_wait_synced(zilog->zl_dmu_pool, 0);
2861c0a83fe0SMartin Matuska 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
2862eda14cbcSMatt Macy }
2863eda14cbcSMatt Macy 
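/*
 * At the end of a write burst (no more itxs queued), record the burst in
 * the zl_prev_* rotor (via zil_lwb_plan()) so that future lwbs can be
 * sized accordingly, and reset the current-burst accounting.
 */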
28646c1e79dfSMartin Matuska static void
28656c1e79dfSMartin Matuska zil_burst_done(zilog_t *zilog)
28666c1e79dfSMartin Matuska {
28676c1e79dfSMartin Matuska 	if (!list_is_empty(&zilog->zl_itx_commit_list) ||
2868b356da80SMartin Matuska 	    zilog->zl_cur_size == 0)
28696c1e79dfSMartin Matuska 		return;
28706c1e79dfSMartin Matuska 
28716c1e79dfSMartin Matuska 	if (zilog->zl_parallel)
28726c1e79dfSMartin Matuska 		zilog->zl_parallel--;
28736c1e79dfSMartin Matuska 
2874b356da80SMartin Matuska 	uint_t r = (zilog->zl_prev_rotor + 1) & (ZIL_BURSTS - 1);
2875b356da80SMartin Matuska 	zilog->zl_prev_rotor = r;
2876b356da80SMartin Matuska 	zilog->zl_prev_opt[r] = zil_lwb_plan(zilog, zilog->zl_cur_size,
2877b356da80SMartin Matuska 	    &zilog->zl_prev_min[r]);
2878b356da80SMartin Matuska 
2879b356da80SMartin Matuska 	zilog->zl_cur_size = 0;
2880b356da80SMartin Matuska 	zilog->zl_cur_max = 0;
2881b356da80SMartin Matuska 	zilog->zl_cur_left = 0;
28826c1e79dfSMartin Matuska }
28836c1e79dfSMartin Matuska 
2884eda14cbcSMatt Macy /*
2885eda14cbcSMatt Macy  * This function will traverse the commit list, creating new lwbs as
2886eda14cbcSMatt Macy  * needed, and committing the itxs from the commit list to these newly
2887eda14cbcSMatt Macy  * created lwbs. Additionally, as a new lwb is created, the previous
2888eda14cbcSMatt Macy  * lwb will be issued to the zio layer to be written to disk.
2889eda14cbcSMatt Macy  */
2890eda14cbcSMatt Macy static void
28914e8d558cSMartin Matuska zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
2892eda14cbcSMatt Macy {
2893eda14cbcSMatt Macy 	spa_t *spa = zilog->zl_spa;
2894eda14cbcSMatt Macy 	list_t nolwb_itxs;
2895eda14cbcSMatt Macy 	list_t nolwb_waiters;
289615f0b8c3SMartin Matuska 	lwb_t *lwb, *plwb;
2897eda14cbcSMatt Macy 	itx_t *itx;
2898eda14cbcSMatt Macy 
2899eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
2900eda14cbcSMatt Macy 
2901eda14cbcSMatt Macy 	/*
2902eda14cbcSMatt Macy 	 * Return if there's nothing to commit before we dirty the fs by
2903eda14cbcSMatt Macy 	 * calling zil_create().
2904eda14cbcSMatt Macy 	 */
2905c0a83fe0SMartin Matuska 	if (list_is_empty(&zilog->zl_itx_commit_list))
2906eda14cbcSMatt Macy 		return;
2907eda14cbcSMatt Macy 
2908eda14cbcSMatt Macy 	list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
2909eda14cbcSMatt Macy 	list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
2910eda14cbcSMatt Macy 	    offsetof(zil_commit_waiter_t, zcw_node));
2911eda14cbcSMatt Macy 
2912eda14cbcSMatt Macy 	lwb = list_tail(&zilog->zl_lwb_list);
2913eda14cbcSMatt Macy 	if (lwb == NULL) {
2914eda14cbcSMatt Macy 		lwb = zil_create(zilog);
2915eda14cbcSMatt Macy 	} else {
2916c03c5b1cSMartin Matuska 		/*
2917c03c5b1cSMartin Matuska 		 * Activate SPA_FEATURE_ZILSAXATTR for the cases where ZIL will
2918c03c5b1cSMartin Matuska 		 * have already been created (zl_lwb_list not empty).
2919c03c5b1cSMartin Matuska 		 */
2920c03c5b1cSMartin Matuska 		zil_commit_activate_saxattr_feature(zilog);
2921315ee00fSMartin Matuska 		ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
2922315ee00fSMartin Matuska 		    lwb->lwb_state == LWB_STATE_OPENED);
29236c1e79dfSMartin Matuska 
29246c1e79dfSMartin Matuska 		/*
29256c1e79dfSMartin Matuska 		 * If the lwb is still opened, it means the workload is really
29266c1e79dfSMartin Matuska 		 * multi-threaded and we got a chance at write aggregation.
29276c1e79dfSMartin Matuska 		 * If it is not opened yet, but the previous lwb is still not
29286c1e79dfSMartin Matuska 		 * flushed, it still means the workload is multi-threaded, but
29296c1e79dfSMartin Matuska 		 * there was too much time between the commits to aggregate, so
29306c1e79dfSMartin Matuska 		 * we keep trying aggregation next time, but with less hope.
29316c1e79dfSMartin Matuska 		 */
29326c1e79dfSMartin Matuska 		if (lwb->lwb_state == LWB_STATE_OPENED) {
29336c1e79dfSMartin Matuska 			zilog->zl_parallel = ZIL_BURSTS;
29346c1e79dfSMartin Matuska 		} else if ((plwb = list_prev(&zilog->zl_lwb_list, lwb))
29356c1e79dfSMartin Matuska 		    != NULL && plwb->lwb_state != LWB_STATE_FLUSH_DONE) {
29366c1e79dfSMartin Matuska 			zilog->zl_parallel = MAX(zilog->zl_parallel,
29376c1e79dfSMartin Matuska 			    ZIL_BURSTS / 2);
29386c1e79dfSMartin Matuska 		}
2939eda14cbcSMatt Macy 	}
2940eda14cbcSMatt Macy 
2941c0a83fe0SMartin Matuska 	while ((itx = list_remove_head(&zilog->zl_itx_commit_list)) != NULL) {
2942eda14cbcSMatt Macy 		lr_t *lrc = &itx->itx_lr;
2943eda14cbcSMatt Macy 		uint64_t txg = lrc->lrc_txg;
2944eda14cbcSMatt Macy 
2945eda14cbcSMatt Macy 		ASSERT3U(txg, !=, 0);
2946eda14cbcSMatt Macy 
2947eda14cbcSMatt Macy 		if (lrc->lrc_txtype == TX_COMMIT) {
2948eda14cbcSMatt Macy 			DTRACE_PROBE2(zil__process__commit__itx,
2949eda14cbcSMatt Macy 			    zilog_t *, zilog, itx_t *, itx);
2950eda14cbcSMatt Macy 		} else {
2951eda14cbcSMatt Macy 			DTRACE_PROBE2(zil__process__normal__itx,
2952eda14cbcSMatt Macy 			    zilog_t *, zilog, itx_t *, itx);
2953eda14cbcSMatt Macy 		}
2954eda14cbcSMatt Macy 
2955eda14cbcSMatt Macy 		boolean_t synced = txg <= spa_last_synced_txg(spa);
2956eda14cbcSMatt Macy 		boolean_t frozen = txg > spa_freeze_txg(spa);
2957eda14cbcSMatt Macy 
2958eda14cbcSMatt Macy 		/*
2959eda14cbcSMatt Macy 		 * If the txg of this itx has already been synced out, then
2960eda14cbcSMatt Macy 		 * we don't need to commit this itx to an lwb. This is
2961eda14cbcSMatt Macy 		 * because the data of this itx will have already been
2962eda14cbcSMatt Macy 		 * written to the main pool. This is inherently racy, and
2963eda14cbcSMatt Macy 		 * it's still ok to commit an itx whose txg has already
2964eda14cbcSMatt Macy 		 * been synced; this will result in a write that's
2965eda14cbcSMatt Macy 		 * unnecessary, but will do no harm.
2966eda14cbcSMatt Macy 		 *
2967eda14cbcSMatt Macy 		 * With that said, we always want to commit TX_COMMIT itxs
2968eda14cbcSMatt Macy 		 * to an lwb, regardless of whether or not that itx's txg
2969eda14cbcSMatt Macy 		 * has been synced out. We do this to ensure any OPENED lwb
2970eda14cbcSMatt Macy 		 * will always have at least one zil_commit_waiter_t linked
2971eda14cbcSMatt Macy 		 * to the lwb.
2972eda14cbcSMatt Macy 		 *
2973eda14cbcSMatt Macy 		 * As a counter-example, if we skipped TX_COMMIT itx's
2974eda14cbcSMatt Macy 		 * whose txg had already been synced, the following
2975eda14cbcSMatt Macy 		 * situation could occur if we happened to be racing with
2976eda14cbcSMatt Macy 		 * spa_sync:
2977eda14cbcSMatt Macy 		 *
2978eda14cbcSMatt Macy 		 * 1. We commit a non-TX_COMMIT itx to an lwb, where the
2979eda14cbcSMatt Macy 		 *    itx's txg is 10 and the last synced txg is 9.
2980eda14cbcSMatt Macy 		 * 2. spa_sync finishes syncing out txg 10.
2981eda14cbcSMatt Macy 		 * 3. We move to the next itx in the list, it's a TX_COMMIT
2982eda14cbcSMatt Macy 		 *    whose txg is 10, so we skip it rather than committing
2983eda14cbcSMatt Macy 		 *    it to the lwb used in (1).
2984eda14cbcSMatt Macy 		 *
2985eda14cbcSMatt Macy 		 * If the itx that is skipped in (3) is the last TX_COMMIT
2986eda14cbcSMatt Macy 		 * itx in the commit list, then it's possible for the lwb
2987eda14cbcSMatt Macy 		 * used in (1) to remain in the OPENED state indefinitely.
2988eda14cbcSMatt Macy 		 *
2989eda14cbcSMatt Macy 		 * To prevent the above scenario from occurring, ensuring
2990eda14cbcSMatt Macy 		 * that once an lwb is OPENED it will transition to ISSUED
2991eda14cbcSMatt Macy 		 * and eventually DONE, we always commit TX_COMMIT itx's to
2992eda14cbcSMatt Macy 		 * an lwb here, even if that itx's txg has already been
2993eda14cbcSMatt Macy 		 * synced.
2994eda14cbcSMatt Macy 		 *
2995eda14cbcSMatt Macy 		 * Finally, if the pool is frozen, we _always_ commit the
2996eda14cbcSMatt Macy 		 * itx.  The point of freezing the pool is to prevent data
2997eda14cbcSMatt Macy 		 * from being written to the main pool via spa_sync, and
2998eda14cbcSMatt Macy 		 * instead rely solely on the ZIL to persistently store the
2999eda14cbcSMatt Macy 		 * data; i.e.  when the pool is frozen, the last synced txg
3000eda14cbcSMatt Macy 		 * value can't be trusted.
3001eda14cbcSMatt Macy 		 */
3002eda14cbcSMatt Macy 		if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) {
3003eda14cbcSMatt Macy 			if (lwb != NULL) {
30044e8d558cSMartin Matuska 				lwb = zil_lwb_assign(zilog, lwb, itx, ilwbs);
30054e8d558cSMartin Matuska 				if (lwb == NULL) {
3006eda14cbcSMatt Macy 					list_insert_tail(&nolwb_itxs, itx);
30074e8d558cSMartin Matuska 				} else if ((zcw->zcw_lwb != NULL &&
30084e8d558cSMartin Matuska 				    zcw->zcw_lwb != lwb) || zcw->zcw_done) {
30094e8d558cSMartin Matuska 					/*
30104e8d558cSMartin Matuska 					 * Our lwb is done; leave the rest of
30114e8d558cSMartin Matuska 					 * the itx list to somebody who cares.
30124e8d558cSMartin Matuska 					 */
30136c1e79dfSMartin Matuska 					zilog->zl_parallel = ZIL_BURSTS;
3014b356da80SMartin Matuska 					zilog->zl_cur_left -=
3015b356da80SMartin Matuska 					    zil_itx_full_size(itx);
30164e8d558cSMartin Matuska 					break;
30174e8d558cSMartin Matuska 				}
3018eda14cbcSMatt Macy 			} else {
3019eda14cbcSMatt Macy 				if (lrc->lrc_txtype == TX_COMMIT) {
3020eda14cbcSMatt Macy 					zil_commit_waiter_link_nolwb(
3021eda14cbcSMatt Macy 					    itx->itx_private, &nolwb_waiters);
3022eda14cbcSMatt Macy 				}
3023eda14cbcSMatt Macy 				list_insert_tail(&nolwb_itxs, itx);
3024eda14cbcSMatt Macy 			}
3025b356da80SMartin Matuska 			zilog->zl_cur_left -= zil_itx_full_size(itx);
3026eda14cbcSMatt Macy 		} else {
3027eda14cbcSMatt Macy 			ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
3028b356da80SMartin Matuska 			zilog->zl_cur_left -= zil_itx_full_size(itx);
3029eda14cbcSMatt Macy 			zil_itx_destroy(itx);
3030eda14cbcSMatt Macy 		}
3031eda14cbcSMatt Macy 	}
3032eda14cbcSMatt Macy 
3033eda14cbcSMatt Macy 	if (lwb == NULL) {
3034eda14cbcSMatt Macy 		/*
3035eda14cbcSMatt Macy 		 * This indicates zio_alloc_zil() failed to allocate the
3036eda14cbcSMatt Macy 		 * "next" lwb on-disk. When this happens, we must stall
3037eda14cbcSMatt Macy 		 * the ZIL write pipeline; see the comment within
3038eda14cbcSMatt Macy 		 * zil_commit_writer_stall() for more details.
3039eda14cbcSMatt Macy 		 */
30404e8d558cSMartin Matuska 		while ((lwb = list_remove_head(ilwbs)) != NULL)
30414e8d558cSMartin Matuska 			zil_lwb_write_issue(zilog, lwb);
3042eda14cbcSMatt Macy 		zil_commit_writer_stall(zilog);
3043eda14cbcSMatt Macy 
3044eda14cbcSMatt Macy 		/*
3045eda14cbcSMatt Macy 		 * Additionally, we have to signal and mark the "nolwb"
3046eda14cbcSMatt Macy 		 * waiters as "done" here, since without an lwb, we
3047eda14cbcSMatt Macy 		 * can't do this via zil_lwb_flush_vdevs_done() like
3048eda14cbcSMatt Macy 		 * normal.
3049eda14cbcSMatt Macy 		 */
3050eda14cbcSMatt Macy 		zil_commit_waiter_t *zcw;
3051c0a83fe0SMartin Matuska 		while ((zcw = list_remove_head(&nolwb_waiters)) != NULL)
3052eda14cbcSMatt Macy 			zil_commit_waiter_skip(zcw);
3053eda14cbcSMatt Macy 
3054eda14cbcSMatt Macy 		/*
3055eda14cbcSMatt Macy 		 * And finally, we have to destroy the itx's that
3056eda14cbcSMatt Macy 		 * couldn't be committed to an lwb; this will also call
3057eda14cbcSMatt Macy 		 * the itx's callback if one exists for the itx.
3058eda14cbcSMatt Macy 		 */
3059c0a83fe0SMartin Matuska 		while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
3060eda14cbcSMatt Macy 			zil_itx_destroy(itx);
3061eda14cbcSMatt Macy 	} else {
3062eda14cbcSMatt Macy 		ASSERT(list_is_empty(&nolwb_waiters));
3063eda14cbcSMatt Macy 		ASSERT3P(lwb, !=, NULL);
3064315ee00fSMartin Matuska 		ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
3065315ee00fSMartin Matuska 		    lwb->lwb_state == LWB_STATE_OPENED);
3066eda14cbcSMatt Macy 
3067eda14cbcSMatt Macy 		/*
3068eda14cbcSMatt Macy 		 * At this point, the ZIL block pointed at by the "lwb"
3069315ee00fSMartin Matuska 		 * variable is in "new" or "opened" state.
3070eda14cbcSMatt Macy 		 *
3071315ee00fSMartin Matuska 		 * If it's "new", then no itxs have been committed to it, so
3072315ee00fSMartin Matuska 		 * there's no point in issuing its zio (i.e. it's "empty").
3073eda14cbcSMatt Macy 		 *
3074315ee00fSMartin Matuska 		 * If it's "opened", then it contains one or more itxs that
3075eda14cbcSMatt Macy 		 * eventually need to be committed to stable storage. In
3076eda14cbcSMatt Macy 		 * this case we intentionally do not issue the lwb's zio
3077eda14cbcSMatt Macy 		 * to disk yet, and instead rely on one of the following
3078eda14cbcSMatt Macy 		 * two mechanisms for issuing the zio:
3079eda14cbcSMatt Macy 		 *
3080315ee00fSMartin Matuska 		 * 1. Ideally, there will be more ZIL activity occurring on
3081315ee00fSMartin Matuska 		 * the system, such that this function will be immediately
3082315ee00fSMartin Matuska 		 * called again by a different thread, and this lwb will be
3083315ee00fSMartin Matuska 		 * closed by zil_lwb_assign().  This way, the lwb will be
3084315ee00fSMartin Matuska 		 * "full" when it is issued to disk, and we'll make the best
3085315ee00fSMartin Matuska 		 * use of the lwb's size that we can.
3086eda14cbcSMatt Macy 		 *
3087eda14cbcSMatt Macy 		 * 2. If there isn't sufficient ZIL activity occurring on
3088315ee00fSMartin Matuska 		 * the system, zil_commit_waiter() will close it and issue
3089315ee00fSMartin Matuska 		 * the zio.  If this occurs, the lwb is not guaranteed
3090eda14cbcSMatt Macy 		 * to be "full" by the time its zio is issued, which means
3091eda14cbcSMatt Macy 		 * the size of the lwb was "too large" given the amount
3092eda14cbcSMatt Macy 		 * of ZIL activity occurring on the system at that time.
3093eda14cbcSMatt Macy 		 *
3094eda14cbcSMatt Macy 		 * We do this for a couple of reasons:
3095eda14cbcSMatt Macy 		 *
3096eda14cbcSMatt Macy 		 * 1. To try and reduce the number of IOPs needed to
3097eda14cbcSMatt Macy 		 * write the same number of itxs. If an lwb has space
3098eda14cbcSMatt Macy 		 * available in its buffer for more itxs, and more itxs
3099eda14cbcSMatt Macy 		 * will be committed relatively soon (relative to the
3100eda14cbcSMatt Macy 		 * latency of performing a write), then it's beneficial
3101eda14cbcSMatt Macy 		 * to wait for these "next" itxs. This way, more itxs
3102eda14cbcSMatt Macy 		 * can be committed to stable storage with fewer writes.
3103eda14cbcSMatt Macy 		 *
3104eda14cbcSMatt Macy 		 * 2. To try and use the largest lwb block size that the
3105eda14cbcSMatt Macy 		 * incoming rate of itxs can support. Again, this is to
3106eda14cbcSMatt Macy 		 * try and pack as many itxs into as few lwbs as
3107eda14cbcSMatt Macy 		 * possible, without significantly impacting the latency
3108eda14cbcSMatt Macy 		 * of each individual itx.
3109eda14cbcSMatt Macy 		 */
31106c1e79dfSMartin Matuska 		if (lwb->lwb_state == LWB_STATE_OPENED && !zilog->zl_parallel) {
3111b356da80SMartin Matuska 			zil_burst_done(zilog);
3112315ee00fSMartin Matuska 			list_insert_tail(ilwbs, lwb);
31136c1e79dfSMartin Matuska 			lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
31144e8d558cSMartin Matuska 			if (lwb == NULL) {
31156c1e79dfSMartin Matuska 				while ((lwb = list_remove_head(ilwbs)) != NULL)
31164e8d558cSMartin Matuska 					zil_lwb_write_issue(zilog, lwb);
311715f0b8c3SMartin Matuska 				zil_commit_writer_stall(zilog);
311815f0b8c3SMartin Matuska 			}
311915f0b8c3SMartin Matuska 		}
3120eda14cbcSMatt Macy 	}
3121eda14cbcSMatt Macy }
3122eda14cbcSMatt Macy 
3123eda14cbcSMatt Macy /*
3124eda14cbcSMatt Macy  * This function is responsible for ensuring the passed in commit waiter
3125eda14cbcSMatt Macy  * (and associated commit itx) is committed to an lwb. If the waiter is
3126eda14cbcSMatt Macy  * not already committed to an lwb, all itxs in the zilog's queue of
3127eda14cbcSMatt Macy  * itxs will be processed. The assumption is the passed in waiter's
3128eda14cbcSMatt Macy  * itxs will be processed. The assumption is that the passed in waiter's
3129eda14cbcSMatt Macy  * commit itx will be found in the queue just like the other non-commit
3130eda14cbcSMatt Macy  * have been committed to an lwb.
3131eda14cbcSMatt Macy  *
3132eda14cbcSMatt Macy  * The lwb associated with the passed in waiter is not guaranteed to
3133eda14cbcSMatt Macy  * have been issued by the time this function completes. If the lwb is
3134eda14cbcSMatt Macy  * not issued, we rely on future calls to zil_commit_writer() to issue
3135eda14cbcSMatt Macy  * the lwb, or the timeout mechanism found in zil_commit_waiter().
3136eda14cbcSMatt Macy  */
31370a97523dSMartin Matuska static uint64_t
3138eda14cbcSMatt Macy zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
3139eda14cbcSMatt Macy {
31404e8d558cSMartin Matuska 	list_t ilwbs;
31414e8d558cSMartin Matuska 	lwb_t *lwb;
31420a97523dSMartin Matuska 	uint64_t wtxg = 0;
31434e8d558cSMartin Matuska 
3144eda14cbcSMatt Macy 	ASSERT(!MUTEX_HELD(&zilog->zl_lock));
3145eda14cbcSMatt Macy 	ASSERT(spa_writeable(zilog->zl_spa));
3146eda14cbcSMatt Macy 
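	/*
	 * Note: "ilwbs" collects the lwbs whose zios should be issued, so
	 * that the actual zil_lwb_write_issue() calls can be made below
	 * only after zl_issuer_lock has been dropped; this keeps the time
	 * the lock is held (and the contention on it) to a minimum.
	 */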
31474e8d558cSMartin Matuska 	list_create(&ilwbs, sizeof (lwb_t), offsetof(lwb_t, lwb_issue_node));
3148eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_issuer_lock);
3149eda14cbcSMatt Macy 
3150eda14cbcSMatt Macy 	if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
3151eda14cbcSMatt Macy 		/*
3152eda14cbcSMatt Macy 		 * It's possible that, while we were waiting to acquire
3153eda14cbcSMatt Macy 		 * the "zl_issuer_lock", another thread committed this
3154eda14cbcSMatt Macy 		 * waiter to an lwb. If that occurs, we bail out early,
3155eda14cbcSMatt Macy 		 * without processing any of the zilog's queue of itxs.
3156eda14cbcSMatt Macy 		 *
3157eda14cbcSMatt Macy 		 * On certain workloads and system configurations, the
3158eda14cbcSMatt Macy 		 * "zl_issuer_lock" can become highly contended. In an
3159eda14cbcSMatt Macy 		 * attempt to reduce this contention, we immediately drop
3160eda14cbcSMatt Macy 		 * the lock if the waiter has already been processed.
3161eda14cbcSMatt Macy 		 *
3162eda14cbcSMatt Macy 		 * We've measured this optimization to reduce CPU spent
3163eda14cbcSMatt Macy 		 * contending on this lock by up to 5%, using a system
3164eda14cbcSMatt Macy 		 * with 32 CPUs, low latency storage (~50 usec writes),
3165eda14cbcSMatt Macy 		 * and 1024 threads performing sync writes.
3166eda14cbcSMatt Macy 		 */
3167eda14cbcSMatt Macy 		goto out;
3168eda14cbcSMatt Macy 	}
3169eda14cbcSMatt Macy 
3170271171e0SMartin Matuska 	ZIL_STAT_BUMP(zilog, zil_commit_writer_count);
3171eda14cbcSMatt Macy 
31720a97523dSMartin Matuska 	wtxg = zil_get_commit_list(zilog);
3173eda14cbcSMatt Macy 	zil_prune_commit_list(zilog);
31744e8d558cSMartin Matuska 	zil_process_commit_list(zilog, zcw, &ilwbs);
3175eda14cbcSMatt Macy 
3176eda14cbcSMatt Macy out:
3177eda14cbcSMatt Macy 	mutex_exit(&zilog->zl_issuer_lock);
31784e8d558cSMartin Matuska 	while ((lwb = list_remove_head(&ilwbs)) != NULL)
31794e8d558cSMartin Matuska 		zil_lwb_write_issue(zilog, lwb);
31804e8d558cSMartin Matuska 	list_destroy(&ilwbs);
31810a97523dSMartin Matuska 	return (wtxg);
3182eda14cbcSMatt Macy }
3183eda14cbcSMatt Macy 
3184eda14cbcSMatt Macy static void
3185eda14cbcSMatt Macy zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
3186eda14cbcSMatt Macy {
3187eda14cbcSMatt Macy 	ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
3188eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&zcw->zcw_lock));
3189eda14cbcSMatt Macy 	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
3190eda14cbcSMatt Macy 
3191eda14cbcSMatt Macy 	lwb_t *lwb = zcw->zcw_lwb;
3192eda14cbcSMatt Macy 	ASSERT3P(lwb, !=, NULL);
3193315ee00fSMartin Matuska 	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW);
3194eda14cbcSMatt Macy 
3195eda14cbcSMatt Macy 	/*
3196eda14cbcSMatt Macy 	 * If the lwb has already been issued by another thread, we can
3197eda14cbcSMatt Macy 	 * immediately return since there's no work to be done (the
3198eda14cbcSMatt Macy 	 * point of this function is to issue the lwb). Additionally, we
3199eda14cbcSMatt Macy 	 * do this prior to acquiring the zl_issuer_lock, to avoid
3200eda14cbcSMatt Macy 	 * acquiring it when it's not necessary to do so.
3201eda14cbcSMatt Macy 	 */
3202315ee00fSMartin Matuska 	if (lwb->lwb_state != LWB_STATE_OPENED)
3203eda14cbcSMatt Macy 		return;
3204eda14cbcSMatt Macy 
3205eda14cbcSMatt Macy 	/*
32064e8d558cSMartin Matuska 	 * In order to call zil_lwb_write_close() we must hold the
3207eda14cbcSMatt Macy 	 * zilog's "zl_issuer_lock". We can't simply acquire that lock,
3208eda14cbcSMatt Macy 	 * since we're already holding the commit waiter's "zcw_lock",
3209eda14cbcSMatt Macy 	 * and those two locks are acquired in the opposite order
3210eda14cbcSMatt Macy 	 * elsewhere.
3211eda14cbcSMatt Macy 	 */
3212eda14cbcSMatt Macy 	mutex_exit(&zcw->zcw_lock);
3213eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_issuer_lock);
3214eda14cbcSMatt Macy 	mutex_enter(&zcw->zcw_lock);
3215eda14cbcSMatt Macy 
3216eda14cbcSMatt Macy 	/*
3217eda14cbcSMatt Macy 	 * Since we just dropped and re-acquired the commit waiter's
3218eda14cbcSMatt Macy 	 * lock, we have to re-check to see if the waiter was marked
3219eda14cbcSMatt Macy 	 * "done" during that process. If the waiter was marked "done",
3220eda14cbcSMatt Macy 	 * the "lwb" pointer is no longer valid (it can be free'd after
3221eda14cbcSMatt Macy 	 * the waiter is marked "done"), so without this check we could
3222eda14cbcSMatt Macy 	 * wind up with a use-after-free error below.
3223eda14cbcSMatt Macy 	 */
32244e8d558cSMartin Matuska 	if (zcw->zcw_done) {
3225315ee00fSMartin Matuska 		mutex_exit(&zilog->zl_issuer_lock);
3226315ee00fSMartin Matuska 		return;
32274e8d558cSMartin Matuska 	}
3228eda14cbcSMatt Macy 
3229eda14cbcSMatt Macy 	ASSERT3P(lwb, ==, zcw->zcw_lwb);
3230eda14cbcSMatt Macy 
3231eda14cbcSMatt Macy 	/*
3232eda14cbcSMatt Macy 	 * We've already checked this above, but since we hadn't acquired
3233eda14cbcSMatt Macy 	 * the zilog's zl_issuer_lock, we have to perform this check a
3234eda14cbcSMatt Macy 	 * second time while holding the lock.
3235eda14cbcSMatt Macy 	 *
3236eda14cbcSMatt Macy 	 * We don't need to hold the zl_lock since the lwb cannot transition
3237315ee00fSMartin Matuska 	 * from OPENED to CLOSED while we hold the zl_issuer_lock. The lwb
3238315ee00fSMartin Matuska 	 * _can_ transition from CLOSED to DONE, but it's OK to race with
3239eda14cbcSMatt Macy 	 * that transition since we treat the lwb the same, whether it's in
3240315ee00fSMartin Matuska 	 * the CLOSED, ISSUED or DONE states.
3241eda14cbcSMatt Macy 	 *
3242eda14cbcSMatt Macy 	 * The important thing is that we treat the lwb differently depending
3243315ee00fSMartin Matuska 	 * on whether it's OPENED or CLOSED, and block any other threads that
3244315ee00fSMartin Matuska 	 * might attempt to close/issue this lwb. For that reason we hold the
3245eda14cbcSMatt Macy 	 * zl_issuer_lock when checking the lwb_state; we must not call
3246315ee00fSMartin Matuska 	 * zil_lwb_write_close() if the lwb had already been closed/issued.
3247eda14cbcSMatt Macy 	 *
3248eda14cbcSMatt Macy 	 * See the comment above the lwb_state_t structure definition for
3249eda14cbcSMatt Macy 	 * more details on the lwb states, and locking requirements.
3250eda14cbcSMatt Macy 	 */
3251315ee00fSMartin Matuska 	if (lwb->lwb_state != LWB_STATE_OPENED) {
3252315ee00fSMartin Matuska 		mutex_exit(&zilog->zl_issuer_lock);
3253315ee00fSMartin Matuska 		return;
32544e8d558cSMartin Matuska 	}
3255eda14cbcSMatt Macy 
3256315ee00fSMartin Matuska 	/*
3257315ee00fSMartin Matuska 	 * We do not need zcw_lock once we hold zl_issuer_lock and know the
3258315ee00fSMartin Matuska 	 * lwb is still open.  But we have to drop it to avoid a deadlock in
3259315ee00fSMartin Matuska 	 * case a callback of a zio issued by zil_lwb_write_issue() tries to
3260315ee00fSMartin Matuska 	 * take it while zil_lwb_write_issue() is blocked attempting to issue
3261315ee00fSMartin Matuska 	 * the next lwb it found in the LWB_STATE_READY state.
3262315ee00fSMartin Matuska 	 */
3263315ee00fSMartin Matuska 	mutex_exit(&zcw->zcw_lock);
3264eda14cbcSMatt Macy 
3265eda14cbcSMatt Macy 	/*
3266eda14cbcSMatt Macy 	 * As described in the comments above zil_commit_waiter() and
3267eda14cbcSMatt Macy 	 * zil_process_commit_list(), we need to issue this lwb's zio
3268eda14cbcSMatt Macy 	 * since we've reached the commit waiter's timeout and it still
3269eda14cbcSMatt Macy 	 * hasn't been issued.
3270eda14cbcSMatt Macy 	 */
3271b356da80SMartin Matuska 	zil_burst_done(zilog);
3272315ee00fSMartin Matuska 	lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
3273eda14cbcSMatt Macy 
3274315ee00fSMartin Matuska 	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);
3275eda14cbcSMatt Macy 
3276eda14cbcSMatt Macy 	if (nlwb == NULL) {
3277eda14cbcSMatt Macy 		/*
32784e8d558cSMartin Matuska 		 * When zil_lwb_write_close() returns NULL, this
3279eda14cbcSMatt Macy 		 * indicates zio_alloc_zil() failed to allocate the
3280eda14cbcSMatt Macy 		 * "next" lwb on-disk. When this occurs, the ZIL write
3281eda14cbcSMatt Macy 		 * pipeline must be stalled; see the comment within the
3282eda14cbcSMatt Macy 		 * zil_commit_writer_stall() function for more details.
3283eda14cbcSMatt Macy 		 */
32844e8d558cSMartin Matuska 		zil_lwb_write_issue(zilog, lwb);
3285eda14cbcSMatt Macy 		zil_commit_writer_stall(zilog);
3286eda14cbcSMatt Macy 		mutex_exit(&zilog->zl_issuer_lock);
3287315ee00fSMartin Matuska 	} else {
3288315ee00fSMartin Matuska 		mutex_exit(&zilog->zl_issuer_lock);
32894e8d558cSMartin Matuska 		zil_lwb_write_issue(zilog, lwb);
3290315ee00fSMartin Matuska 	}
3291315ee00fSMartin Matuska 	mutex_enter(&zcw->zcw_lock);
3292eda14cbcSMatt Macy }
3293eda14cbcSMatt Macy 
3294eda14cbcSMatt Macy /*
3295eda14cbcSMatt Macy  * This function is responsible for performing the following two tasks:
3296eda14cbcSMatt Macy  *
3297eda14cbcSMatt Macy  * 1. its primary responsibility is to block until the given "commit
3298eda14cbcSMatt Macy  *    waiter" is considered "done".
3299eda14cbcSMatt Macy  *
3300eda14cbcSMatt Macy  * 2. its secondary responsibility is to issue the zio for the lwb that
3301eda14cbcSMatt Macy  *    the given "commit waiter" is waiting on, if this function has
3302eda14cbcSMatt Macy  *    waited "long enough" and the lwb is still in the "open" state.
3303eda14cbcSMatt Macy  *
3304eda14cbcSMatt Macy  * Given a sufficient number of itxs being generated and written using
33054e8d558cSMartin Matuska  * the ZIL, the lwb's zio will be issued via the zil_lwb_assign()
3306eda14cbcSMatt Macy  * function. If this does not occur, this secondary responsibility will
3307eda14cbcSMatt Macy  * ensure the lwb is issued even if there is no other synchronous
3308eda14cbcSMatt Macy  * activity on the system.
3309eda14cbcSMatt Macy  *
3310eda14cbcSMatt Macy  * For more details, see zil_process_commit_list(); more specifically,
3311eda14cbcSMatt Macy  * the comment at the bottom of that function.
3312eda14cbcSMatt Macy  */
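/*
 * An illustrative (hypothetical) timeline of responsibility (2) above:
 *
 *	t0: zil_commit() enqueues its TX_COMMIT itx and blocks here
 *	t1: cv_timedwait_hires() times out and the lwb is still OPENED
 *	t2: zil_commit_waiter_timeout() closes and issues the lwb
 *	t3: the lwb write and subsequent vdev flushes complete; the
 *	    waiter is marked done and signaled, and this function returns
 */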
3313eda14cbcSMatt Macy static void
3314eda14cbcSMatt Macy zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
3315eda14cbcSMatt Macy {
3316eda14cbcSMatt Macy 	ASSERT(!MUTEX_HELD(&zilog->zl_lock));
3317eda14cbcSMatt Macy 	ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
3318eda14cbcSMatt Macy 	ASSERT(spa_writeable(zilog->zl_spa));
3319eda14cbcSMatt Macy 
3320eda14cbcSMatt Macy 	mutex_enter(&zcw->zcw_lock);
3321eda14cbcSMatt Macy 
3322eda14cbcSMatt Macy 	/*
3323eda14cbcSMatt Macy 	 * The timeout is scaled based on the lwb latency to avoid
3324eda14cbcSMatt Macy 	 * significantly impacting the latency of each individual itx.
3325eda14cbcSMatt Macy 	 * For more details, see the comment at the bottom of the
3326eda14cbcSMatt Macy 	 * zil_process_commit_list() function.
3327eda14cbcSMatt Macy 	 */
3328eda14cbcSMatt Macy 	int pct = MAX(zfs_commit_timeout_pct, 1);
3329eda14cbcSMatt Macy 	hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
3330eda14cbcSMatt Macy 	hrtime_t wakeup = gethrtime() + sleep;
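	/*
	 * Worked example (illustrative numbers only): with
	 * zfs_commit_timeout_pct set to 5 and a last lwb latency of 1 ms,
	 * sleep = (1000000 ns * 5) / 100 = 50000 ns, i.e. we wait at most
	 * ~50 usec before issuing a still-open lwb ourselves below.
	 */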
3331eda14cbcSMatt Macy 	boolean_t timedout = B_FALSE;
3332eda14cbcSMatt Macy 
3333eda14cbcSMatt Macy 	while (!zcw->zcw_done) {
3334eda14cbcSMatt Macy 		ASSERT(MUTEX_HELD(&zcw->zcw_lock));
3335eda14cbcSMatt Macy 
3336eda14cbcSMatt Macy 		lwb_t *lwb = zcw->zcw_lwb;
3337eda14cbcSMatt Macy 
3338eda14cbcSMatt Macy 		/*
3339eda14cbcSMatt Macy 		 * Usually, the waiter will have a non-NULL lwb field here,
3340eda14cbcSMatt Macy 		 * but it's possible for it to be NULL as a result of
3341eda14cbcSMatt Macy 		 * zil_commit() racing with spa_sync().
3342eda14cbcSMatt Macy 		 *
3343eda14cbcSMatt Macy 		 * When zil_clean() is called, it's possible for the itxg
3344eda14cbcSMatt Macy 		 * list (which may be cleaned via a taskq) to contain
3345eda14cbcSMatt Macy 		 * commit itxs. When this occurs, the commit waiters linked
3346eda14cbcSMatt Macy 		 * off of these commit itxs will not be committed to an
3347eda14cbcSMatt Macy 		 * lwb.  Additionally, these commit waiters will not be
3348eda14cbcSMatt Macy 		 * marked done until zil_commit_waiter_skip() is called via
3349eda14cbcSMatt Macy 		 * zil_itxg_clean().
3350eda14cbcSMatt Macy 		 *
3351eda14cbcSMatt Macy 		 * Thus, it's possible for this commit waiter (i.e. the
3352eda14cbcSMatt Macy 		 * "zcw" variable) to be found in this "in between" state;
3353eda14cbcSMatt Macy 		 * where its "zcw_lwb" field is NULL, and it hasn't yet
3354eda14cbcSMatt Macy 		 * been skipped, so its "zcw_done" field is still B_FALSE.
3355eda14cbcSMatt Macy 		 */
3356315ee00fSMartin Matuska 		IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_NEW);
3357eda14cbcSMatt Macy 
3358eda14cbcSMatt Macy 		if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
3359eda14cbcSMatt Macy 			ASSERT3B(timedout, ==, B_FALSE);
3360eda14cbcSMatt Macy 
3361eda14cbcSMatt Macy 			/*
3362eda14cbcSMatt Macy 			 * If the lwb hasn't been issued yet, then we
3363eda14cbcSMatt Macy 			 * need to wait with a timeout, in case this
3364eda14cbcSMatt Macy 			 * function needs to issue the lwb after the
3365eda14cbcSMatt Macy 			 * timeout is reached; responsibility (2) from
3366eda14cbcSMatt Macy 			 * the comment above this function.
3367eda14cbcSMatt Macy 			 */
3368eda14cbcSMatt Macy 			int rc = cv_timedwait_hires(&zcw->zcw_cv,
3369eda14cbcSMatt Macy 			    &zcw->zcw_lock, wakeup, USEC2NSEC(1),
3370eda14cbcSMatt Macy 			    CALLOUT_FLAG_ABSOLUTE);
3371eda14cbcSMatt Macy 
3372eda14cbcSMatt Macy 			if (rc != -1 || zcw->zcw_done)
3373eda14cbcSMatt Macy 				continue;
3374eda14cbcSMatt Macy 
3375eda14cbcSMatt Macy 			timedout = B_TRUE;
3376eda14cbcSMatt Macy 			zil_commit_waiter_timeout(zilog, zcw);
3377eda14cbcSMatt Macy 
3378eda14cbcSMatt Macy 			if (!zcw->zcw_done) {
3379eda14cbcSMatt Macy 				/*
3380eda14cbcSMatt Macy 				 * If the commit waiter has already been
3381eda14cbcSMatt Macy 				 * marked "done", it's possible for the
3382eda14cbcSMatt Macy 				 * waiter's lwb structure to have already
3383eda14cbcSMatt Macy 				 * been freed.  Thus, we can only reliably
3384eda14cbcSMatt Macy 				 * make these assertions if the waiter
3385eda14cbcSMatt Macy 				 * isn't done.
3386eda14cbcSMatt Macy 				 */
3387eda14cbcSMatt Macy 				ASSERT3P(lwb, ==, zcw->zcw_lwb);
3388eda14cbcSMatt Macy 				ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
3389eda14cbcSMatt Macy 			}
3390eda14cbcSMatt Macy 		} else {
3391eda14cbcSMatt Macy 			/*
3392eda14cbcSMatt Macy 			 * If the lwb isn't open, then it must have already
3393eda14cbcSMatt Macy 			 * been issued. In that case, there's no need to
3394eda14cbcSMatt Macy 			 * use a timeout when waiting for the lwb to
3395eda14cbcSMatt Macy 			 * complete.
3396eda14cbcSMatt Macy 			 *
3397eda14cbcSMatt Macy 			 * Additionally, if the lwb is NULL, the waiter
3398eda14cbcSMatt Macy 			 * will soon be signaled and marked done via
3399eda14cbcSMatt Macy 			 * zil_clean() and zil_itxg_clean(), so no timeout
3400eda14cbcSMatt Macy 			 * is required.
3401eda14cbcSMatt Macy 			 */
3402eda14cbcSMatt Macy 
3403eda14cbcSMatt Macy 			IMPLY(lwb != NULL,
3404315ee00fSMartin Matuska 			    lwb->lwb_state == LWB_STATE_CLOSED ||
3405315ee00fSMartin Matuska 			    lwb->lwb_state == LWB_STATE_READY ||
3406eda14cbcSMatt Macy 			    lwb->lwb_state == LWB_STATE_ISSUED ||
3407eda14cbcSMatt Macy 			    lwb->lwb_state == LWB_STATE_WRITE_DONE ||
3408eda14cbcSMatt Macy 			    lwb->lwb_state == LWB_STATE_FLUSH_DONE);
3409eda14cbcSMatt Macy 			cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
3410eda14cbcSMatt Macy 		}
3411eda14cbcSMatt Macy 	}
3412eda14cbcSMatt Macy 
3413eda14cbcSMatt Macy 	mutex_exit(&zcw->zcw_lock);
3414eda14cbcSMatt Macy }
3415eda14cbcSMatt Macy 
3416eda14cbcSMatt Macy static zil_commit_waiter_t *
3417eda14cbcSMatt Macy zil_alloc_commit_waiter(void)
3418eda14cbcSMatt Macy {
3419eda14cbcSMatt Macy 	zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);
3420eda14cbcSMatt Macy 
3421eda14cbcSMatt Macy 	cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
3422eda14cbcSMatt Macy 	mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
3423eda14cbcSMatt Macy 	list_link_init(&zcw->zcw_node);
3424eda14cbcSMatt Macy 	zcw->zcw_lwb = NULL;
3425eda14cbcSMatt Macy 	zcw->zcw_done = B_FALSE;
3426eda14cbcSMatt Macy 	zcw->zcw_zio_error = 0;
3427eda14cbcSMatt Macy 
3428eda14cbcSMatt Macy 	return (zcw);
3429eda14cbcSMatt Macy }
3430eda14cbcSMatt Macy 
3431eda14cbcSMatt Macy static void
3432eda14cbcSMatt Macy zil_free_commit_waiter(zil_commit_waiter_t *zcw)
3433eda14cbcSMatt Macy {
3434eda14cbcSMatt Macy 	ASSERT(!list_link_active(&zcw->zcw_node));
3435eda14cbcSMatt Macy 	ASSERT3P(zcw->zcw_lwb, ==, NULL);
3436eda14cbcSMatt Macy 	ASSERT3B(zcw->zcw_done, ==, B_TRUE);
3437eda14cbcSMatt Macy 	mutex_destroy(&zcw->zcw_lock);
3438eda14cbcSMatt Macy 	cv_destroy(&zcw->zcw_cv);
3439eda14cbcSMatt Macy 	kmem_cache_free(zil_zcw_cache, zcw);
3440eda14cbcSMatt Macy }
3441eda14cbcSMatt Macy 
3442eda14cbcSMatt Macy /*
3443eda14cbcSMatt Macy  * This function is used to create a TX_COMMIT itx and assign it. This
3444eda14cbcSMatt Macy  * way, it will be linked into the ZIL's list of synchronous itxs, and
3445eda14cbcSMatt Macy  * then later committed to an lwb (or skipped) when
3446eda14cbcSMatt Macy  * zil_process_commit_list() is called.
3447eda14cbcSMatt Macy  */
3448eda14cbcSMatt Macy static void
3449eda14cbcSMatt Macy zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
3450eda14cbcSMatt Macy {
3451eda14cbcSMatt Macy 	dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
3452d411c1d6SMartin Matuska 
3453d411c1d6SMartin Matuska 	/*
3454d411c1d6SMartin Matuska 	 * Since we are not going to create any new dirty data, and we
3455d411c1d6SMartin Matuska 	 * can even help with clearing the existing dirty data, we
3456d411c1d6SMartin Matuska 	 * should not be subject to the dirty data based delays. We
3457*61145dc2SMartin Matuska 	 * use DMU_TX_NOTHROTTLE to bypass the delay mechanism.
3458d411c1d6SMartin Matuska 	 */
3459*61145dc2SMartin Matuska 	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_NOTHROTTLE));
3460eda14cbcSMatt Macy 
3461eda14cbcSMatt Macy 	itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
3462eda14cbcSMatt Macy 	itx->itx_sync = B_TRUE;
3463eda14cbcSMatt Macy 	itx->itx_private = zcw;
3464eda14cbcSMatt Macy 
3465eda14cbcSMatt Macy 	zil_itx_assign(zilog, itx, tx);
3466eda14cbcSMatt Macy 
3467eda14cbcSMatt Macy 	dmu_tx_commit(tx);
3468eda14cbcSMatt Macy }
3469eda14cbcSMatt Macy 
3470eda14cbcSMatt Macy /*
3471eda14cbcSMatt Macy  * Commit ZFS Intent Log transactions (itxs) to stable storage.
3472eda14cbcSMatt Macy  *
3473eda14cbcSMatt Macy  * When writing ZIL transactions to the on-disk representation of the
3474eda14cbcSMatt Macy  * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
3475eda14cbcSMatt Macy  * itxs can be committed to a single lwb. Once a lwb is written and
3476eda14cbcSMatt Macy  * committed to stable storage (i.e. the lwb is written, and vdevs have
3477eda14cbcSMatt Macy  * been flushed), each itx that was committed to that lwb is also
3478eda14cbcSMatt Macy  * considered to be committed to stable storage.
3479eda14cbcSMatt Macy  *
3480eda14cbcSMatt Macy  * When an itx is committed to an lwb, the log record (lr_t) contained
3481eda14cbcSMatt Macy  * by the itx is copied into the lwb's zio buffer, and once this buffer
3482eda14cbcSMatt Macy  * is written to disk, it becomes an on-disk ZIL block.
3483eda14cbcSMatt Macy  *
3484eda14cbcSMatt Macy  * As itxs are generated, they're inserted into the ZIL's queue of
3485eda14cbcSMatt Macy  * uncommitted itxs. The semantics of zil_commit() are such that it will
3486eda14cbcSMatt Macy  * block until all itxs that were in the queue when it was called, are
3487eda14cbcSMatt Macy  * committed to stable storage.
3488eda14cbcSMatt Macy  *
3489eda14cbcSMatt Macy  * If "foid" is zero, this means all "synchronous" and "asynchronous"
3490eda14cbcSMatt Macy  * itxs, for all objects in the dataset, will be committed to stable
3491eda14cbcSMatt Macy  * storage prior to zil_commit() returning. If "foid" is non-zero, all
3492eda14cbcSMatt Macy  * "synchronous" itxs for all objects, but only "asynchronous" itxs
3493eda14cbcSMatt Macy  * that correspond to the foid passed in, will be committed to stable
3494eda14cbcSMatt Macy  * storage prior to zil_commit() returning.
3495eda14cbcSMatt Macy  *
3496eda14cbcSMatt Macy  * Generally speaking, when zil_commit() is called, the consumer doesn't
3497eda14cbcSMatt Macy  * actually care about _all_ of the uncommitted itxs. Instead, they're
3498eda14cbcSMatt Macy  * simply waiting for a specific itx to be committed to disk,
3499eda14cbcSMatt Macy  * but the interface(s) for interacting with the ZIL don't allow such
3500eda14cbcSMatt Macy  * fine-grained communication. A better interface would allow a consumer
3501eda14cbcSMatt Macy  * to create and assign an itx, and then pass a reference to this itx to
3502eda14cbcSMatt Macy  * zil_commit(); such that zil_commit() would return as soon as that
3503eda14cbcSMatt Macy  * specific itx was committed to disk (instead of waiting for _all_
3504eda14cbcSMatt Macy  * itxs to be committed).
3505eda14cbcSMatt Macy  *
3506eda14cbcSMatt Macy  * When a thread calls zil_commit() a special "commit itx" will be
3507eda14cbcSMatt Macy  * generated, along with a corresponding "waiter" for this commit itx.
3508eda14cbcSMatt Macy  * zil_commit() will wait on this waiter's CV, such that when the waiter
3509eda14cbcSMatt Macy  * is marked done, and signaled, zil_commit() will return.
3510eda14cbcSMatt Macy  *
3511eda14cbcSMatt Macy  * This commit itx is inserted into the queue of uncommitted itxs. This
3512eda14cbcSMatt Macy  * provides an easy mechanism for determining which itxs were in the
3513eda14cbcSMatt Macy  * queue prior to zil_commit() having been called, and which itxs were
3514eda14cbcSMatt Macy  * added after zil_commit() was called.
3515eda14cbcSMatt Macy  *
3516e3aa18adSMartin Matuska  * The commit itx is special; it doesn't have any on-disk representation.
3517eda14cbcSMatt Macy  * When a commit itx is "committed" to an lwb, the waiter associated
3518eda14cbcSMatt Macy  * with it is linked onto the lwb's list of waiters. Then, when that lwb
3519eda14cbcSMatt Macy  * completes, each waiter on the lwb's list is marked done and signaled
3520eda14cbcSMatt Macy  * -- allowing the thread waiting on the waiter to return from zil_commit().
3521eda14cbcSMatt Macy  *
3522eda14cbcSMatt Macy  * It's important to point out a few critical factors that allow us
3523eda14cbcSMatt Macy  * to make use of the commit itxs, commit waiters, per-lwb lists of
3524eda14cbcSMatt Macy  * commit waiters, and zio completion callbacks like we're doing:
3525eda14cbcSMatt Macy  *
3526eda14cbcSMatt Macy  *   1. The list of waiters for each lwb is traversed, and each commit
3527eda14cbcSMatt Macy  *      waiter is marked "done" and signaled, in the zio completion
3528eda14cbcSMatt Macy  *      callback of the lwb's zio[*].
3529eda14cbcSMatt Macy  *
3530eda14cbcSMatt Macy  *      * Actually, the waiters are signaled in the zio completion
35311719886fSMartin Matuska  *        callback of the root zio for the flush commands that are sent to
35321719886fSMartin Matuska  *        the vdevs upon completion of the lwb zio.
3533eda14cbcSMatt Macy  *
3534eda14cbcSMatt Macy  *   2. When the itxs are inserted into the ZIL's queue of uncommitted
3535eda14cbcSMatt Macy  *      itxs, the order in which they are inserted is preserved[*]; as
3536eda14cbcSMatt Macy  *      itxs are added to the queue, they are added to the tail of
3537eda14cbcSMatt Macy  *      in-memory linked lists.
3538eda14cbcSMatt Macy  *
3539eda14cbcSMatt Macy  *      When committing the itxs to lwbs (to be written to disk), they
3540eda14cbcSMatt Macy  *      are committed in the same order in which the itxs were added to
3541eda14cbcSMatt Macy  *      the uncommitted queue's linked list(s); i.e. the linked list of
3542eda14cbcSMatt Macy  *      itxs to commit is traversed from head to tail, and each itx is
3543eda14cbcSMatt Macy  *      committed to an lwb in that order.
3544eda14cbcSMatt Macy  *
3545eda14cbcSMatt Macy  *      * To clarify:
3546eda14cbcSMatt Macy  *
3547eda14cbcSMatt Macy  *        - the order of "sync" itxs is preserved w.r.t. other
3548eda14cbcSMatt Macy  *          "sync" itxs, regardless of the corresponding objects.
3549eda14cbcSMatt Macy  *        - the order of "async" itxs is preserved w.r.t. other
3550eda14cbcSMatt Macy  *          "async" itxs corresponding to the same object.
3551eda14cbcSMatt Macy  *        - the order of "async" itxs is *not* preserved w.r.t. other
3552eda14cbcSMatt Macy  *          "async" itxs corresponding to different objects.
3553eda14cbcSMatt Macy  *        - the order of "sync" itxs w.r.t. "async" itxs (or vice
3554eda14cbcSMatt Macy  *          versa) is *not* preserved, even for itxs that correspond
3555eda14cbcSMatt Macy  *          to the same object.
3556eda14cbcSMatt Macy  *
3557eda14cbcSMatt Macy  *      For more details, see: zil_itx_assign(), zil_async_to_sync(),
3558eda14cbcSMatt Macy  *      zil_get_commit_list(), and zil_process_commit_list().
3559eda14cbcSMatt Macy  *
3560eda14cbcSMatt Macy  *   3. The lwbs represent a linked list of blocks on disk. Thus, any
3561eda14cbcSMatt Macy  *      lwb cannot be considered committed to stable storage, until its
3562eda14cbcSMatt Macy  *      "previous" lwb is also committed to stable storage. This fact,
3563eda14cbcSMatt Macy  *      coupled with the fact described above, means that itxs are
3564eda14cbcSMatt Macy  *      committed in (roughly) the order in which they were generated.
3565eda14cbcSMatt Macy  *      This is essential because itxs are dependent on prior itxs.
3566eda14cbcSMatt Macy  *      Thus, we *must not* deem an itx as being committed to stable
3567eda14cbcSMatt Macy  *      storage, until *all* prior itxs have also been committed to
3568eda14cbcSMatt Macy  *      stable storage.
3569eda14cbcSMatt Macy  *
3570eda14cbcSMatt Macy  *      To enforce this ordering of lwb zio's, while still leveraging as
3571eda14cbcSMatt Macy  *      much of the underlying storage performance as possible, we rely
3572eda14cbcSMatt Macy  *      on two fundamental concepts:
3573eda14cbcSMatt Macy  *
3574eda14cbcSMatt Macy  *          1. The creation and issuance of lwb zio's is protected by
3575eda14cbcSMatt Macy  *             the zilog's "zl_issuer_lock", which ensures only a single
3576eda14cbcSMatt Macy  *             thread is creating and/or issuing lwb's at a time
3577eda14cbcSMatt Macy  *          2. The "previous" lwb is a child of the "current" lwb
3578eda14cbcSMatt Macy  *             (leveraging the zio parent-child dependency graph)
3579eda14cbcSMatt Macy  *
3580eda14cbcSMatt Macy  *      By relying on this parent-child zio relationship, we can have
3581eda14cbcSMatt Macy  *      many lwb zio's concurrently issued to the underlying storage,
3582eda14cbcSMatt Macy  *      but the order in which they complete will be the same order in
3583eda14cbcSMatt Macy  *      which they were created.
3584eda14cbcSMatt Macy  */
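/*
 * Purely illustrative caller sketch (a hypothetical fragment; real
 * consumers such as zfs_fsync() differ in their details).  A synchronous
 * write path first logs an itx in open context, then waits for it via
 * zil_commit():
 *
 *	itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *	itx->itx_sync = B_TRUE;
 *	(... fill in the lr_write_t for the write being logged ...)
 *	zil_itx_assign(zilog, itx, tx);
 *	dmu_tx_commit(tx);
 *	...
 *	zil_commit(zilog, foid);	(returns once the itx is stable)
 */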
3585eda14cbcSMatt Macy void
3586eda14cbcSMatt Macy zil_commit(zilog_t *zilog, uint64_t foid)
3587eda14cbcSMatt Macy {
3588eda14cbcSMatt Macy 	/*
3589eda14cbcSMatt Macy 	 * We should never attempt to call zil_commit on a snapshot for
3590eda14cbcSMatt Macy 	 * a couple of reasons:
3591eda14cbcSMatt Macy 	 *
3592eda14cbcSMatt Macy 	 * 1. A snapshot may never be modified, thus it cannot have any
3593eda14cbcSMatt Macy 	 *    in-flight itxs that would have modified the dataset.
3594eda14cbcSMatt Macy 	 *
3595eda14cbcSMatt Macy 	 * 2. By design, when zil_commit() is called, a commit itx will
3596eda14cbcSMatt Macy 	 *    be assigned to this zilog; as a result, the zilog will be
3597eda14cbcSMatt Macy 	 *    dirtied. We must not dirty the zilog of a snapshot; there's
3598eda14cbcSMatt Macy 	 *    checks in the code that enforce this invariant, and will
3599eda14cbcSMatt Macy 	 *    cause a panic if it's not upheld.
3600eda14cbcSMatt Macy 	 */
3601eda14cbcSMatt Macy 	ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
3602eda14cbcSMatt Macy 
3603eda14cbcSMatt Macy 	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
3604eda14cbcSMatt Macy 		return;
3605eda14cbcSMatt Macy 
3606eda14cbcSMatt Macy 	if (!spa_writeable(zilog->zl_spa)) {
3607eda14cbcSMatt Macy 		/*
3608eda14cbcSMatt Macy 		 * If the SPA is not writable, there should never be any
3609eda14cbcSMatt Macy 		 * pending itxs waiting to be committed to disk. If that
3610eda14cbcSMatt Macy 		 * weren't true, we'd skip writing those itxs out, and
3611eda14cbcSMatt Macy 		 * would break the semantics of zil_commit(); thus, we're
3612eda14cbcSMatt Macy 		 * verifying that truth before we return to the caller.
3613eda14cbcSMatt Macy 		 */
3614eda14cbcSMatt Macy 		ASSERT(list_is_empty(&zilog->zl_lwb_list));
3615eda14cbcSMatt Macy 		ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
3616eda14cbcSMatt Macy 		for (int i = 0; i < TXG_SIZE; i++)
3617eda14cbcSMatt Macy 			ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
3618eda14cbcSMatt Macy 		return;
3619eda14cbcSMatt Macy 	}
3620eda14cbcSMatt Macy 
3621eda14cbcSMatt Macy 	/*
3622eda14cbcSMatt Macy 	 * If the ZIL is suspended, we don't want to dirty it by calling
3623eda14cbcSMatt Macy 	 * zil_commit_itx_assign() below, nor can we write out
3624eda14cbcSMatt Macy 	 * lwbs like would be done in zil_commit_write(). Thus, we
3625eda14cbcSMatt Macy 	 * simply rely on txg_wait_synced() to maintain the necessary
3626eda14cbcSMatt Macy 	 * semantics, and avoid calling those functions altogether.
3627eda14cbcSMatt Macy 	 */
3628eda14cbcSMatt Macy 	if (zilog->zl_suspend > 0) {
3629ce4dcb97SMartin Matuska 		ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
3630eda14cbcSMatt Macy 		txg_wait_synced(zilog->zl_dmu_pool, 0);
3631eda14cbcSMatt Macy 		return;
3632eda14cbcSMatt Macy 	}
3633eda14cbcSMatt Macy 
3634eda14cbcSMatt Macy 	zil_commit_impl(zilog, foid);
3635eda14cbcSMatt Macy }
3636eda14cbcSMatt Macy 
3637eda14cbcSMatt Macy void
3638eda14cbcSMatt Macy zil_commit_impl(zilog_t *zilog, uint64_t foid)
3639eda14cbcSMatt Macy {
3640271171e0SMartin Matuska 	ZIL_STAT_BUMP(zilog, zil_commit_count);
3641eda14cbcSMatt Macy 
3642eda14cbcSMatt Macy 	/*
3643eda14cbcSMatt Macy 	 * Move the "async" itxs for the specified foid to the "sync"
3644eda14cbcSMatt Macy 	 * queues, such that they will be later committed (or skipped)
3645eda14cbcSMatt Macy 	 * to an lwb when zil_process_commit_list() is called.
3646eda14cbcSMatt Macy 	 *
3647eda14cbcSMatt Macy 	 * Since these "async" itxs must be committed prior to this
3648eda14cbcSMatt Macy 	 * call to zil_commit returning, we must perform this operation
3649eda14cbcSMatt Macy 	 * before we call zil_commit_itx_assign().
3650eda14cbcSMatt Macy 	 */
3651eda14cbcSMatt Macy 	zil_async_to_sync(zilog, foid);
3652eda14cbcSMatt Macy 
3653eda14cbcSMatt Macy 	/*
3654eda14cbcSMatt Macy 	 * We allocate a new "waiter" structure which will initially be
3655eda14cbcSMatt Macy 	 * linked to the commit itx using the itx's "itx_private" field.
3656eda14cbcSMatt Macy 	 * Since the commit itx doesn't represent any on-disk state,
3657eda14cbcSMatt Macy 	 * when it's committed to an lwb, rather than copying its
3658eda14cbcSMatt Macy 	 * lr_t into the lwb's buffer, the commit itx's "waiter" will be
3659eda14cbcSMatt Macy 	 * added to the lwb's list of waiters. Then, when the lwb is
3660eda14cbcSMatt Macy 	 * committed to stable storage, each waiter in the lwb's list of
3661eda14cbcSMatt Macy 	 * waiters will be marked "done", and signalled.
3662eda14cbcSMatt Macy 	 *
3663eda14cbcSMatt Macy 	 * We must create the waiter and assign the commit itx prior to
3664eda14cbcSMatt Macy 	 * calling zil_commit_writer(), or else our specific commit itx
3665eda14cbcSMatt Macy 	 * is not guaranteed to be committed to an lwb prior to calling
3666eda14cbcSMatt Macy 	 * zil_commit_waiter().
3667eda14cbcSMatt Macy 	 */
3668eda14cbcSMatt Macy 	zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
3669eda14cbcSMatt Macy 	zil_commit_itx_assign(zilog, zcw);
3670eda14cbcSMatt Macy 
36710a97523dSMartin Matuska 	uint64_t wtxg = zil_commit_writer(zilog, zcw);
3672eda14cbcSMatt Macy 	zil_commit_waiter(zilog, zcw);
3673eda14cbcSMatt Macy 
3674eda14cbcSMatt Macy 	if (zcw->zcw_zio_error != 0) {
3675eda14cbcSMatt Macy 		/*
3676eda14cbcSMatt Macy 		 * If there was an error writing out the ZIL blocks that
3677eda14cbcSMatt Macy 		 * this thread is waiting on, then we fallback to
3678eda14cbcSMatt Macy 		 * relying on spa_sync() to write out the data this
3679eda14cbcSMatt Macy 		 * thread is waiting on. Obviously this has performance
3680eda14cbcSMatt Macy 		 * implications, but the expectation is for this to be
3681eda14cbcSMatt Macy 		 * an exceptional case, and shouldn't occur often.
3682eda14cbcSMatt Macy 		 */
3683ce4dcb97SMartin Matuska 		ZIL_STAT_BUMP(zilog, zil_commit_error_count);
3684eda14cbcSMatt Macy 		DTRACE_PROBE2(zil__commit__io__error,
3685eda14cbcSMatt Macy 		    zilog_t *, zilog, zil_commit_waiter_t *, zcw);
3686eda14cbcSMatt Macy 		txg_wait_synced(zilog->zl_dmu_pool, 0);
36870a97523dSMartin Matuska 	} else if (wtxg != 0) {
3688ce4dcb97SMartin Matuska 		ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
36890a97523dSMartin Matuska 		txg_wait_synced(zilog->zl_dmu_pool, wtxg);
3690eda14cbcSMatt Macy 	}
3691eda14cbcSMatt Macy 
3692eda14cbcSMatt Macy 	zil_free_commit_waiter(zcw);
3693eda14cbcSMatt Macy }
3694eda14cbcSMatt Macy 
3695eda14cbcSMatt Macy /*
3696eda14cbcSMatt Macy  * Called in syncing context to free committed log blocks and update log header.
3697eda14cbcSMatt Macy  */
3698eda14cbcSMatt Macy void
3699eda14cbcSMatt Macy zil_sync(zilog_t *zilog, dmu_tx_t *tx)
3700eda14cbcSMatt Macy {
3701eda14cbcSMatt Macy 	zil_header_t *zh = zil_header_in_syncing_context(zilog);
3702eda14cbcSMatt Macy 	uint64_t txg = dmu_tx_get_txg(tx);
3703eda14cbcSMatt Macy 	spa_t *spa = zilog->zl_spa;
3704eda14cbcSMatt Macy 	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
3705eda14cbcSMatt Macy 	lwb_t *lwb;
3706eda14cbcSMatt Macy 
3707eda14cbcSMatt Macy 	/*
3708eda14cbcSMatt Macy 	 * We don't zero out zl_destroy_txg, so make sure we don't try
3709eda14cbcSMatt Macy 	 * to destroy it twice.
3710eda14cbcSMatt Macy 	 */
3711eda14cbcSMatt Macy 	if (spa_sync_pass(spa) != 1)
3712eda14cbcSMatt Macy 		return;
3713eda14cbcSMatt Macy 
3714e3aa18adSMartin Matuska 	zil_lwb_flush_wait_all(zilog, txg);
3715e3aa18adSMartin Matuska 
3716eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_lock);
3717eda14cbcSMatt Macy 
3718eda14cbcSMatt Macy 	ASSERT(zilog->zl_stop_sync == 0);
3719eda14cbcSMatt Macy 
3720eda14cbcSMatt Macy 	if (*replayed_seq != 0) {
3721eda14cbcSMatt Macy 		ASSERT(zh->zh_replay_seq < *replayed_seq);
3722eda14cbcSMatt Macy 		zh->zh_replay_seq = *replayed_seq;
3723eda14cbcSMatt Macy 		*replayed_seq = 0;
3724eda14cbcSMatt Macy 	}
3725eda14cbcSMatt Macy 
3726eda14cbcSMatt Macy 	if (zilog->zl_destroy_txg == txg) {
3727eda14cbcSMatt Macy 		blkptr_t blk = zh->zh_log;
3728c03c5b1cSMartin Matuska 		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
3729eda14cbcSMatt Macy 
3730c0a83fe0SMartin Matuska 		ASSERT(list_is_empty(&zilog->zl_lwb_list));
3731eda14cbcSMatt Macy 
3732da5137abSMartin Matuska 		memset(zh, 0, sizeof (zil_header_t));
3733da5137abSMartin Matuska 		memset(zilog->zl_replayed_seq, 0,
3734da5137abSMartin Matuska 		    sizeof (zilog->zl_replayed_seq));
3735eda14cbcSMatt Macy 
3736eda14cbcSMatt Macy 		if (zilog->zl_keep_first) {
3737eda14cbcSMatt Macy 			/*
3738eda14cbcSMatt Macy 			 * If this block was part of log chain that couldn't
3739eda14cbcSMatt Macy 			 * be claimed because a device was missing during
3740eda14cbcSMatt Macy 			 * zil_claim(), but that device later returns,
3741eda14cbcSMatt Macy 			 * then this block could erroneously appear valid.
3742eda14cbcSMatt Macy 			 * To guard against this, assign a new GUID to the new
3743eda14cbcSMatt Macy 			 * log chain so it doesn't matter what blk points to.
3744eda14cbcSMatt Macy 			 */
3745eda14cbcSMatt Macy 			zil_init_log_chain(zilog, &blk);
3746eda14cbcSMatt Macy 			zh->zh_log = blk;
3747c03c5b1cSMartin Matuska 		} else {
3748c03c5b1cSMartin Matuska 			/*
3749c03c5b1cSMartin Matuska 			 * A destroyed ZIL chain can't contain any TX_SETSAXATTR
3750c03c5b1cSMartin Matuska 			 * records. So, deactivate the feature for this dataset.
3751c03c5b1cSMartin Matuska 			 * We activate it again when we start a new ZIL chain.
3752c03c5b1cSMartin Matuska 			 */
3753c03c5b1cSMartin Matuska 			if (dsl_dataset_feature_is_active(ds,
3754c03c5b1cSMartin Matuska 			    SPA_FEATURE_ZILSAXATTR))
3755c03c5b1cSMartin Matuska 				dsl_dataset_deactivate_feature(ds,
3756c03c5b1cSMartin Matuska 				    SPA_FEATURE_ZILSAXATTR, tx);
3757eda14cbcSMatt Macy 		}
3758eda14cbcSMatt Macy 	}
3759eda14cbcSMatt Macy 
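	/*
	 * Free the blocks of lwbs that are no longer needed.  An lwb can
	 * be reclaimed here only once its write and flush zios have fully
	 * completed (LWB_STATE_FLUSH_DONE) and both the txg that
	 * allocated its block and the highest txg it covers have synced.
	 */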
3760eda14cbcSMatt Macy 	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
3761eda14cbcSMatt Macy 		zh->zh_log = lwb->lwb_blk;
3762c0a83fe0SMartin Matuska 		if (lwb->lwb_state != LWB_STATE_FLUSH_DONE ||
3763315ee00fSMartin Matuska 		    lwb->lwb_alloc_txg > txg || lwb->lwb_max_txg > txg)
3764eda14cbcSMatt Macy 			break;
3765eda14cbcSMatt Macy 		list_remove(&zilog->zl_lwb_list, lwb);
3766315ee00fSMartin Matuska 		if (!BP_IS_HOLE(&lwb->lwb_blk))
3767eda14cbcSMatt Macy 			zio_free(spa, txg, &lwb->lwb_blk);
3768eda14cbcSMatt Macy 		zil_free_lwb(zilog, lwb);
3769eda14cbcSMatt Macy 
3770eda14cbcSMatt Macy 		/*
3771eda14cbcSMatt Macy 		 * If we don't have anything left in the lwb list then
3772eda14cbcSMatt Macy 		 * we've had an allocation failure and we need to zero
3773eda14cbcSMatt Macy 		 * out the zil_header blkptr so that we don't end
3774eda14cbcSMatt Macy 		 * up freeing the same block twice.
3775eda14cbcSMatt Macy 		 */
3776c0a83fe0SMartin Matuska 		if (list_is_empty(&zilog->zl_lwb_list))
3777eda14cbcSMatt Macy 			BP_ZERO(&zh->zh_log);
3778eda14cbcSMatt Macy 	}
3779eda14cbcSMatt Macy 
3780eda14cbcSMatt Macy 	mutex_exit(&zilog->zl_lock);
3781eda14cbcSMatt Macy }
3782eda14cbcSMatt Macy 
3783eda14cbcSMatt Macy static int
3784eda14cbcSMatt Macy zil_lwb_cons(void *vbuf, void *unused, int kmflag)
3785eda14cbcSMatt Macy {
3786e92ffd9bSMartin Matuska 	(void) unused, (void) kmflag;
3787eda14cbcSMatt Macy 	lwb_t *lwb = vbuf;
3788eda14cbcSMatt Macy 	list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
3789eda14cbcSMatt Macy 	list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
3790eda14cbcSMatt Macy 	    offsetof(zil_commit_waiter_t, zcw_node));
3791eda14cbcSMatt Macy 	avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
3792eda14cbcSMatt Macy 	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
3793eda14cbcSMatt Macy 	mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
3794eda14cbcSMatt Macy 	return (0);
3795eda14cbcSMatt Macy }
3796eda14cbcSMatt Macy 
3797eda14cbcSMatt Macy static void
3798eda14cbcSMatt Macy zil_lwb_dest(void *vbuf, void *unused)
3799eda14cbcSMatt Macy {
3800e92ffd9bSMartin Matuska 	(void) unused;
3801eda14cbcSMatt Macy 	lwb_t *lwb = vbuf;
3802eda14cbcSMatt Macy 	mutex_destroy(&lwb->lwb_vdev_lock);
3803eda14cbcSMatt Macy 	avl_destroy(&lwb->lwb_vdev_tree);
3804eda14cbcSMatt Macy 	list_destroy(&lwb->lwb_waiters);
3805eda14cbcSMatt Macy 	list_destroy(&lwb->lwb_itxs);
3806eda14cbcSMatt Macy }
3807eda14cbcSMatt Macy 
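/*
 * zil_lwb_cons()/zil_lwb_dest() are registered below as the kmem cache
 * constructor/destructor for lwb_t, so the embedded lists, AVL tree, and
 * mutex are set up once per cached object rather than on every single
 * allocation from the cache.
 */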
3808eda14cbcSMatt Macy void
3809eda14cbcSMatt Macy zil_init(void)
3810eda14cbcSMatt Macy {
3811eda14cbcSMatt Macy 	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
3812eda14cbcSMatt Macy 	    sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);
3813eda14cbcSMatt Macy 
3814eda14cbcSMatt Macy 	zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
3815eda14cbcSMatt Macy 	    sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
3816eda14cbcSMatt Macy 
3817271171e0SMartin Matuska 	zil_sums_init(&zil_sums_global);
3818271171e0SMartin Matuska 	zil_kstats_global = kstat_create("zfs", 0, "zil", "misc",
3819eda14cbcSMatt Macy 	    KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
3820eda14cbcSMatt Macy 	    KSTAT_FLAG_VIRTUAL);
3821eda14cbcSMatt Macy 
3822271171e0SMartin Matuska 	if (zil_kstats_global != NULL) {
3823271171e0SMartin Matuska 		zil_kstats_global->ks_data = &zil_stats;
3824271171e0SMartin Matuska 		zil_kstats_global->ks_update = zil_kstats_global_update;
3825271171e0SMartin Matuska 		zil_kstats_global->ks_private = NULL;
3826271171e0SMartin Matuska 		kstat_install(zil_kstats_global);
3827eda14cbcSMatt Macy 	}
3828eda14cbcSMatt Macy }
3829eda14cbcSMatt Macy 
3830eda14cbcSMatt Macy void
3831eda14cbcSMatt Macy zil_fini(void)
3832eda14cbcSMatt Macy {
3833eda14cbcSMatt Macy 	kmem_cache_destroy(zil_zcw_cache);
3834eda14cbcSMatt Macy 	kmem_cache_destroy(zil_lwb_cache);
3835eda14cbcSMatt Macy 
3836271171e0SMartin Matuska 	if (zil_kstats_global != NULL) {
3837271171e0SMartin Matuska 		kstat_delete(zil_kstats_global);
3838271171e0SMartin Matuska 		zil_kstats_global = NULL;
3839eda14cbcSMatt Macy 	}
3840271171e0SMartin Matuska 
3841271171e0SMartin Matuska 	zil_sums_fini(&zil_sums_global);
3842eda14cbcSMatt Macy }
3843eda14cbcSMatt Macy 
3844eda14cbcSMatt Macy void
3845eda14cbcSMatt Macy zil_set_sync(zilog_t *zilog, uint64_t sync)
3846eda14cbcSMatt Macy {
3847eda14cbcSMatt Macy 	zilog->zl_sync = sync;
3848eda14cbcSMatt Macy }
3849eda14cbcSMatt Macy 
3850eda14cbcSMatt Macy void
3851eda14cbcSMatt Macy zil_set_logbias(zilog_t *zilog, uint64_t logbias)
3852eda14cbcSMatt Macy {
3853eda14cbcSMatt Macy 	zilog->zl_logbias = logbias;
3854eda14cbcSMatt Macy }
3855eda14cbcSMatt Macy 
3856eda14cbcSMatt Macy zilog_t *
3857eda14cbcSMatt Macy zil_alloc(objset_t *os, zil_header_t *zh_phys)
3858eda14cbcSMatt Macy {
3859eda14cbcSMatt Macy 	zilog_t *zilog;
3860eda14cbcSMatt Macy 
3861eda14cbcSMatt Macy 	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
3862eda14cbcSMatt Macy 
3863eda14cbcSMatt Macy 	zilog->zl_header = zh_phys;
3864eda14cbcSMatt Macy 	zilog->zl_os = os;
3865eda14cbcSMatt Macy 	zilog->zl_spa = dmu_objset_spa(os);
3866eda14cbcSMatt Macy 	zilog->zl_dmu_pool = dmu_objset_pool(os);
3867eda14cbcSMatt Macy 	zilog->zl_destroy_txg = TXG_INITIAL - 1;
3868eda14cbcSMatt Macy 	zilog->zl_logbias = dmu_objset_logbias(os);
3869eda14cbcSMatt Macy 	zilog->zl_sync = dmu_objset_syncprop(os);
3870eda14cbcSMatt Macy 	zilog->zl_dirty_max_txg = 0;
3871eda14cbcSMatt Macy 	zilog->zl_last_lwb_opened = NULL;
3872eda14cbcSMatt Macy 	zilog->zl_last_lwb_latency = 0;
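	/*
	 * Clamp the zil_maxblocksize tunable: align it down to a
	 * ZIL_MIN_BLKSZ boundary, then keep the result within
	 * [ZIL_MIN_BLKSZ, spa_maxblocksize()].
	 */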
3873b356da80SMartin Matuska 	zilog->zl_max_block_size = MIN(MAX(P2ALIGN_TYPED(zil_maxblocksize,
3874b356da80SMartin Matuska 	    ZIL_MIN_BLKSZ, uint64_t), ZIL_MIN_BLKSZ),
3875b356da80SMartin Matuska 	    spa_maxblocksize(dmu_objset_spa(os)));
3876eda14cbcSMatt Macy 
3877eda14cbcSMatt Macy 	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
3878eda14cbcSMatt Macy 	mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
3879e3aa18adSMartin Matuska 	mutex_init(&zilog->zl_lwb_io_lock, NULL, MUTEX_DEFAULT, NULL);
3880eda14cbcSMatt Macy 
3881eda14cbcSMatt Macy 	for (int i = 0; i < TXG_SIZE; i++) {
3882eda14cbcSMatt Macy 		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
3883eda14cbcSMatt Macy 		    MUTEX_DEFAULT, NULL);
3884eda14cbcSMatt Macy 	}
3885eda14cbcSMatt Macy 
3886eda14cbcSMatt Macy 	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
3887eda14cbcSMatt Macy 	    offsetof(lwb_t, lwb_node));
3888eda14cbcSMatt Macy 
3889eda14cbcSMatt Macy 	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
3890eda14cbcSMatt Macy 	    offsetof(itx_t, itx_node));
3891eda14cbcSMatt Macy 
3892eda14cbcSMatt Macy 	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
3893e3aa18adSMartin Matuska 	cv_init(&zilog->zl_lwb_io_cv, NULL, CV_DEFAULT, NULL);
3894eda14cbcSMatt Macy 
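	/*
	 * Seed the history of optimal burst sizes with the largest
	 * usable payload: the maximum block size less the zil_chain_t
	 * header.
	 */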
3895b356da80SMartin Matuska 	for (int i = 0; i < ZIL_BURSTS; i++) {
3896b356da80SMartin Matuska 		zilog->zl_prev_opt[i] = zilog->zl_max_block_size -
3897b356da80SMartin Matuska 		    sizeof (zil_chain_t);
3898b356da80SMartin Matuska 	}
3899b356da80SMartin Matuska 
3900eda14cbcSMatt Macy 	return (zilog);
3901eda14cbcSMatt Macy }
3902eda14cbcSMatt Macy 
3903eda14cbcSMatt Macy void
3904eda14cbcSMatt Macy zil_free(zilog_t *zilog)
3905eda14cbcSMatt Macy {
3906eda14cbcSMatt Macy 	int i;
3907eda14cbcSMatt Macy 
3908eda14cbcSMatt Macy 	zilog->zl_stop_sync = 1;
3909eda14cbcSMatt Macy 
3910eda14cbcSMatt Macy 	ASSERT0(zilog->zl_suspend);
3911eda14cbcSMatt Macy 	ASSERT0(zilog->zl_suspending);
3912eda14cbcSMatt Macy 
3913eda14cbcSMatt Macy 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
3914eda14cbcSMatt Macy 	list_destroy(&zilog->zl_lwb_list);
3915eda14cbcSMatt Macy 
3916eda14cbcSMatt Macy 	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
3917eda14cbcSMatt Macy 	list_destroy(&zilog->zl_itx_commit_list);
3918eda14cbcSMatt Macy 
3919eda14cbcSMatt Macy 	for (i = 0; i < TXG_SIZE; i++) {
3920eda14cbcSMatt Macy 		/*
3921eda14cbcSMatt Macy 		 * It's possible for an itx to be generated that doesn't dirty
3922eda14cbcSMatt Macy 		 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
3923eda14cbcSMatt Macy 		 * callback to remove the entry. We remove those here.
3924eda14cbcSMatt Macy 		 *
3925eda14cbcSMatt Macy 		 * Also free up the ziltest itxs.
3926eda14cbcSMatt Macy 		 */
3927eda14cbcSMatt Macy 		if (zilog->zl_itxg[i].itxg_itxs)
3928eda14cbcSMatt Macy 			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
3929eda14cbcSMatt Macy 		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
3930eda14cbcSMatt Macy 	}
3931eda14cbcSMatt Macy 
3932eda14cbcSMatt Macy 	mutex_destroy(&zilog->zl_issuer_lock);
3933eda14cbcSMatt Macy 	mutex_destroy(&zilog->zl_lock);
3934e3aa18adSMartin Matuska 	mutex_destroy(&zilog->zl_lwb_io_lock);
3935eda14cbcSMatt Macy 
3936eda14cbcSMatt Macy 	cv_destroy(&zilog->zl_cv_suspend);
3937e3aa18adSMartin Matuska 	cv_destroy(&zilog->zl_lwb_io_cv);
3938eda14cbcSMatt Macy 
3939eda14cbcSMatt Macy 	kmem_free(zilog, sizeof (zilog_t));
3940eda14cbcSMatt Macy }
3941eda14cbcSMatt Macy 
3942eda14cbcSMatt Macy /*
3943eda14cbcSMatt Macy  * Open an intent log.
3944eda14cbcSMatt Macy  */
3945eda14cbcSMatt Macy zilog_t *
3946271171e0SMartin Matuska zil_open(objset_t *os, zil_get_data_t *get_data, zil_sums_t *zil_sums)
3947eda14cbcSMatt Macy {
3948eda14cbcSMatt Macy 	zilog_t *zilog = dmu_objset_zil(os);
3949eda14cbcSMatt Macy 
3950eda14cbcSMatt Macy 	ASSERT3P(zilog->zl_get_data, ==, NULL);
3951eda14cbcSMatt Macy 	ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
3952eda14cbcSMatt Macy 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
3953eda14cbcSMatt Macy 
3954eda14cbcSMatt Macy 	zilog->zl_get_data = get_data;
3955271171e0SMartin Matuska 	zilog->zl_sums = zil_sums;
3956eda14cbcSMatt Macy 
3957eda14cbcSMatt Macy 	return (zilog);
3958eda14cbcSMatt Macy }
3959eda14cbcSMatt Macy 
3960eda14cbcSMatt Macy /*
3961eda14cbcSMatt Macy  * Close an intent log.
3962eda14cbcSMatt Macy  */
3963eda14cbcSMatt Macy void
3964eda14cbcSMatt Macy zil_close(zilog_t *zilog)
3965eda14cbcSMatt Macy {
3966eda14cbcSMatt Macy 	lwb_t *lwb;
3967eda14cbcSMatt Macy 	uint64_t txg;
3968eda14cbcSMatt Macy 
3969eda14cbcSMatt Macy 	if (!dmu_objset_is_snapshot(zilog->zl_os)) {
3970eda14cbcSMatt Macy 		zil_commit(zilog, 0);
3971eda14cbcSMatt Macy 	} else {
3972c0a83fe0SMartin Matuska 		ASSERT(list_is_empty(&zilog->zl_lwb_list));
3973eda14cbcSMatt Macy 		ASSERT0(zilog->zl_dirty_max_txg);
3974eda14cbcSMatt Macy 		ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
3975eda14cbcSMatt Macy 	}
3976eda14cbcSMatt Macy 
3977eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_lock);
3978eda14cbcSMatt Macy 	txg = zilog->zl_dirty_max_txg;
3979315ee00fSMartin Matuska 	lwb = list_tail(&zilog->zl_lwb_list);
3980315ee00fSMartin Matuska 	if (lwb != NULL) {
3981315ee00fSMartin Matuska 		txg = MAX(txg, lwb->lwb_alloc_txg);
3982315ee00fSMartin Matuska 		txg = MAX(txg, lwb->lwb_max_txg);
3983315ee00fSMartin Matuska 	}
3984eda14cbcSMatt Macy 	mutex_exit(&zilog->zl_lock);
3985eda14cbcSMatt Macy 
3986eda14cbcSMatt Macy 	/*
3987e3aa18adSMartin Matuska 	 * zl_lwb_max_issued_txg may be larger than lwb_max_txg. It depends
3988e3aa18adSMartin Matuska 	 * on the time when the dmu_tx transaction is assigned in
3989315ee00fSMartin Matuska 	 * zil_lwb_write_issue().
3990e3aa18adSMartin Matuska 	 */
3991e3aa18adSMartin Matuska 	mutex_enter(&zilog->zl_lwb_io_lock);
3992e3aa18adSMartin Matuska 	txg = MAX(zilog->zl_lwb_max_issued_txg, txg);
3993e3aa18adSMartin Matuska 	mutex_exit(&zilog->zl_lwb_io_lock);
3994e3aa18adSMartin Matuska 
3995e3aa18adSMartin Matuska 	/*
3996e3aa18adSMartin Matuska 	 * We need to use txg_wait_synced() to wait until that txg is synced.
3997e3aa18adSMartin Matuska 	 * zil_sync() will guarantee all lwbs up to that txg have been
3998e3aa18adSMartin Matuska 	 * written out, flushed, and cleaned.
3999eda14cbcSMatt Macy 	 */
4000eda14cbcSMatt Macy 	if (txg != 0)
4001eda14cbcSMatt Macy 		txg_wait_synced(zilog->zl_dmu_pool, txg);
4002eda14cbcSMatt Macy 
4003eda14cbcSMatt Macy 	if (zilog_is_dirty(zilog))
400433b8c039SMartin Matuska 		zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
400533b8c039SMartin Matuska 		    (u_longlong_t)txg);
4006eda14cbcSMatt Macy 	if (txg < spa_freeze_txg(zilog->zl_spa))
4007eda14cbcSMatt Macy 		VERIFY(!zilog_is_dirty(zilog));
4008eda14cbcSMatt Macy 
4009eda14cbcSMatt Macy 	zilog->zl_get_data = NULL;
4010eda14cbcSMatt Macy 
4011eda14cbcSMatt Macy 	/*
4012eda14cbcSMatt Macy 	 * We should have only one lwb left on the list; remove it now.
4013eda14cbcSMatt Macy 	 */
4014eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_lock);
4015c0a83fe0SMartin Matuska 	lwb = list_remove_head(&zilog->zl_lwb_list);
4016eda14cbcSMatt Macy 	if (lwb != NULL) {
4017c0a83fe0SMartin Matuska 		ASSERT(list_is_empty(&zilog->zl_lwb_list));
4018315ee00fSMartin Matuska 		ASSERT3S(lwb->lwb_state, ==, LWB_STATE_NEW);
4019eda14cbcSMatt Macy 		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
4020eda14cbcSMatt Macy 		zil_free_lwb(zilog, lwb);
4021eda14cbcSMatt Macy 	}
4022eda14cbcSMatt Macy 	mutex_exit(&zilog->zl_lock);
4023eda14cbcSMatt Macy }
4024eda14cbcSMatt Macy 
4025a0b956f5SMartin Matuska static const char *suspend_tag = "zil suspending";
4026eda14cbcSMatt Macy 
4027eda14cbcSMatt Macy /*
4028eda14cbcSMatt Macy  * Suspend an intent log.  While in suspended mode, we still honor
4029eda14cbcSMatt Macy  * synchronous semantics, but we rely on txg_wait_synced() to do it.
4030eda14cbcSMatt Macy  * On old version pools, we suspend the log briefly when taking a
4031eda14cbcSMatt Macy  * snapshot so that it will have an empty intent log.
4032eda14cbcSMatt Macy  *
4033eda14cbcSMatt Macy  * Long holds are not really intended to be used the way we do here --
4034eda14cbcSMatt Macy  * held for such a short time.  A concurrent caller of dsl_dataset_long_held()
4035eda14cbcSMatt Macy  * could fail.  Therefore we take pains to only put a long hold if it is
4036eda14cbcSMatt Macy  * actually necessary.  Fortunately, it will only be necessary if the
4037eda14cbcSMatt Macy  * objset is currently mounted (or the ZVOL equivalent).  In that case it
4038eda14cbcSMatt Macy  * will already have a long hold, so we are not really making things any worse.
4039eda14cbcSMatt Macy  *
4040eda14cbcSMatt Macy  * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
4041eda14cbcSMatt Macy  * zvol_state_t), and use their mechanism to prevent their hold from being
4042eda14cbcSMatt Macy  * dropped (e.g. VFS_HOLD()).  However, that would be even more pain for
4043eda14cbcSMatt Macy  * very little gain.
4044eda14cbcSMatt Macy  *
4045eda14cbcSMatt Macy  * If cookiep == NULL, this does both the suspend and resume.
4046eda14cbcSMatt Macy  * Otherwise, it returns with the dataset "long held", and the cookie
4047eda14cbcSMatt Macy  * should be passed into zil_resume().
4048eda14cbcSMatt Macy  */
4049eda14cbcSMatt Macy int
4050eda14cbcSMatt Macy zil_suspend(const char *osname, void **cookiep)
4051eda14cbcSMatt Macy {
4052eda14cbcSMatt Macy 	objset_t *os;
4053eda14cbcSMatt Macy 	zilog_t *zilog;
4054eda14cbcSMatt Macy 	const zil_header_t *zh;
4055eda14cbcSMatt Macy 	int error;
4056eda14cbcSMatt Macy 
4057eda14cbcSMatt Macy 	error = dmu_objset_hold(osname, suspend_tag, &os);
4058eda14cbcSMatt Macy 	if (error != 0)
4059eda14cbcSMatt Macy 		return (error);
4060eda14cbcSMatt Macy 	zilog = dmu_objset_zil(os);
4061eda14cbcSMatt Macy 
4062eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_lock);
4063eda14cbcSMatt Macy 	zh = zilog->zl_header;
4064eda14cbcSMatt Macy 
4065eda14cbcSMatt Macy 	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
4066eda14cbcSMatt Macy 		mutex_exit(&zilog->zl_lock);
4067eda14cbcSMatt Macy 		dmu_objset_rele(os, suspend_tag);
4068eda14cbcSMatt Macy 		return (SET_ERROR(EBUSY));
4069eda14cbcSMatt Macy 	}
4070eda14cbcSMatt Macy 
4071eda14cbcSMatt Macy 	/*
4072eda14cbcSMatt Macy 	 * Don't put a long hold in the cases where we can avoid it.  This
4073eda14cbcSMatt Macy 	 * is when there is no cookie so we are doing a suspend & resume
4074eda14cbcSMatt Macy 	 * (i.e. called from zil_vdev_offline()), and there's nothing to do
4075eda14cbcSMatt Macy 	 * for the suspend because it's already suspended, or there's no ZIL.
4076eda14cbcSMatt Macy 	 */
4077eda14cbcSMatt Macy 	if (cookiep == NULL && !zilog->zl_suspending &&
4078eda14cbcSMatt Macy 	    (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
4079eda14cbcSMatt Macy 		mutex_exit(&zilog->zl_lock);
4080eda14cbcSMatt Macy 		dmu_objset_rele(os, suspend_tag);
4081eda14cbcSMatt Macy 		return (0);
4082eda14cbcSMatt Macy 	}
4083eda14cbcSMatt Macy 
4084eda14cbcSMatt Macy 	dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
4085eda14cbcSMatt Macy 	dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
4086eda14cbcSMatt Macy 
4087eda14cbcSMatt Macy 	zilog->zl_suspend++;
4088eda14cbcSMatt Macy 
4089eda14cbcSMatt Macy 	if (zilog->zl_suspend > 1) {
4090eda14cbcSMatt Macy 		/*
4091eda14cbcSMatt Macy 		 * Someone else is already suspending it.
4092eda14cbcSMatt Macy 		 * Just wait for them to finish.
4093eda14cbcSMatt Macy 		 */
4094eda14cbcSMatt Macy 
4095eda14cbcSMatt Macy 		while (zilog->zl_suspending)
4096eda14cbcSMatt Macy 			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
4097eda14cbcSMatt Macy 		mutex_exit(&zilog->zl_lock);
4098eda14cbcSMatt Macy 
4099eda14cbcSMatt Macy 		if (cookiep == NULL)
4100eda14cbcSMatt Macy 			zil_resume(os);
4101eda14cbcSMatt Macy 		else
4102eda14cbcSMatt Macy 			*cookiep = os;
4103eda14cbcSMatt Macy 		return (0);
4104eda14cbcSMatt Macy 	}
4105eda14cbcSMatt Macy 
4106eda14cbcSMatt Macy 	/*
4107eda14cbcSMatt Macy 	 * If there is no pointer to an on-disk block, this ZIL must not
4108eda14cbcSMatt Macy 	 * be active (e.g. filesystem not mounted), so there's nothing
4109eda14cbcSMatt Macy 	 * to clean up.
4110eda14cbcSMatt Macy 	 */
4111eda14cbcSMatt Macy 	if (BP_IS_HOLE(&zh->zh_log)) {
4112eda14cbcSMatt Macy 		ASSERT(cookiep != NULL); /* fast path already handled */
4113eda14cbcSMatt Macy 
4114eda14cbcSMatt Macy 		*cookiep = os;
4115eda14cbcSMatt Macy 		mutex_exit(&zilog->zl_lock);
4116eda14cbcSMatt Macy 		return (0);
4117eda14cbcSMatt Macy 	}
4118eda14cbcSMatt Macy 
4119eda14cbcSMatt Macy 	/*
4120eda14cbcSMatt Macy 	 * The ZIL has work to do. Ensure that the associated encryption
4121eda14cbcSMatt Macy 	 * key will remain mapped while we are committing the log by
4122eda14cbcSMatt Macy 	 * grabbing a reference to it. If the key isn't loaded we have no
4123eda14cbcSMatt Macy 	 * choice but to return an error until the wrapping key is loaded.
4124eda14cbcSMatt Macy 	 */
4125eda14cbcSMatt Macy 	if (os->os_encrypted &&
4126eda14cbcSMatt Macy 	    dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) {
4127eda14cbcSMatt Macy 		zilog->zl_suspend--;
4128eda14cbcSMatt Macy 		mutex_exit(&zilog->zl_lock);
4129eda14cbcSMatt Macy 		dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
4130eda14cbcSMatt Macy 		dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
4131eda14cbcSMatt Macy 		return (SET_ERROR(EACCES));
4132eda14cbcSMatt Macy 	}
4133eda14cbcSMatt Macy 
4134eda14cbcSMatt Macy 	zilog->zl_suspending = B_TRUE;
4135eda14cbcSMatt Macy 	mutex_exit(&zilog->zl_lock);
4136eda14cbcSMatt Macy 
4137eda14cbcSMatt Macy 	/*
4138eda14cbcSMatt Macy 	 * We need to use zil_commit_impl to ensure we wait for all
4139315ee00fSMartin Matuska 	 * LWB_STATE_OPENED, _CLOSED and _READY lwbs to be committed
4140eda14cbcSMatt Macy 	 * to disk before proceeding. If we used zil_commit instead, it
4141eda14cbcSMatt Macy 	 * would just call txg_wait_synced(), because zl_suspend is set.
4142eda14cbcSMatt Macy 	 * txg_wait_synced() doesn't wait for these lwbs to be
4143eda14cbcSMatt Macy 	 * LWB_STATE_FLUSH_DONE before returning.
4144eda14cbcSMatt Macy 	 */
4145eda14cbcSMatt Macy 	zil_commit_impl(zilog, 0);
4146eda14cbcSMatt Macy 
4147eda14cbcSMatt Macy 	/*
4148eda14cbcSMatt Macy 	 * Now that we've ensured all lwbs are LWB_STATE_FLUSH_DONE, we
4149eda14cbcSMatt Macy 	 * use txg_wait_synced() to ensure the data from the zilog has
4150eda14cbcSMatt Macy 	 * migrated to the main pool before calling zil_destroy().
4151eda14cbcSMatt Macy 	 */
4152eda14cbcSMatt Macy 	txg_wait_synced(zilog->zl_dmu_pool, 0);
4153eda14cbcSMatt Macy 
4154eda14cbcSMatt Macy 	zil_destroy(zilog, B_FALSE);
4155eda14cbcSMatt Macy 
4156eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_lock);
4157eda14cbcSMatt Macy 	zilog->zl_suspending = B_FALSE;
4158eda14cbcSMatt Macy 	cv_broadcast(&zilog->zl_cv_suspend);
4159eda14cbcSMatt Macy 	mutex_exit(&zilog->zl_lock);
4160eda14cbcSMatt Macy 
4161eda14cbcSMatt Macy 	if (os->os_encrypted)
4162eda14cbcSMatt Macy 		dsl_dataset_remove_key_mapping(dmu_objset_ds(os));
4163eda14cbcSMatt Macy 
4164eda14cbcSMatt Macy 	if (cookiep == NULL)
4165eda14cbcSMatt Macy 		zil_resume(os);
4166eda14cbcSMatt Macy 	else
4167eda14cbcSMatt Macy 		*cookiep = os;
4168eda14cbcSMatt Macy 	return (0);
4169eda14cbcSMatt Macy }
4170eda14cbcSMatt Macy 
4171eda14cbcSMatt Macy void
4172eda14cbcSMatt Macy zil_resume(void *cookie)
4173eda14cbcSMatt Macy {
4174eda14cbcSMatt Macy 	objset_t *os = cookie;
4175eda14cbcSMatt Macy 	zilog_t *zilog = dmu_objset_zil(os);
4176eda14cbcSMatt Macy 
4177eda14cbcSMatt Macy 	mutex_enter(&zilog->zl_lock);
4178eda14cbcSMatt Macy 	ASSERT(zilog->zl_suspend != 0);
4179eda14cbcSMatt Macy 	zilog->zl_suspend--;
4180eda14cbcSMatt Macy 	mutex_exit(&zilog->zl_lock);
4181eda14cbcSMatt Macy 	dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
4182eda14cbcSMatt Macy 	dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
4183eda14cbcSMatt Macy }
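
/*
 * Illustrative sketch (not part of this file): the cookie protocol for a
 * caller that needs the log held suspended across some operation.
 *
 *	void *cookie;
 *	int err = zil_suspend("pool/fs", &cookie);
 *	if (err == 0) {
 *		...operate; synchronous semantics come from txg_wait_synced()...
 *		zil_resume(cookie);
 *	}
 *
 * Passing cookiep == NULL instead performs the suspend and resume in a
 * single call, as zil_vdev_offline() and zil_reset() do.
 */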
4184eda14cbcSMatt Macy 
4185eda14cbcSMatt Macy typedef struct zil_replay_arg {
4186e92ffd9bSMartin Matuska 	zil_replay_func_t *const *zr_replay;
4187eda14cbcSMatt Macy 	void		*zr_arg;
4188eda14cbcSMatt Macy 	boolean_t	zr_byteswap;
4189eda14cbcSMatt Macy 	char		*zr_lr;
4190eda14cbcSMatt Macy } zil_replay_arg_t;
4191eda14cbcSMatt Macy 
4192eda14cbcSMatt Macy static int
4193180f8225SMatt Macy zil_replay_error(zilog_t *zilog, const lr_t *lr, int error)
4194eda14cbcSMatt Macy {
4195eda14cbcSMatt Macy 	char name[ZFS_MAX_DATASET_NAME_LEN];
4196eda14cbcSMatt Macy 
4197eda14cbcSMatt Macy 	zilog->zl_replaying_seq--;	/* didn't actually replay this one */
4198eda14cbcSMatt Macy 
4199eda14cbcSMatt Macy 	dmu_objset_name(zilog->zl_os, name);
4200eda14cbcSMatt Macy 
4201eda14cbcSMatt Macy 	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
4202eda14cbcSMatt Macy 	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
4203eda14cbcSMatt Macy 	    (u_longlong_t)lr->lrc_seq,
4204eda14cbcSMatt Macy 	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
4205eda14cbcSMatt Macy 	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
4206eda14cbcSMatt Macy 
4207eda14cbcSMatt Macy 	return (error);
4208eda14cbcSMatt Macy }
4209eda14cbcSMatt Macy 
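/*
 * Note a failed replay of one log record: roll back zl_replaying_seq so
 * the record is not counted as replayed, warn with the dataset name,
 * sequence number and transaction type, and hand back the error.
 */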
4210eda14cbcSMatt Macy static int
4211180f8225SMatt Macy zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra,
4212180f8225SMatt Macy     uint64_t claim_txg)
4213eda14cbcSMatt Macy {
4214eda14cbcSMatt Macy 	zil_replay_arg_t *zr = zra;
4215eda14cbcSMatt Macy 	const zil_header_t *zh = zilog->zl_header;
4216eda14cbcSMatt Macy 	uint64_t reclen = lr->lrc_reclen;
4217eda14cbcSMatt Macy 	uint64_t txtype = lr->lrc_txtype;
4218eda14cbcSMatt Macy 	int error = 0;
4219eda14cbcSMatt Macy 
4220eda14cbcSMatt Macy 	zilog->zl_replaying_seq = lr->lrc_seq;
4221eda14cbcSMatt Macy 
4222eda14cbcSMatt Macy 	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
4223eda14cbcSMatt Macy 		return (0);
4224eda14cbcSMatt Macy 
4225eda14cbcSMatt Macy 	if (lr->lrc_txg < claim_txg)		/* already committed */
4226eda14cbcSMatt Macy 		return (0);
4227eda14cbcSMatt Macy 
4228eda14cbcSMatt Macy 	/* Strip case-insensitive bit, still present in log record */
4229eda14cbcSMatt Macy 	txtype &= ~TX_CI;
4230eda14cbcSMatt Macy 
4231eda14cbcSMatt Macy 	if (txtype == 0 || txtype >= TX_MAX_TYPE)
4232eda14cbcSMatt Macy 		return (zil_replay_error(zilog, lr, EINVAL));
4233eda14cbcSMatt Macy 
4234eda14cbcSMatt Macy 	/*
4235eda14cbcSMatt Macy 	 * If this record type can be logged out of order, the object
4236eda14cbcSMatt Macy 	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
4237eda14cbcSMatt Macy 	 */
4238eda14cbcSMatt Macy 	if (TX_OOO(txtype)) {
4239eda14cbcSMatt Macy 		error = dmu_object_info(zilog->zl_os,
4240eda14cbcSMatt Macy 		    LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL);
4241eda14cbcSMatt Macy 		if (error == ENOENT || error == EEXIST)
4242eda14cbcSMatt Macy 			return (0);
4243eda14cbcSMatt Macy 	}
4244eda14cbcSMatt Macy 
4245eda14cbcSMatt Macy 	/*
4246eda14cbcSMatt Macy 	 * Make a copy of the data so we can revise and extend it.
4247eda14cbcSMatt Macy 	 */
4248da5137abSMartin Matuska 	memcpy(zr->zr_lr, lr, reclen);
4249eda14cbcSMatt Macy 
4250eda14cbcSMatt Macy 	/*
4251eda14cbcSMatt Macy 	 * If this is a TX_WRITE with a blkptr, suck in the data.
4252eda14cbcSMatt Macy 	 */
4253eda14cbcSMatt Macy 	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
4254eda14cbcSMatt Macy 		error = zil_read_log_data(zilog, (lr_write_t *)lr,
4255eda14cbcSMatt Macy 		    zr->zr_lr + reclen);
4256eda14cbcSMatt Macy 		if (error != 0)
4257eda14cbcSMatt Macy 			return (zil_replay_error(zilog, lr, error));
4258eda14cbcSMatt Macy 	}
4259eda14cbcSMatt Macy 
4260eda14cbcSMatt Macy 	/*
4261eda14cbcSMatt Macy 	 * The log block containing this lr may have been byteswapped
4262eda14cbcSMatt Macy 	 * so that we can easily examine common fields like lrc_txtype.
4263eda14cbcSMatt Macy 	 * However, the log is a mix of different record types, and only the
4264eda14cbcSMatt Macy 	 * replay vectors know how to byteswap their records.  Therefore, if
4265eda14cbcSMatt Macy 	 * the lr was byteswapped, undo it before invoking the replay vector.
4266eda14cbcSMatt Macy 	 */
4267eda14cbcSMatt Macy 	if (zr->zr_byteswap)
4268eda14cbcSMatt Macy 		byteswap_uint64_array(zr->zr_lr, reclen);
4269eda14cbcSMatt Macy 
4270eda14cbcSMatt Macy 	/*
4271eda14cbcSMatt Macy 	 * We must now do two things atomically: replay this log record,
4272eda14cbcSMatt Macy 	 * and update the log header sequence number to reflect the fact that
4273eda14cbcSMatt Macy 	 * we did so. At the end of each replay function the sequence number
4274eda14cbcSMatt Macy 	 * is updated if we are in replay mode.
4275eda14cbcSMatt Macy 	 */
4276eda14cbcSMatt Macy 	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
4277eda14cbcSMatt Macy 	if (error != 0) {
4278eda14cbcSMatt Macy 		/*
4279eda14cbcSMatt Macy 		 * The DMU's dnode layer doesn't see removes until the txg
4280eda14cbcSMatt Macy 		 * commits, so a subsequent claim can spuriously fail with
4281eda14cbcSMatt Macy 		 * EEXIST. So if we receive any error we try syncing out
4282eda14cbcSMatt Macy 		 * any removes then retry the transaction.  Note that we
4283eda14cbcSMatt Macy 		 * specify B_FALSE for byteswap now, so we don't do it twice.
4284eda14cbcSMatt Macy 		 */
4285eda14cbcSMatt Macy 		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
4286eda14cbcSMatt Macy 		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
4287eda14cbcSMatt Macy 		if (error != 0)
4288eda14cbcSMatt Macy 			return (zil_replay_error(zilog, lr, error));
4289eda14cbcSMatt Macy 	}
4290eda14cbcSMatt Macy 	return (0);
4291eda14cbcSMatt Macy }
4292eda14cbcSMatt Macy 
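/*
 * zil_parse() block callback used during replay: it only counts the log
 * blocks visited, so zl_replay_blks reflects how much log was replayed.
 */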
4293eda14cbcSMatt Macy static int
4294180f8225SMatt Macy zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
4295eda14cbcSMatt Macy {
4296e92ffd9bSMartin Matuska 	(void) bp, (void) arg, (void) claim_txg;
4297e92ffd9bSMartin Matuska 
4298eda14cbcSMatt Macy 	zilog->zl_replay_blks++;
4299eda14cbcSMatt Macy 
4300eda14cbcSMatt Macy 	return (0);
4301eda14cbcSMatt Macy }
4302eda14cbcSMatt Macy 
4303eda14cbcSMatt Macy /*
4304eda14cbcSMatt Macy  * If this dataset has a non-empty intent log, replay it and destroy it.
4305dbd5678dSMartin Matuska  * Return B_TRUE if there were any entries to replay.
4306eda14cbcSMatt Macy  */
4307dbd5678dSMartin Matuska boolean_t
4308e92ffd9bSMartin Matuska zil_replay(objset_t *os, void *arg,
4309e92ffd9bSMartin Matuska     zil_replay_func_t *const replay_func[TX_MAX_TYPE])
4310eda14cbcSMatt Macy {
4311eda14cbcSMatt Macy 	zilog_t *zilog = dmu_objset_zil(os);
4312eda14cbcSMatt Macy 	const zil_header_t *zh = zilog->zl_header;
4313eda14cbcSMatt Macy 	zil_replay_arg_t zr;
4314eda14cbcSMatt Macy 
4315eda14cbcSMatt Macy 	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
4316dbd5678dSMartin Matuska 		return (zil_destroy(zilog, B_TRUE));
4317eda14cbcSMatt Macy 	}
4318eda14cbcSMatt Macy 
4319eda14cbcSMatt Macy 	zr.zr_replay = replay_func;
4320eda14cbcSMatt Macy 	zr.zr_arg = arg;
4321eda14cbcSMatt Macy 	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
4322eda14cbcSMatt Macy 	zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
4323eda14cbcSMatt Macy 
4324eda14cbcSMatt Macy 	/*
4325eda14cbcSMatt Macy 	 * Wait for in-progress removes to sync before starting replay.
4326eda14cbcSMatt Macy 	 */
4327eda14cbcSMatt Macy 	txg_wait_synced(zilog->zl_dmu_pool, 0);
4328eda14cbcSMatt Macy 
4329eda14cbcSMatt Macy 	zilog->zl_replay = B_TRUE;
4330eda14cbcSMatt Macy 	zilog->zl_replay_time = ddi_get_lbolt();
4331eda14cbcSMatt Macy 	ASSERT(zilog->zl_replay_blks == 0);
4332eda14cbcSMatt Macy 	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
4333eda14cbcSMatt Macy 	    zh->zh_claim_txg, B_TRUE);
4334eda14cbcSMatt Macy 	vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
4335eda14cbcSMatt Macy 
4336eda14cbcSMatt Macy 	zil_destroy(zilog, B_FALSE);
4337eda14cbcSMatt Macy 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
4338eda14cbcSMatt Macy 	zilog->zl_replay = B_FALSE;
4339dbd5678dSMartin Matuska 
4340dbd5678dSMartin Matuska 	return (B_TRUE);
4341eda14cbcSMatt Macy }
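
/*
 * Illustrative sketch (not part of this file): a filesystem mount path
 * hands zil_replay() its per-txtype vector of replay callbacks; the
 * names below are hypothetical placeholders.
 *
 *	static zil_replay_func_t *const my_replay_vector[TX_MAX_TYPE] = {
 *		...one entry per TX_* record type...
 *	};
 *	boolean_t replayed = zil_replay(os, my_fs_data, my_replay_vector);
 */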
4342eda14cbcSMatt Macy 
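/*
 * Return B_TRUE if the caller may skip logging its own itx because the
 * operation is already covered: either zl_sync is ZFS_SYNC_DISABLED, or
 * we are mid-replay, in which case the replayed sequence number is also
 * recorded against the transaction's txg so zil_sync() can advance
 * zh_replay_seq.
 */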
4343eda14cbcSMatt Macy boolean_t
4344eda14cbcSMatt Macy zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
4345eda14cbcSMatt Macy {
4346eda14cbcSMatt Macy 	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
4347eda14cbcSMatt Macy 		return (B_TRUE);
4348eda14cbcSMatt Macy 
4349eda14cbcSMatt Macy 	if (zilog->zl_replay) {
4350eda14cbcSMatt Macy 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
4351eda14cbcSMatt Macy 		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
4352eda14cbcSMatt Macy 		    zilog->zl_replaying_seq;
4353eda14cbcSMatt Macy 		return (B_TRUE);
4354eda14cbcSMatt Macy 	}
4355eda14cbcSMatt Macy 
4356eda14cbcSMatt Macy 	return (B_FALSE);
4357eda14cbcSMatt Macy }
4358eda14cbcSMatt Macy 
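/*
 * Empty the intent log for osname by briefly suspending and resuming it.
 * EACCES (encryption key not loaded) and EBUSY (replay needed) are passed
 * through; any other failure is reported as EEXIST.
 */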
4359eda14cbcSMatt Macy int
4360eda14cbcSMatt Macy zil_reset(const char *osname, void *arg)
4361eda14cbcSMatt Macy {
4362e92ffd9bSMartin Matuska 	(void) arg;
4363eda14cbcSMatt Macy 
4364e92ffd9bSMartin Matuska 	int error = zil_suspend(osname, NULL);
4365eda14cbcSMatt Macy 	/* EACCES means crypto key not loaded */
4366eda14cbcSMatt Macy 	if ((error == EACCES) || (error == EBUSY))
4367eda14cbcSMatt Macy 		return (SET_ERROR(error));
4368eda14cbcSMatt Macy 	if (error != 0)
4369eda14cbcSMatt Macy 		return (SET_ERROR(EEXIST));
4370eda14cbcSMatt Macy 	return (0);
4371eda14cbcSMatt Macy }
4372eda14cbcSMatt Macy 
4373eda14cbcSMatt Macy EXPORT_SYMBOL(zil_alloc);
4374eda14cbcSMatt Macy EXPORT_SYMBOL(zil_free);
4375eda14cbcSMatt Macy EXPORT_SYMBOL(zil_open);
4376eda14cbcSMatt Macy EXPORT_SYMBOL(zil_close);
4377eda14cbcSMatt Macy EXPORT_SYMBOL(zil_replay);
4378eda14cbcSMatt Macy EXPORT_SYMBOL(zil_replaying);
4379eda14cbcSMatt Macy EXPORT_SYMBOL(zil_destroy);
4380eda14cbcSMatt Macy EXPORT_SYMBOL(zil_destroy_sync);
4381eda14cbcSMatt Macy EXPORT_SYMBOL(zil_itx_create);
4382eda14cbcSMatt Macy EXPORT_SYMBOL(zil_itx_destroy);
4383eda14cbcSMatt Macy EXPORT_SYMBOL(zil_itx_assign);
4384eda14cbcSMatt Macy EXPORT_SYMBOL(zil_commit);
4385eda14cbcSMatt Macy EXPORT_SYMBOL(zil_claim);
4386eda14cbcSMatt Macy EXPORT_SYMBOL(zil_check_log_chain);
4387eda14cbcSMatt Macy EXPORT_SYMBOL(zil_sync);
4388eda14cbcSMatt Macy EXPORT_SYMBOL(zil_clean);
4389eda14cbcSMatt Macy EXPORT_SYMBOL(zil_suspend);
4390eda14cbcSMatt Macy EXPORT_SYMBOL(zil_resume);
4391eda14cbcSMatt Macy EXPORT_SYMBOL(zil_lwb_add_block);
4392eda14cbcSMatt Macy EXPORT_SYMBOL(zil_bp_tree_add);
4393eda14cbcSMatt Macy EXPORT_SYMBOL(zil_set_sync);
4394eda14cbcSMatt Macy EXPORT_SYMBOL(zil_set_logbias);
4395271171e0SMartin Matuska EXPORT_SYMBOL(zil_sums_init);
4396271171e0SMartin Matuska EXPORT_SYMBOL(zil_sums_fini);
4397271171e0SMartin Matuska EXPORT_SYMBOL(zil_kstat_values_update);
4398eda14cbcSMatt Macy 
4399be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, UINT, ZMOD_RW,
4400eda14cbcSMatt Macy 	"ZIL block open timeout percentage");
4401eda14cbcSMatt Macy 
4402eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
4403eda14cbcSMatt Macy 	"Disable intent logging replay");
4404eda14cbcSMatt Macy 
4405eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
4406eda14cbcSMatt Macy 	"Disable ZIL cache flushes");
4407eda14cbcSMatt Macy 
4408dbd5678dSMartin Matuska ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, U64, ZMOD_RW,
4409eda14cbcSMatt Macy 	"Limit in bytes slog sync writes per commit");
4410eda14cbcSMatt Macy 
4411be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW,
4412eda14cbcSMatt Macy 	"Limit in bytes of ZIL log block size");
4413b2526e8bSMartin Matuska 
4414b2526e8bSMartin Matuska ZFS_MODULE_PARAM(zfs_zil, zil_, maxcopied, UINT, ZMOD_RW,
4415b2526e8bSMartin Matuska 	"Limit in bytes WR_COPIED size");
4416