xref: /illumos-gate/usr/src/uts/common/fs/zfs/zil.c (revision f8cbe0e7fd4f172d5ed456a8f7425890e1ea20cd)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
24  * Copyright (c) 2014 Integros [integros.com]
25  */
26 
27 /* Portions Copyright 2010 Robert Milkowski */
28 
29 #include <sys/zfs_context.h>
30 #include <sys/spa.h>
31 #include <sys/dmu.h>
32 #include <sys/zap.h>
33 #include <sys/arc.h>
34 #include <sys/stat.h>
35 #include <sys/resource.h>
36 #include <sys/zil.h>
37 #include <sys/zil_impl.h>
38 #include <sys/dsl_dataset.h>
39 #include <sys/vdev_impl.h>
40 #include <sys/dmu_tx.h>
41 #include <sys/dsl_pool.h>
42 #include <sys/abd.h>
43 
44 /*
45  * The zfs intent log (ZIL) saves, in memory, transaction records of
46  * system calls that change the file system, with enough information
47  * to be able to replay them. These are stored in memory until
48  * either the DMU transaction group (txg) commits them to the stable pool
49  * and they can be discarded, or they are flushed to the stable log
50  * (also in the pool) due to a fsync, O_DSYNC or other synchronous
51  * requirement. In the event of a panic or power failure, those log
52  * records (transactions) are replayed.
53  *
54  * There is one ZIL per file system. Its on-disk (pool) format consists
55  * of 3 parts:
56  *
57  * 	- ZIL header
58  * 	- ZIL blocks
59  * 	- ZIL records
60  *
61  * A log record holds a system call transaction. Log blocks can
62  * hold many log records and the blocks are chained together.
63  * Each ZIL block contains a block pointer (blkptr_t) to the next
64  * ZIL block in the chain. The ZIL header points to the first
65  * block in the chain. Note there is not a fixed place in the pool
66  * to hold blocks. They are dynamically allocated and freed as
67  * needed from the blocks available.
68  */
69 
70 /*
71  * Disable intent logging replay.  This global ZIL switch affects all pools.
72  */
73 int zil_replay_disable = 0;
74 
75 /*
76  * Tunable parameter for debugging or performance analysis.  Setting
77  * zfs_nocacheflush will cause corruption on power loss if a volatile
78  * out-of-order write cache is enabled.
79  */
80 boolean_t zfs_nocacheflush = B_FALSE;
81 
82 /*
83  * Limit SLOG write size per commit executed with synchronous priority.
84  * Any writes above that will be executed with lower (asynchronous) priority
85  * to limit potential SLOG device abuse by single active ZIL writer.
86  */
87 uint64_t zil_slog_bulk = 768 * 1024;
88 
89 static kmem_cache_t *zil_lwb_cache;
90 
91 static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
92 
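/*
 * True when no log records have been written into the lwb yet: the
 * remaining space (lwb_sz - lwb_nused) still equals the usable space of
 * the block (its size less the zil_chain_t), for either chain format.
 */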
93 #define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
94     sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
95 
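/*
 * AVL comparator for zl_bp_tree nodes: order block pointers by the
 * (vdev, offset) of their first DVA.
 */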
96 static int
97 zil_bp_compare(const void *x1, const void *x2)
98 {
99 	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
100 	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
101 
102 	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
103 		return (-1);
104 	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
105 		return (1);
106 
107 	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
108 		return (-1);
109 	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
110 		return (1);
111 
112 	return (0);
113 }
114 
115 static void
116 zil_bp_tree_init(zilog_t *zilog)
117 {
118 	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
119 	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
120 }
121 
122 static void
123 zil_bp_tree_fini(zilog_t *zilog)
124 {
125 	avl_tree_t *t = &zilog->zl_bp_tree;
126 	zil_bp_node_t *zn;
127 	void *cookie = NULL;
128 
129 	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
130 		kmem_free(zn, sizeof (zil_bp_node_t));
131 
132 	avl_destroy(t);
133 }
134 
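/*
 * Record a block pointer's DVA in zl_bp_tree.  Returns EEXIST if the
 * block has already been seen, which callers use to avoid claiming or
 * freeing the same block twice.
 */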
135 int
136 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
137 {
138 	avl_tree_t *t = &zilog->zl_bp_tree;
139 	const dva_t *dva;
140 	zil_bp_node_t *zn;
141 	avl_index_t where;
142 
143 	if (BP_IS_EMBEDDED(bp))
144 		return (0);
145 
146 	dva = BP_IDENTITY(bp);
147 
148 	if (avl_find(t, dva, &where) != NULL)
149 		return (SET_ERROR(EEXIST));
150 
151 	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
152 	zn->zn_dva = *dva;
153 	avl_insert(t, zn, where);
154 
155 	return (0);
156 }
157 
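/*
 * Return a writable view of the ZIL header.  The header is on-disk state,
 * so callers may only modify it from syncing context (e.g. zil_sync()).
 */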
158 static zil_header_t *
159 zil_header_in_syncing_context(zilog_t *zilog)
160 {
161 	return ((zil_header_t *)zilog->zl_header);
162 }
163 
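/*
 * Seed the embedded checksum for a new log chain: two random words that
 * act as the chain GUID, the objset id, and a starting sequence of 1.
 */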
164 static void
165 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
166 {
167 	zio_cksum_t *zc = &bp->blk_cksum;
168 
169 	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
170 	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
171 	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
172 	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
173 }
174 
175 /*
176  * Read a log block and make sure it's valid.
177  */
178 static int
179 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
180     char **end)
181 {
182 	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
183 	arc_flags_t aflags = ARC_FLAG_WAIT;
184 	arc_buf_t *abuf = NULL;
185 	zbookmark_phys_t zb;
186 	int error;
187 
188 	if (zilog->zl_header->zh_claim_txg == 0)
189 		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
190 
191 	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
192 		zio_flags |= ZIO_FLAG_SPECULATIVE;
193 
194 	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
195 	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
196 
197 	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
198 	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
199 
200 	if (error == 0) {
201 		zio_cksum_t cksum = bp->blk_cksum;
202 
203 		/*
204 		 * Validate the checksummed log block.
205 		 *
206 		 * Sequence numbers should be... sequential.  The checksum
207 		 * verifier for the next block should be bp's checksum plus 1.
208 		 *
209 		 * Also check the log chain linkage and size used.
210 		 */
211 		cksum.zc_word[ZIL_ZC_SEQ]++;
212 
213 		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
214 			zil_chain_t *zilc = abuf->b_data;
215 			char *lr = (char *)(zilc + 1);
216 			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
217 
218 			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
219 			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
220 				error = SET_ERROR(ECKSUM);
221 			} else {
222 				ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
223 				bcopy(lr, dst, len);
224 				*end = (char *)dst + len;
225 				*nbp = zilc->zc_next_blk;
226 			}
227 		} else {
228 			char *lr = abuf->b_data;
229 			uint64_t size = BP_GET_LSIZE(bp);
230 			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
231 
232 			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
233 			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
234 			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
235 				error = SET_ERROR(ECKSUM);
236 			} else {
237 				ASSERT3U(zilc->zc_nused, <=,
238 				    SPA_OLD_MAXBLOCKSIZE);
239 				bcopy(lr, dst, zilc->zc_nused);
240 				*end = (char *)dst + zilc->zc_nused;
241 				*nbp = zilc->zc_next_blk;
242 			}
243 		}
244 
245 		arc_buf_destroy(abuf, &abuf);
246 	}
247 
248 	return (error);
249 }
250 
251 /*
252  * Read a TX_WRITE log data block.
253  */
254 static int
255 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
256 {
257 	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
258 	const blkptr_t *bp = &lr->lr_blkptr;
259 	arc_flags_t aflags = ARC_FLAG_WAIT;
260 	arc_buf_t *abuf = NULL;
261 	zbookmark_phys_t zb;
262 	int error;
263 
264 	if (BP_IS_HOLE(bp)) {
265 		if (wbuf != NULL)
266 			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
267 		return (0);
268 	}
269 
270 	if (zilog->zl_header->zh_claim_txg == 0)
271 		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
272 
273 	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
274 	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
275 
276 	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
277 	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
278 
279 	if (error == 0) {
280 		if (wbuf != NULL)
281 			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
282 		arc_buf_destroy(abuf, &abuf);
283 	}
284 
285 	return (error);
286 }
287 
288 /*
289  * Parse the intent log, and call parse_func for each valid record within.
290  */
291 int
292 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
293     zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
294 {
295 	const zil_header_t *zh = zilog->zl_header;
296 	boolean_t claimed = !!zh->zh_claim_txg;
297 	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
298 	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
299 	uint64_t max_blk_seq = 0;
300 	uint64_t max_lr_seq = 0;
301 	uint64_t blk_count = 0;
302 	uint64_t lr_count = 0;
303 	blkptr_t blk, next_blk;
304 	char *lrbuf, *lrp;
305 	int error = 0;
306 
307 	/*
308 	 * Old logs didn't record the maximum zh_claim_lr_seq.
309 	 */
310 	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
311 		claim_lr_seq = UINT64_MAX;
312 
313 	/*
314 	 * Starting at the block pointed to by zh_log we read the log chain.
315 	 * For each block in the chain we strongly check that block to
316 	 * ensure its validity.  We stop when an invalid block is found.
317 	 * For each block pointer in the chain we call parse_blk_func().
318 	 * For each record in each valid block we call parse_lr_func().
319 	 * If the log has been claimed, stop if we encounter a sequence
320 	 * number greater than the highest claimed sequence number.
321 	 */
322 	lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
323 	zil_bp_tree_init(zilog);
324 
325 	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
326 		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
327 		int reclen;
328 		char *end;
329 
330 		if (blk_seq > claim_blk_seq)
331 			break;
332 		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
333 			break;
334 		ASSERT3U(max_blk_seq, <, blk_seq);
335 		max_blk_seq = blk_seq;
336 		blk_count++;
337 
338 		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
339 			break;
340 
341 		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
342 		if (error != 0)
343 			break;
344 
345 		for (lrp = lrbuf; lrp < end; lrp += reclen) {
346 			lr_t *lr = (lr_t *)lrp;
347 			reclen = lr->lrc_reclen;
348 			ASSERT3U(reclen, >=, sizeof (lr_t));
349 			if (lr->lrc_seq > claim_lr_seq)
350 				goto done;
351 			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
352 				goto done;
353 			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
354 			max_lr_seq = lr->lrc_seq;
355 			lr_count++;
356 		}
357 	}
358 done:
359 	zilog->zl_parse_error = error;
360 	zilog->zl_parse_blk_seq = max_blk_seq;
361 	zilog->zl_parse_lr_seq = max_lr_seq;
362 	zilog->zl_parse_blk_count = blk_count;
363 	zilog->zl_parse_lr_count = lr_count;
364 
365 	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
366 	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));
367 
368 	zil_bp_tree_fini(zilog);
369 	zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);
370 
371 	return (error);
372 }
373 
374 static int
375 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
376 {
377 	/*
378 	 * Claim log block if not already committed and not already claimed.
379 	 * If tx == NULL, just verify that the block is claimable.
380 	 */
381 	if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
382 	    zil_bp_tree_add(zilog, bp) != 0)
383 		return (0);
384 
385 	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
386 	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
387 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
388 }
389 
390 static int
391 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
392 {
393 	lr_write_t *lr = (lr_write_t *)lrc;
394 	int error;
395 
396 	if (lrc->lrc_txtype != TX_WRITE)
397 		return (0);
398 
399 	/*
400 	 * If the block is not readable, don't claim it.  This can happen
401 	 * in normal operation when a log block is written to disk before
402 	 * some of the dmu_sync() blocks it points to.  In this case, the
403 	 * transaction cannot have been committed to anyone (we would have
404 	 * waited for all writes to be stable first), so it is semantically
405 	 * correct to declare this the end of the log.
406 	 */
407 	if (lr->lr_blkptr.blk_birth >= first_txg &&
408 	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
409 		return (error);
410 	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
411 }
412 
413 /* ARGSUSED */
414 static int
415 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
416 {
417 	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
418 
419 	return (0);
420 }
421 
422 static int
423 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
424 {
425 	lr_write_t *lr = (lr_write_t *)lrc;
426 	blkptr_t *bp = &lr->lr_blkptr;
427 
428 	/*
429 	 * If we previously claimed it, we need to free it.
430 	 */
431 	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
432 	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
433 	    !BP_IS_HOLE(bp))
434 		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
435 
436 	return (0);
437 }
438 
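/*
 * Allocate a log write buffer (lwb) for the given log block and append it
 * to zl_lwb_list.  For ZILOG2 (slim ZIL) blocks the zil_chain_t sits at
 * the front of the buffer; for older blocks it is a trailer.
 */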
439 static lwb_t *
440 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg)
441 {
442 	lwb_t *lwb;
443 
444 	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
445 	lwb->lwb_zilog = zilog;
446 	lwb->lwb_blk = *bp;
447 	lwb->lwb_slog = slog;
448 	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
449 	lwb->lwb_max_txg = txg;
450 	lwb->lwb_zio = NULL;
451 	lwb->lwb_tx = NULL;
452 	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
453 		lwb->lwb_nused = sizeof (zil_chain_t);
454 		lwb->lwb_sz = BP_GET_LSIZE(bp);
455 	} else {
456 		lwb->lwb_nused = 0;
457 		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
458 	}
459 
460 	mutex_enter(&zilog->zl_lock);
461 	list_insert_tail(&zilog->zl_lwb_list, lwb);
462 	mutex_exit(&zilog->zl_lock);
463 
464 	return (lwb);
465 }
466 
467 /*
468  * Called when we create in-memory log transactions so that we know
469  * to cleanup the itxs at the end of spa_sync().
470  */
471 void
472 zilog_dirty(zilog_t *zilog, uint64_t txg)
473 {
474 	dsl_pool_t *dp = zilog->zl_dmu_pool;
475 	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
476 
477 	if (ds->ds_is_snapshot)
478 		panic("dirtying snapshot!");
479 
480 	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
481 		/* up the hold count until we can be written out */
482 		dmu_buf_add_ref(ds->ds_dbuf, zilog);
483 	}
484 }
485 
486 /*
487  * Determine if the zil is dirty in the specified txg. Callers wanting to
488  * ensure that the dirty state does not change must hold the itxg_lock for
489  * the specified txg. Holding the lock will ensure that the zil cannot be
490  * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
491  * state.
492  */
493 boolean_t
494 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
495 {
496 	dsl_pool_t *dp = zilog->zl_dmu_pool;
497 
498 	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
499 		return (B_TRUE);
500 	return (B_FALSE);
501 }
502 
503 /*
504  * Determine if the zil is dirty. The zil is considered dirty if it has
505  * any pending itx records that have not been cleaned by zil_clean().
506  */
507 boolean_t
508 zilog_is_dirty(zilog_t *zilog)
509 {
510 	dsl_pool_t *dp = zilog->zl_dmu_pool;
511 
512 	for (int t = 0; t < TXG_SIZE; t++) {
513 		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
514 			return (B_TRUE);
515 	}
516 	return (B_FALSE);
517 }
518 
519 /*
520  * Create an on-disk intent log.
521  */
522 static lwb_t *
523 zil_create(zilog_t *zilog)
524 {
525 	const zil_header_t *zh = zilog->zl_header;
526 	lwb_t *lwb = NULL;
527 	uint64_t txg = 0;
528 	dmu_tx_t *tx = NULL;
529 	blkptr_t blk;
530 	int error = 0;
531 	boolean_t slog = FALSE;
532 
533 	/*
534 	 * Wait for any previous destroy to complete.
535 	 */
536 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
537 
538 	ASSERT(zh->zh_claim_txg == 0);
539 	ASSERT(zh->zh_replay_seq == 0);
540 
541 	blk = zh->zh_log;
542 
543 	/*
544 	 * Allocate an initial log block if:
545 	 *    - there isn't one already
546  *    - the existing block is the wrong endianness
547 	 */
548 	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
549 		tx = dmu_tx_create(zilog->zl_os);
550 		VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
551 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
552 		txg = dmu_tx_get_txg(tx);
553 
554 		if (!BP_IS_HOLE(&blk)) {
555 			zio_free_zil(zilog->zl_spa, txg, &blk);
556 			BP_ZERO(&blk);
557 		}
558 
559 		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
560 		    ZIL_MIN_BLKSZ, &slog);
561 
562 		if (error == 0)
563 			zil_init_log_chain(zilog, &blk);
564 	}
565 
566 	/*
567 	 * Allocate a log write buffer (lwb) for the first log block.
568 	 */
569 	if (error == 0)
570 		lwb = zil_alloc_lwb(zilog, &blk, slog, txg);
571 
572 	/*
573 	 * If we just allocated the first log block, commit our transaction
574  * and wait for zil_sync() to stuff the block pointer into zh_log.
575 	 * (zh is part of the MOS, so we cannot modify it in open context.)
576 	 */
577 	if (tx != NULL) {
578 		dmu_tx_commit(tx);
579 		txg_wait_synced(zilog->zl_dmu_pool, txg);
580 	}
581 
582 	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
583 
584 	return (lwb);
585 }
586 
587 /*
588  * In one tx, free all log blocks and clear the log header.
589  * If keep_first is set, then we're replaying a log with no content.
590  * We want to keep the first block, however, so that the first
591  * synchronous transaction doesn't require a txg_wait_synced()
592  * in zil_create().  We don't need to txg_wait_synced() here either
593  * when keep_first is set, because both zil_create() and zil_destroy()
594  * will wait for any in-progress destroys to complete.
595  */
596 void
597 zil_destroy(zilog_t *zilog, boolean_t keep_first)
598 {
599 	const zil_header_t *zh = zilog->zl_header;
600 	lwb_t *lwb;
601 	dmu_tx_t *tx;
602 	uint64_t txg;
603 
604 	/*
605 	 * Wait for any previous destroy to complete.
606 	 */
607 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
608 
609 	zilog->zl_old_header = *zh;		/* debugging aid */
610 
611 	if (BP_IS_HOLE(&zh->zh_log))
612 		return;
613 
614 	tx = dmu_tx_create(zilog->zl_os);
615 	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
616 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
617 	txg = dmu_tx_get_txg(tx);
618 
619 	mutex_enter(&zilog->zl_lock);
620 
621 	ASSERT3U(zilog->zl_destroy_txg, <, txg);
622 	zilog->zl_destroy_txg = txg;
623 	zilog->zl_keep_first = keep_first;
624 
625 	if (!list_is_empty(&zilog->zl_lwb_list)) {
626 		ASSERT(zh->zh_claim_txg == 0);
627 		VERIFY(!keep_first);
628 		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
629 			list_remove(&zilog->zl_lwb_list, lwb);
630 			if (lwb->lwb_buf != NULL)
631 				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
632 			zio_free_zil(zilog->zl_spa, txg, &lwb->lwb_blk);
633 			kmem_cache_free(zil_lwb_cache, lwb);
634 		}
635 	} else if (!keep_first) {
636 		zil_destroy_sync(zilog, tx);
637 	}
638 	mutex_exit(&zilog->zl_lock);
639 
640 	dmu_tx_commit(tx);
641 }
642 
643 void
644 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
645 {
646 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
647 	(void) zil_parse(zilog, zil_free_log_block,
648 	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
649 }
650 
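/*
 * Claim callback invoked for each dataset at pool import time: claim the
 * dataset's log blocks so they cannot be reallocated before the log is
 * replayed or destroyed, and record the claim state in the ZIL header.
 */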
651 int
652 zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
653 {
654 	dmu_tx_t *tx = txarg;
655 	uint64_t first_txg = dmu_tx_get_txg(tx);
656 	zilog_t *zilog;
657 	zil_header_t *zh;
658 	objset_t *os;
659 	int error;
660 
661 	error = dmu_objset_own_obj(dp, ds->ds_object,
662 	    DMU_OST_ANY, B_FALSE, FTAG, &os);
663 	if (error != 0) {
664 		/*
665 		 * EBUSY indicates that the objset is inconsistent, in which
666 		 * case it cannot have a ZIL.
667 		 */
668 		if (error != EBUSY) {
669 			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
670 			    (unsigned long long)ds->ds_object, error);
671 		}
672 		return (0);
673 	}
674 
675 	zilog = dmu_objset_zil(os);
676 	zh = zil_header_in_syncing_context(zilog);
677 
678 	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
679 		if (!BP_IS_HOLE(&zh->zh_log))
680 			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
681 		BP_ZERO(&zh->zh_log);
682 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
683 		dmu_objset_disown(os, FTAG);
684 		return (0);
685 	}
686 
687 	/*
688 	 * Claim all log blocks if we haven't already done so, and remember
689 	 * the highest claimed sequence number.  This ensures that if we can
690 	 * read only part of the log now (e.g. due to a missing device),
691 	 * but we can read the entire log later, we will not try to replay
692 	 * or destroy beyond the last block we successfully claimed.
693 	 */
694 	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
695 	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
696 		(void) zil_parse(zilog, zil_claim_log_block,
697 		    zil_claim_log_record, tx, first_txg);
698 		zh->zh_claim_txg = first_txg;
699 		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
700 		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
701 		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
702 			zh->zh_flags |= ZIL_REPLAY_NEEDED;
703 		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
704 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
705 	}
706 
707 	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
708 	dmu_objset_disown(os, FTAG);
709 	return (0);
710 }
711 
712 /*
713  * Check the log by walking the log chain.
714  * Checksum errors are ok as they indicate the end of the chain.
715  * Any other error (no device or read failure) returns an error.
716  */
717 /* ARGSUSED */
718 int
719 zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
720 {
721 	zilog_t *zilog;
722 	objset_t *os;
723 	blkptr_t *bp;
724 	int error;
725 
726 	ASSERT(tx == NULL);
727 
728 	error = dmu_objset_from_ds(ds, &os);
729 	if (error != 0) {
730 		cmn_err(CE_WARN, "can't open objset %llu, error %d",
731 		    (unsigned long long)ds->ds_object, error);
732 		return (0);
733 	}
734 
735 	zilog = dmu_objset_zil(os);
736 	bp = (blkptr_t *)&zilog->zl_header->zh_log;
737 
738 	/*
739 	 * Check the first block and determine if it's on a log device
740 	 * which may have been removed or faulted prior to loading this
741 	 * pool.  If so, there's no point in checking the rest of the log
742 	 * as its content should have already been synced to the pool.
743 	 */
744 	if (!BP_IS_HOLE(bp)) {
745 		vdev_t *vd;
746 		boolean_t valid = B_TRUE;
747 
748 		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
749 		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
750 		if (vd->vdev_islog && vdev_is_dead(vd))
751 			valid = vdev_log_state_valid(vd);
752 		spa_config_exit(os->os_spa, SCL_STATE, FTAG);
753 
754 		if (!valid)
755 			return (0);
756 	}
757 
758 	/*
759 	 * Because tx == NULL, zil_claim_log_block() will not actually claim
760 	 * any blocks, but just determine whether it is possible to do so.
761 	 * In addition to checking the log chain, zil_claim_log_block()
762 	 * will invoke zio_claim() with a done func of spa_claim_notify(),
763 	 * which will update spa_max_claim_txg.  See spa_load() for details.
764 	 */
765 	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
766 	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));
767 
768 	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
769 }
770 
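/*
 * AVL comparator for zl_vdev_tree nodes, ordered by top-level vdev id.
 */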
771 static int
772 zil_vdev_compare(const void *x1, const void *x2)
773 {
774 	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
775 	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
776 
777 	if (v1 < v2)
778 		return (-1);
779 	if (v1 > v2)
780 		return (1);
781 
782 	return (0);
783 }
784 
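/*
 * Note which top-level vdevs hold this block's DVAs so that
 * zil_flush_vdevs() can flush their write caches after the log writes
 * complete.  Skipped entirely when zfs_nocacheflush is set.
 */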
785 void
786 zil_add_block(zilog_t *zilog, const blkptr_t *bp)
787 {
788 	avl_tree_t *t = &zilog->zl_vdev_tree;
789 	avl_index_t where;
790 	zil_vdev_node_t *zv, zvsearch;
791 	int ndvas = BP_GET_NDVAS(bp);
792 	int i;
793 
794 	if (zfs_nocacheflush)
795 		return;
796 
797 	ASSERT(zilog->zl_writer);
798 
799 	/*
800 	 * Even though we're zl_writer, we still need a lock because the
801 	 * zl_get_data() callbacks may have dmu_sync() done callbacks
802 	 * that will run concurrently.
803 	 */
804 	mutex_enter(&zilog->zl_vdev_lock);
805 	for (i = 0; i < ndvas; i++) {
806 		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
807 		if (avl_find(t, &zvsearch, &where) == NULL) {
808 			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
809 			zv->zv_vdev = zvsearch.zv_vdev;
810 			avl_insert(t, zv, where);
811 		}
812 	}
813 	mutex_exit(&zilog->zl_vdev_lock);
814 }
815 
816 static void
817 zil_flush_vdevs(zilog_t *zilog)
818 {
819 	spa_t *spa = zilog->zl_spa;
820 	avl_tree_t *t = &zilog->zl_vdev_tree;
821 	void *cookie = NULL;
822 	zil_vdev_node_t *zv;
823 	zio_t *zio;
824 
825 	ASSERT(zilog->zl_writer);
826 
827 	/*
828 	 * We don't need zl_vdev_lock here because we're the zl_writer,
829 	 * and all zl_get_data() callbacks are done.
830 	 */
831 	if (avl_numnodes(t) == 0)
832 		return;
833 
834 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
835 
836 	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
837 
838 	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
839 		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
840 		if (vd != NULL)
841 			zio_flush(zio, vd);
842 		kmem_free(zv, sizeof (*zv));
843 	}
844 
845 	/*
846 	 * Wait for all the flushes to complete.  Not all devices actually
847 	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
848 	 */
849 	(void) zio_wait(zio);
850 
851 	spa_config_exit(spa, SCL_STATE, FTAG);
852 }
853 
854 /*
855  * Function called when a log block write completes
856  */
857 static void
858 zil_lwb_write_done(zio_t *zio)
859 {
860 	lwb_t *lwb = zio->io_private;
861 	zilog_t *zilog = lwb->lwb_zilog;
862 	dmu_tx_t *tx = lwb->lwb_tx;
863 
864 	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
865 	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
866 	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
867 	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
868 	ASSERT(!BP_IS_GANG(zio->io_bp));
869 	ASSERT(!BP_IS_HOLE(zio->io_bp));
870 	ASSERT(BP_GET_FILL(zio->io_bp) == 0);
871 
872 	/*
873 	 * Ensure the lwb buffer pointer is cleared before releasing
874 	 * the txg. If we have had an allocation failure and
875  * the txg is waiting to sync then we want zil_sync()
876 	 * to remove the lwb so that it's not picked up as the next new
877 	 * one in zil_commit_writer(). zil_sync() will only remove
878 	 * the lwb if lwb_buf is null.
879 	 */
880 	abd_put(zio->io_abd);
881 	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
882 	mutex_enter(&zilog->zl_lock);
883 	lwb->lwb_buf = NULL;
884 	lwb->lwb_tx = NULL;
885 	mutex_exit(&zilog->zl_lock);
886 
887 	/*
888 	 * Now that we've written this log block, we have a stable pointer
889 	 * to the next block in the chain, so it's OK to let the txg in
890 	 * which we allocated the next block sync.
891 	 */
892 	dmu_tx_commit(tx);
893 }
894 
895 /*
896  * Initialize the io for a log block.
897  */
898 static void
899 zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
900 {
901 	zbookmark_phys_t zb;
902 	zio_priority_t prio;
903 
904 	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
905 	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
906 	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
907 
908 	if (zilog->zl_root_zio == NULL) {
909 		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
910 		    ZIO_FLAG_CANFAIL);
911 	}
912 	if (lwb->lwb_zio == NULL) {
913 		abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
914 		    BP_GET_LSIZE(&lwb->lwb_blk));
915 		if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
916 			prio = ZIO_PRIORITY_SYNC_WRITE;
917 		else
918 			prio = ZIO_PRIORITY_ASYNC_WRITE;
919 		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
920 		    0, &lwb->lwb_blk, lwb_abd, BP_GET_LSIZE(&lwb->lwb_blk),
921 		    zil_lwb_write_done, lwb, prio,
922 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
923 	}
924 }
925 
926 /*
927  * Define a limited set of intent log block sizes.
928  *
929  * These must be a multiple of 4KB. Note only the amount used (again
930  * aligned to 4KB) actually gets written. However, we can't always just
931  * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
932  */
933 uint64_t zil_block_buckets[] = {
934     4096,		/* non TX_WRITE */
935     8192+4096,		/* data base */
936     32*1024 + 4096, 	/* NFS writes */
937     UINT64_MAX
938 };
939 
940 /*
941  * Start a log block write and advance to the next log block.
942  * Calls are serialized.
943  */
944 static lwb_t *
945 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
946 {
947 	lwb_t *nlwb = NULL;
948 	zil_chain_t *zilc;
949 	spa_t *spa = zilog->zl_spa;
950 	blkptr_t *bp;
951 	dmu_tx_t *tx;
952 	uint64_t txg;
953 	uint64_t zil_blksz, wsz;
954 	int i, error;
955 	boolean_t slog;
956 
957 	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
958 		zilc = (zil_chain_t *)lwb->lwb_buf;
959 		bp = &zilc->zc_next_blk;
960 	} else {
961 		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
962 		bp = &zilc->zc_next_blk;
963 	}
964 
965 	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
966 
967 	/*
968 	 * Allocate the next block and save its address in this block
969 	 * before writing it in order to establish the log chain.
970 	 * Note that if the allocation of nlwb synced before we wrote
971 	 * the block that points at it (lwb), we'd leak it if we crashed.
972 	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
973 	 * We dirty the dataset to ensure that zil_sync() will be called
974 	 * to clean up in the event of allocation failure or I/O failure.
975 	 */
976 	tx = dmu_tx_create(zilog->zl_os);
977 
978 	/*
979 	 * Since we are not going to create any new dirty data and we can even
980 	 * help with clearing the existing dirty data, we should not be subject
981 	 * to the dirty data based delays.
982 	 * We (ab)use TXG_WAITED to bypass the delay mechanism.
983 	 * One side effect from using TXG_WAITED is that dmu_tx_assign() can
984 	 * fail if the pool is suspended.  Those are dramatic circumstances,
985 	 * so we return NULL to signal that the normal ZIL processing is not
986 	 * possible and txg_wait_synced() should be used to ensure that the data
987 	 * is on disk.
988 	 */
989 	error = dmu_tx_assign(tx, TXG_WAITED);
990 	if (error != 0) {
991 		ASSERT3S(error, ==, EIO);
992 		dmu_tx_abort(tx);
993 		return (NULL);
994 	}
995 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
996 	txg = dmu_tx_get_txg(tx);
997 
998 	lwb->lwb_tx = tx;
999 
1000 	/*
1001 	 * Log blocks are pre-allocated. Here we select the size of the next
1002 	 * block, based on size used in the last block.
1003 	 * - first find the smallest bucket that will fit the block from a
1004 	 *   limited set of block sizes. This is because it's faster to write
1005 	 *   blocks allocated from the same metaslab as they are adjacent or
1006 	 *   close.
1007 	 * - next find the maximum from the new suggested size and an array of
1008 	 *   previous sizes. This lessens a picket fence effect of wrongly
1009  *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
1010 	 *   requests.
1011 	 *
1012 	 * Note we only write what is used, but we can't just allocate
1013 	 * the maximum block size because we can exhaust the available
1014 	 * pool log space.
1015 	 */
1016 	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
1017 	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
1018 		continue;
1019 	zil_blksz = zil_block_buckets[i];
1020 	if (zil_blksz == UINT64_MAX)
1021 		zil_blksz = SPA_OLD_MAXBLOCKSIZE;
1022 	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
1023 	for (i = 0; i < ZIL_PREV_BLKS; i++)
1024 		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
1025 	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
1026 
1027 	BP_ZERO(bp);
1028 	/* pass the old blkptr in order to spread log blocks across devs */
1029 	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz, &slog);
1030 	if (error == 0) {
1031 		ASSERT3U(bp->blk_birth, ==, txg);
1032 		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
1033 		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
1034 
1035 		/*
1036 		 * Allocate a new log write buffer (lwb).
1037 		 */
1038 		nlwb = zil_alloc_lwb(zilog, bp, slog, txg);
1039 
1040 		/* Record the block for later vdev flushing */
1041 		zil_add_block(zilog, &lwb->lwb_blk);
1042 	}
1043 
1044 	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
1045 		/* For Slim ZIL only write what is used. */
1046 		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
1047 		ASSERT3U(wsz, <=, lwb->lwb_sz);
1048 		zio_shrink(lwb->lwb_zio, wsz);
1049 
1050 	} else {
1051 		wsz = lwb->lwb_sz;
1052 	}
1053 
1054 	zilc->zc_pad = 0;
1055 	zilc->zc_nused = lwb->lwb_nused;
1056 	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
1057 
1058 	/*
1059 	 * clear unused data for security
1060 	 */
1061 	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
1062 
1063 	zio_nowait(lwb->lwb_zio); /* Kick off the write for the old log block */
1064 
1065 	/*
1066 	 * If there was an allocation failure then nlwb will be null which
1067 	 * forces a txg_wait_synced().
1068 	 */
1069 	return (nlwb);
1070 }
1071 
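/*
 * Copy one itx record into the current log write buffer, fetching or
 * referencing TX_WRITE data according to the itx write state (WR_COPIED,
 * WR_NEED_COPY, WR_INDIRECT).  Starts a new log block when the record
 * does not fit.  Returns the lwb to use for the next record, or NULL if
 * a new block could not be started.
 */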
1072 static lwb_t *
1073 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
1074 {
1075 	lr_t *lrcb, *lrc;
1076 	lr_write_t *lrwb, *lrw;
1077 	char *lr_buf;
1078 	uint64_t dlen, dnow, lwb_sp, reclen, txg;
1079 
1080 	if (lwb == NULL)
1081 		return (NULL);
1082 
1083 	ASSERT(lwb->lwb_buf != NULL);
1084 
1085 	lrc = &itx->itx_lr;		/* Common log record inside itx. */
1086 	lrw = (lr_write_t *)lrc;	/* Write log record inside itx. */
1087 	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
1088 		dlen = P2ROUNDUP_TYPED(
1089 		    lrw->lr_length, sizeof (uint64_t), uint64_t);
1090 	} else {
1091 		dlen = 0;
1092 	}
1093 	reclen = lrc->lrc_reclen;
1094 	zilog->zl_cur_used += (reclen + dlen);
1095 	txg = lrc->lrc_txg;
1096 
1097 	zil_lwb_write_init(zilog, lwb);
1098 
1099 cont:
1100 	/*
1101 	 * If this record won't fit in the current log block, start a new one.
1102 	 * For WR_NEED_COPY optimize layout for minimal number of chunks.
1103 	 */
1104 	lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
1105 	if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
1106 	    lwb_sp < ZIL_MAX_WASTE_SPACE && (dlen % ZIL_MAX_LOG_DATA == 0 ||
1107 	    lwb_sp < reclen + dlen % ZIL_MAX_LOG_DATA))) {
1108 		lwb = zil_lwb_write_start(zilog, lwb);
1109 		if (lwb == NULL)
1110 			return (NULL);
1111 		zil_lwb_write_init(zilog, lwb);
1112 		ASSERT(LWB_EMPTY(lwb));
1113 		lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
1114 		ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
1115 	}
1116 
1117 	dnow = MIN(dlen, lwb_sp - reclen);
1118 	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
1119 	bcopy(lrc, lr_buf, reclen);
1120 	lrcb = (lr_t *)lr_buf;		/* Like lrc, but inside lwb. */
1121 	lrwb = (lr_write_t *)lrcb;	/* Like lrw, but inside lwb. */
1122 
1123 	/*
1124 	 * If it's a write, fetch the data or get its blkptr as appropriate.
1125 	 */
1126 	if (lrc->lrc_txtype == TX_WRITE) {
1127 		if (txg > spa_freeze_txg(zilog->zl_spa))
1128 			txg_wait_synced(zilog->zl_dmu_pool, txg);
1129 		if (itx->itx_wr_state != WR_COPIED) {
1130 			char *dbuf;
1131 			int error;
1132 
1133 			if (itx->itx_wr_state == WR_NEED_COPY) {
1134 				dbuf = lr_buf + reclen;
1135 				lrcb->lrc_reclen += dnow;
1136 				if (lrwb->lr_length > dnow)
1137 					lrwb->lr_length = dnow;
1138 				lrw->lr_offset += dnow;
1139 				lrw->lr_length -= dnow;
1140 			} else {
1141 				ASSERT(itx->itx_wr_state == WR_INDIRECT);
1142 				dbuf = NULL;
1143 			}
1144 			error = zilog->zl_get_data(
1145 			    itx->itx_private, lrwb, dbuf, lwb->lwb_zio);
1146 			if (error == EIO) {
1147 				txg_wait_synced(zilog->zl_dmu_pool, txg);
1148 				return (lwb);
1149 			}
1150 			if (error != 0) {
1151 				ASSERT(error == ENOENT || error == EEXIST ||
1152 				    error == EALREADY);
1153 				return (lwb);
1154 			}
1155 		}
1156 	}
1157 
1158 	/*
1159 	 * We're actually making an entry, so update lrc_seq to be the
1160 	 * log record sequence number.  Note that this is generally not
1161 	 * equal to the itx sequence number because not all transactions
1162 	 * are synchronous, and sometimes spa_sync() gets there first.
1163 	 */
1164 	lrcb->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
1165 	lwb->lwb_nused += reclen + dnow;
1166 	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
1167 	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
1168 	ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
1169 
1170 	dlen -= dnow;
1171 	if (dlen > 0) {
1172 		zilog->zl_cur_used += reclen;
1173 		goto cont;
1174 	}
1175 
1176 	return (lwb);
1177 }
1178 
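/*
 * Allocate an in-memory intent log transaction (itx) with room for a log
 * record of lrsize bytes, rounded up to an 8-byte multiple.  New itxs
 * default to synchronous.
 */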
1179 itx_t *
1180 zil_itx_create(uint64_t txtype, size_t lrsize)
1181 {
1182 	itx_t *itx;
1183 
1184 	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
1185 
1186 	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
1187 	itx->itx_lr.lrc_txtype = txtype;
1188 	itx->itx_lr.lrc_reclen = lrsize;
1189 	itx->itx_lr.lrc_seq = 0;	/* defensive */
1190 	itx->itx_sync = B_TRUE;		/* default is synchronous */
1191 
1192 	return (itx);
1193 }
1194 
1195 void
1196 zil_itx_destroy(itx_t *itx)
1197 {
1198 	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
1199 }
1200 
1201 /*
1202  * Free up the sync and async itxs. The itxs_t has already been detached
1203  * so no locks are needed.
1204  */
1205 static void
1206 zil_itxg_clean(itxs_t *itxs)
1207 {
1208 	itx_t *itx;
1209 	list_t *list;
1210 	avl_tree_t *t;
1211 	void *cookie;
1212 	itx_async_node_t *ian;
1213 
1214 	list = &itxs->i_sync_list;
1215 	while ((itx = list_head(list)) != NULL) {
1216 		list_remove(list, itx);
1217 		kmem_free(itx, offsetof(itx_t, itx_lr) +
1218 		    itx->itx_lr.lrc_reclen);
1219 	}
1220 
1221 	cookie = NULL;
1222 	t = &itxs->i_async_tree;
1223 	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1224 		list = &ian->ia_list;
1225 		while ((itx = list_head(list)) != NULL) {
1226 			list_remove(list, itx);
1227 			kmem_free(itx, offsetof(itx_t, itx_lr) +
1228 			    itx->itx_lr.lrc_reclen);
1229 		}
1230 		list_destroy(list);
1231 		kmem_free(ian, sizeof (itx_async_node_t));
1232 	}
1233 	avl_destroy(t);
1234 
1235 	kmem_free(itxs, sizeof (itxs_t));
1236 }
1237 
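/*
 * AVL comparator for per-txg async itx nodes, ordered by object id.
 */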
1238 static int
1239 zil_aitx_compare(const void *x1, const void *x2)
1240 {
1241 	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
1242 	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
1243 
1244 	if (o1 < o2)
1245 		return (-1);
1246 	if (o1 > o2)
1247 		return (1);
1248 
1249 	return (0);
1250 }
1251 
1252 /*
1253  * Remove all async itxs with the given oid.
1254  */
1255 static void
1256 zil_remove_async(zilog_t *zilog, uint64_t oid)
1257 {
1258 	uint64_t otxg, txg;
1259 	itx_async_node_t *ian;
1260 	avl_tree_t *t;
1261 	avl_index_t where;
1262 	list_t clean_list;
1263 	itx_t *itx;
1264 
1265 	ASSERT(oid != 0);
1266 	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
1267 
1268 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1269 		otxg = ZILTEST_TXG;
1270 	else
1271 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1272 
1273 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1274 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1275 
1276 		mutex_enter(&itxg->itxg_lock);
1277 		if (itxg->itxg_txg != txg) {
1278 			mutex_exit(&itxg->itxg_lock);
1279 			continue;
1280 		}
1281 
1282 		/*
1283 		 * Locate the object node and append its list.
1284 		 */
1285 		t = &itxg->itxg_itxs->i_async_tree;
1286 		ian = avl_find(t, &oid, &where);
1287 		if (ian != NULL)
1288 			list_move_tail(&clean_list, &ian->ia_list);
1289 		mutex_exit(&itxg->itxg_lock);
1290 	}
1291 	while ((itx = list_head(&clean_list)) != NULL) {
1292 		list_remove(&clean_list, itx);
1293 		kmem_free(itx, offsetof(itx_t, itx_lr) +
1294 		    itx->itx_lr.lrc_reclen);
1295 	}
1296 	list_destroy(&clean_list);
1297 }
1298 
1299 void
1300 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
1301 {
1302 	uint64_t txg;
1303 	itxg_t *itxg;
1304 	itxs_t *itxs, *clean = NULL;
1305 
1306 	/*
1307 	 * Object ids can be re-instantiated in the next txg so
1308 	 * remove any async transactions to avoid future leaks.
1309 	 * This can happen if a fsync occurs on the re-instantiated
1310 	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
1311 	 * the new file data and flushes a write record for the old object.
1312 	 */
1313 	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
1314 		zil_remove_async(zilog, itx->itx_oid);
1315 
1316 	/*
1317 	 * Ensure the data of a renamed file is committed before the rename.
1318 	 */
1319 	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
1320 		zil_async_to_sync(zilog, itx->itx_oid);
1321 
1322 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
1323 		txg = ZILTEST_TXG;
1324 	else
1325 		txg = dmu_tx_get_txg(tx);
1326 
1327 	itxg = &zilog->zl_itxg[txg & TXG_MASK];
1328 	mutex_enter(&itxg->itxg_lock);
1329 	itxs = itxg->itxg_itxs;
1330 	if (itxg->itxg_txg != txg) {
1331 		if (itxs != NULL) {
1332 			/*
1333 			 * The zil_clean callback hasn't got around to cleaning
1334 			 * this itxg. Save the itxs for release below.
1335 			 * This should be rare.
1336 			 */
1337 			zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
1338 			    "txg %llu", itxg->itxg_txg);
1339 			clean = itxg->itxg_itxs;
1340 		}
1341 		itxg->itxg_txg = txg;
1342 		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);
1343 
1344 		list_create(&itxs->i_sync_list, sizeof (itx_t),
1345 		    offsetof(itx_t, itx_node));
1346 		avl_create(&itxs->i_async_tree, zil_aitx_compare,
1347 		    sizeof (itx_async_node_t),
1348 		    offsetof(itx_async_node_t, ia_node));
1349 	}
1350 	if (itx->itx_sync) {
1351 		list_insert_tail(&itxs->i_sync_list, itx);
1352 	} else {
1353 		avl_tree_t *t = &itxs->i_async_tree;
1354 		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
1355 		itx_async_node_t *ian;
1356 		avl_index_t where;
1357 
1358 		ian = avl_find(t, &foid, &where);
1359 		if (ian == NULL) {
1360 			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
1361 			list_create(&ian->ia_list, sizeof (itx_t),
1362 			    offsetof(itx_t, itx_node));
1363 			ian->ia_foid = foid;
1364 			avl_insert(t, ian, where);
1365 		}
1366 		list_insert_tail(&ian->ia_list, itx);
1367 	}
1368 
1369 	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
1370 	zilog_dirty(zilog, txg);
1371 	mutex_exit(&itxg->itxg_lock);
1372 
1373 	/* Release the old itxs now we've dropped the lock */
1374 	if (clean != NULL)
1375 		zil_itxg_clean(clean);
1376 }
1377 
1378 /*
1379  * If there are any in-memory intent log transactions which have now been
1380  * synced then start up a taskq to free them. We should only do this after we
1381  * have written out the uberblocks (i.e. txg has been committed) so that we
1382  * don't inadvertently clean out in-memory log records that would be required
1383  * by zil_commit().
1384  */
1385 void
1386 zil_clean(zilog_t *zilog, uint64_t synced_txg)
1387 {
1388 	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
1389 	itxs_t *clean_me;
1390 
1391 	mutex_enter(&itxg->itxg_lock);
1392 	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
1393 		mutex_exit(&itxg->itxg_lock);
1394 		return;
1395 	}
1396 	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
1397 	ASSERT(itxg->itxg_txg != 0);
1398 	ASSERT(zilog->zl_clean_taskq != NULL);
1399 	clean_me = itxg->itxg_itxs;
1400 	itxg->itxg_itxs = NULL;
1401 	itxg->itxg_txg = 0;
1402 	mutex_exit(&itxg->itxg_lock);
1403 	/*
1404 	 * Preferably start a task queue to free up the old itxs but
1405 	 * if taskq_dispatch can't allocate resources to do that then
1406 	 * free it in-line. This should be rare. Note, using TQ_SLEEP
1407 	 * created a bad performance problem.
1408 	 */
1409 	if (taskq_dispatch(zilog->zl_clean_taskq,
1410 	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == NULL)
1411 		zil_itxg_clean(clean_me);
1412 }
1413 
1414 /*
1415  * Get the list of itxs to commit into zl_itx_commit_list.
1416  */
1417 static void
1418 zil_get_commit_list(zilog_t *zilog)
1419 {
1420 	uint64_t otxg, txg;
1421 	list_t *commit_list = &zilog->zl_itx_commit_list;
1422 
1423 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1424 		otxg = ZILTEST_TXG;
1425 	else
1426 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1427 
1428 	/*
1429 	 * This is inherently racy, since there is nothing to prevent
1430 	 * the last synced txg from changing. That's okay since we'll
1431 	 * only commit things in the future.
1432 	 */
1433 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1434 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1435 
1436 		mutex_enter(&itxg->itxg_lock);
1437 		if (itxg->itxg_txg != txg) {
1438 			mutex_exit(&itxg->itxg_lock);
1439 			continue;
1440 		}
1441 
1442 		/*
1443 		 * If we're adding itx records to the zl_itx_commit_list,
1444 		 * then the zil better be dirty in this "txg". We can assert
1445 		 * that here since we're holding the itxg_lock which will
1446 		 * prevent spa_sync from cleaning it. Once we add the itxs
1447 		 * to the zl_itx_commit_list we must commit it to disk even
1448 		 * if it's unnecessary (i.e. the txg was synced).
1449 		 */
1450 		ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
1451 		    spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
1452 		list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
1453 
1454 		mutex_exit(&itxg->itxg_lock);
1455 	}
1456 }
1457 
1458 /*
1459  * Move the async itxs for a specified object to commit into sync lists.
1460  */
1461 static void
1462 zil_async_to_sync(zilog_t *zilog, uint64_t foid)
1463 {
1464 	uint64_t otxg, txg;
1465 	itx_async_node_t *ian;
1466 	avl_tree_t *t;
1467 	avl_index_t where;
1468 
1469 	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1470 		otxg = ZILTEST_TXG;
1471 	else
1472 		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1473 
1474 	/*
1475 	 * This is inherently racy, since there is nothing to prevent
1476 	 * the last synced txg from changing.
1477 	 */
1478 	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1479 		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1480 
1481 		mutex_enter(&itxg->itxg_lock);
1482 		if (itxg->itxg_txg != txg) {
1483 			mutex_exit(&itxg->itxg_lock);
1484 			continue;
1485 		}
1486 
1487 		/*
1488 		 * If a foid is specified then find that node and append its
1489 		 * list. Otherwise walk the tree appending all the lists
1490 		 * to the sync list. We add to the end rather than the
1491 		 * beginning to ensure the create has happened.
1492 		 */
1493 		t = &itxg->itxg_itxs->i_async_tree;
1494 		if (foid != 0) {
1495 			ian = avl_find(t, &foid, &where);
1496 			if (ian != NULL) {
1497 				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1498 				    &ian->ia_list);
1499 			}
1500 		} else {
1501 			void *cookie = NULL;
1502 
1503 			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1504 				list_move_tail(&itxg->itxg_itxs->i_sync_list,
1505 				    &ian->ia_list);
1506 				list_destroy(&ian->ia_list);
1507 				kmem_free(ian, sizeof (itx_async_node_t));
1508 			}
1509 		}
1510 		mutex_exit(&itxg->itxg_lock);
1511 	}
1512 }
1513 
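/*
 * Write the itxs gathered on zl_itx_commit_list out to the log, then wait
 * for the log block I/O to complete and flush the vdevs that were written.
 * Called with zl_lock held and zl_writer set; the lock is dropped for the
 * duration of the writes.
 */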
1514 static void
1515 zil_commit_writer(zilog_t *zilog)
1516 {
1517 	uint64_t txg;
1518 	itx_t *itx;
1519 	lwb_t *lwb;
1520 	spa_t *spa = zilog->zl_spa;
1521 	int error = 0;
1522 
1523 	ASSERT(zilog->zl_root_zio == NULL);
1524 
1525 	mutex_exit(&zilog->zl_lock);
1526 
1527 	zil_get_commit_list(zilog);
1528 
1529 	/*
1530 	 * Return if there's nothing to commit before we dirty the fs by
1531 	 * calling zil_create().
1532 	 */
1533 	if (list_head(&zilog->zl_itx_commit_list) == NULL) {
1534 		mutex_enter(&zilog->zl_lock);
1535 		return;
1536 	}
1537 
1538 	if (zilog->zl_suspend) {
1539 		lwb = NULL;
1540 	} else {
1541 		lwb = list_tail(&zilog->zl_lwb_list);
1542 		if (lwb == NULL)
1543 			lwb = zil_create(zilog);
1544 	}
1545 
1546 	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
1547 	while (itx = list_head(&zilog->zl_itx_commit_list)) {
1548 		txg = itx->itx_lr.lrc_txg;
1549 		ASSERT3U(txg, !=, 0);
1550 
1551 		/*
1552 		 * This is inherently racy and may result in us writing
1553 		 * out a log block for a txg that was just synced. This is
1554  * ok since we'll end up cleaning up that log block the next
1555 		 * time we call zil_sync().
1556 		 */
1557 		if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
1558 			lwb = zil_lwb_commit(zilog, itx, lwb);
1559 		list_remove(&zilog->zl_itx_commit_list, itx);
1560 		kmem_free(itx, offsetof(itx_t, itx_lr)
1561 		    + itx->itx_lr.lrc_reclen);
1562 	}
1563 	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1564 
1565 	/* write the last block out */
1566 	if (lwb != NULL && lwb->lwb_zio != NULL)
1567 		lwb = zil_lwb_write_start(zilog, lwb);
1568 
1569 	zilog->zl_cur_used = 0;
1570 
1571 	/*
1572 	 * Wait if necessary for the log blocks to be on stable storage.
1573 	 */
1574 	if (zilog->zl_root_zio) {
1575 		error = zio_wait(zilog->zl_root_zio);
1576 		zilog->zl_root_zio = NULL;
1577 		zil_flush_vdevs(zilog);
1578 	}
1579 
1580 	if (error || lwb == NULL)
1581 		txg_wait_synced(zilog->zl_dmu_pool, 0);
1582 
1583 	mutex_enter(&zilog->zl_lock);
1584 
1585 	/*
1586 	 * Remember the highest committed log sequence number for ztest.
1587 	 * We only update this value when all the log writes succeeded,
1588 	 * because ztest wants to ASSERT that it got the whole log chain.
1589 	 */
1590 	if (error == 0 && lwb != NULL)
1591 		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1592 }
1593 
1594 /*
1595  * Commit zfs transactions to stable storage.
1596  * If foid is 0 push out all transactions, otherwise push only those
1597  * for that object or might reference that object.
1598  *
1599  * itxs are committed in batches. In a heavily stressed zil there will be
1600  * a commit writer thread who is writing out a bunch of itxs to the log
1601  * for a set of committing threads (cthreads) in the same batch as the writer.
1602  * Those cthreads are all waiting on the same cv for that batch.
1603  *
1604  * There will also be a different and growing batch of threads that are
1605  * waiting to commit (qthreads). When the committing batch completes
1606  * a transition occurs such that the cthreads exit and the qthreads become
1607  * cthreads. One of the new cthreads becomes the writer thread for the
1608  * batch. Any new threads arriving become new qthreads.
1609  *
1610  * Only two condition variables are needed, and no transition between
1611  * the two cvs is required; they just flip-flop between serving the
1612  * qthreads and the cthreads.
1613  *
1614  * Using this scheme we can efficiently wake up only those threads
1615  * that have been committed.
1616  */
1617 void
1618 zil_commit(zilog_t *zilog, uint64_t foid)
1619 {
1620 	uint64_t mybatch;
1621 
1622 	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
1623 		return;
1624 
1625 	/* move the async itxs for the foid to the sync queues */
1626 	zil_async_to_sync(zilog, foid);
1627 
1628 	mutex_enter(&zilog->zl_lock);
1629 	mybatch = zilog->zl_next_batch;
1630 	while (zilog->zl_writer) {
1631 		cv_wait(&zilog->zl_cv_batch[mybatch & 1], &zilog->zl_lock);
1632 		if (mybatch <= zilog->zl_com_batch) {
1633 			mutex_exit(&zilog->zl_lock);
1634 			return;
1635 		}
1636 	}
1637 
1638 	zilog->zl_next_batch++;
1639 	zilog->zl_writer = B_TRUE;
1640 	zil_commit_writer(zilog);
1641 	zilog->zl_com_batch = mybatch;
1642 	zilog->zl_writer = B_FALSE;
1643 	mutex_exit(&zilog->zl_lock);
1644 
1645 	/* wake up one thread to become the next writer */
1646 	cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);
1647 
1648 	/* wake up all threads waiting for this batch to be committed */
1649 	cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
1650 }
1651 
1652 /*
1653  * Called in syncing context to free committed log blocks and update log header.
1654  */
1655 void
1656 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1657 {
1658 	zil_header_t *zh = zil_header_in_syncing_context(zilog);
1659 	uint64_t txg = dmu_tx_get_txg(tx);
1660 	spa_t *spa = zilog->zl_spa;
1661 	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
1662 	lwb_t *lwb;
1663 
1664 	/*
1665 	 * We don't zero out zl_destroy_txg, so make sure we don't try
1666 	 * to destroy it twice.
1667 	 */
1668 	if (spa_sync_pass(spa) != 1)
1669 		return;
1670 
1671 	mutex_enter(&zilog->zl_lock);
1672 
1673 	ASSERT(zilog->zl_stop_sync == 0);
1674 
1675 	if (*replayed_seq != 0) {
1676 		ASSERT(zh->zh_replay_seq < *replayed_seq);
1677 		zh->zh_replay_seq = *replayed_seq;
1678 		*replayed_seq = 0;
1679 	}
1680 
1681 	if (zilog->zl_destroy_txg == txg) {
1682 		blkptr_t blk = zh->zh_log;
1683 
1684 		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1685 
1686 		bzero(zh, sizeof (zil_header_t));
1687 		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1688 
1689 		if (zilog->zl_keep_first) {
1690 			/*
1691 			 * If this block was part of log chain that couldn't
1692 			 * be claimed because a device was missing during
1693 			 * zil_claim(), but that device later returns,
1694 			 * then this block could erroneously appear valid.
1695 			 * To guard against this, assign a new GUID to the new
1696 			 * log chain so it doesn't matter what blk points to.
1697 			 */
1698 			zil_init_log_chain(zilog, &blk);
1699 			zh->zh_log = blk;
1700 		}
1701 	}
1702 
1703 	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1704 		zh->zh_log = lwb->lwb_blk;
1705 		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1706 			break;
1707 		list_remove(&zilog->zl_lwb_list, lwb);
1708 		zio_free_zil(spa, txg, &lwb->lwb_blk);
1709 		kmem_cache_free(zil_lwb_cache, lwb);
1710 
1711 		/*
1712 		 * If we don't have anything left in the lwb list then
1713 		 * we've had an allocation failure and we need to zero
1714 		 * out the zil_header blkptr so that we don't end
1715 		 * up freeing the same block twice.
1716 		 */
1717 		if (list_head(&zilog->zl_lwb_list) == NULL)
1718 			BP_ZERO(&zh->zh_log);
1719 	}
1720 	mutex_exit(&zilog->zl_lock);
1721 }
1722 
1723 void
1724 zil_init(void)
1725 {
1726 	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1727 	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1728 }
1729 
1730 void
1731 zil_fini(void)
1732 {
1733 	kmem_cache_destroy(zil_lwb_cache);
1734 }
1735 
1736 void
1737 zil_set_sync(zilog_t *zilog, uint64_t sync)
1738 {
1739 	zilog->zl_sync = sync;
1740 }
1741 
1742 void
1743 zil_set_logbias(zilog_t *zilog, uint64_t logbias)
1744 {
1745 	zilog->zl_logbias = logbias;
1746 }
1747 
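/*
 * Allocate and initialize the in-memory zilog for an objset, wiring it up
 * to the given on-disk ZIL header.
 */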
1748 zilog_t *
1749 zil_alloc(objset_t *os, zil_header_t *zh_phys)
1750 {
1751 	zilog_t *zilog;
1752 
1753 	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1754 
1755 	zilog->zl_header = zh_phys;
1756 	zilog->zl_os = os;
1757 	zilog->zl_spa = dmu_objset_spa(os);
1758 	zilog->zl_dmu_pool = dmu_objset_pool(os);
1759 	zilog->zl_destroy_txg = TXG_INITIAL - 1;
1760 	zilog->zl_logbias = dmu_objset_logbias(os);
1761 	zilog->zl_sync = dmu_objset_syncprop(os);
1762 	zilog->zl_next_batch = 1;
1763 
1764 	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1765 
1766 	for (int i = 0; i < TXG_SIZE; i++) {
1767 		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
1768 		    MUTEX_DEFAULT, NULL);
1769 	}
1770 
1771 	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1772 	    offsetof(lwb_t, lwb_node));
1773 
1774 	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
1775 	    offsetof(itx_t, itx_node));
1776 
1777 	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1778 
1779 	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1780 	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1781 
1782 	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1783 	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
1784 	cv_init(&zilog->zl_cv_batch[0], NULL, CV_DEFAULT, NULL);
1785 	cv_init(&zilog->zl_cv_batch[1], NULL, CV_DEFAULT, NULL);
1786 
1787 	return (zilog);
1788 }
1789 
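/*
 * Free the in-core log structure.  Any lwbs must already have been
 * cleaned up, and the log must not be suspended or suspending.
 */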
1790 void
1791 zil_free(zilog_t *zilog)
1792 {
1793 	zilog->zl_stop_sync = 1;
1794 
1795 	ASSERT0(zilog->zl_suspend);
1796 	ASSERT0(zilog->zl_suspending);
1797 
1798 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1799 	list_destroy(&zilog->zl_lwb_list);
1800 
1801 	avl_destroy(&zilog->zl_vdev_tree);
1802 	mutex_destroy(&zilog->zl_vdev_lock);
1803 
1804 	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
1805 	list_destroy(&zilog->zl_itx_commit_list);
1806 
1807 	for (int i = 0; i < TXG_SIZE; i++) {
1808 		/*
1809 		 * It's possible for an itx to be generated that doesn't dirty
1810 		 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
1811 		 * callback to remove the entry. We remove those here.
1812 		 *
1813 		 * Also free up the ziltest itxs.
1814 		 */
1815 		if (zilog->zl_itxg[i].itxg_itxs)
1816 			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
1817 		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
1818 	}
1819 
1820 	mutex_destroy(&zilog->zl_lock);
1821 
1822 	cv_destroy(&zilog->zl_cv_writer);
1823 	cv_destroy(&zilog->zl_cv_suspend);
1824 	cv_destroy(&zilog->zl_cv_batch[0]);
1825 	cv_destroy(&zilog->zl_cv_batch[1]);
1826 
1827 	kmem_free(zilog, sizeof (zilog_t));
1828 }
1829 
1830 /*
1831  * Open an intent log.
1832  */
1833 zilog_t *
1834 zil_open(objset_t *os, zil_get_data_t *get_data)
1835 {
1836 	zilog_t *zilog = dmu_objset_zil(os);
1837 
1838 	ASSERT(zilog->zl_clean_taskq == NULL);
1839 	ASSERT(zilog->zl_get_data == NULL);
1840 	ASSERT(list_is_empty(&zilog->zl_lwb_list));
1841 
1842 	zilog->zl_get_data = get_data;
1843 	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1844 	    2, 2, TASKQ_PREPOPULATE);
1845 
1846 	return (zilog);
1847 }
1848 
1849 /*
1850  * Close an intent log.
1851  */
1852 void
1853 zil_close(zilog_t *zilog)
1854 {
1855 	lwb_t *lwb;
1856 	uint64_t txg = 0;
1857 
1858 	zil_commit(zilog, 0); /* commit all itx */
1859 
1860 	/*
1861 	 * The lwb_max_txg for the stubby lwb will reflect the last activity
1862 	 * for the zil.  After a txg_wait_synced() on the txg we know all the
1863 	 * callbacks have occurred that may clean the zil.  Only then can we
1864 	 * destroy the zl_clean_taskq.
1865 	 */
1866 	mutex_enter(&zilog->zl_lock);
1867 	lwb = list_tail(&zilog->zl_lwb_list);
1868 	if (lwb != NULL)
1869 		txg = lwb->lwb_max_txg;
1870 	mutex_exit(&zilog->zl_lock);
1871 	if (txg)
1872 		txg_wait_synced(zilog->zl_dmu_pool, txg);
1873 
1874 	if (zilog_is_dirty(zilog))
1875 		zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg);
1876 	VERIFY(!zilog_is_dirty(zilog));
1877 
1878 	taskq_destroy(zilog->zl_clean_taskq);
1879 	zilog->zl_clean_taskq = NULL;
1880 	zilog->zl_get_data = NULL;
1881 
1882 	/*
1883 	 * We should have only one LWB left on the list; remove it now.
1884 	 */
1885 	mutex_enter(&zilog->zl_lock);
1886 	lwb = list_head(&zilog->zl_lwb_list);
1887 	if (lwb != NULL) {
1888 		ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
1889 		list_remove(&zilog->zl_lwb_list, lwb);
1890 		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1891 		kmem_cache_free(zil_lwb_cache, lwb);
1892 	}
1893 	mutex_exit(&zilog->zl_lock);
1894 }
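
/*
 * Illustrative sketch (not part of this file): the basic lifecycle a
 * hypothetical ZIL consumer is expected to follow.  The objset "os", the
 * callback "my_get_data" and the object id "foid" are placeholders; real
 * consumers (e.g. the ZPL) supply their own.
 *
 *	zilog_t *zilog = zil_open(os, my_get_data);
 *
 *	...create itxs with zil_itx_create() and queue them with
 *	zil_itx_assign() as operations are logged...
 *
 *	zil_commit(zilog, foid);	force foid's itxs to stable storage
 *	zil_close(zilog);		commit everything, then tear down
 */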
1895 
1896 static char *suspend_tag = "zil suspending";
1897 
1898 /*
1899  * Suspend an intent log.  While in suspended mode, we still honor
1900  * synchronous semantics, but we rely on txg_wait_synced() to do it.
1901  * On old version pools, we suspend the log briefly when taking a
1902  * snapshot so that it will have an empty intent log.
1903  *
1904  * Long holds are not really intended to be used the way we do here --
1905  * held for such a short time.  A concurrent caller of dsl_dataset_long_held()
1906  * could fail.  Therefore we take pains to only put a long hold if it is
1907  * actually necessary.  Fortunately, it will only be necessary if the
1908  * objset is currently mounted (or the ZVOL equivalent).  In that case it
1909  * will already have a long hold, so we are not really making things any worse.
1910  *
1911  * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
1912  * zvol_state_t), and use its mechanism to prevent its hold from being
1913  * dropped (e.g. VFS_HOLD()).  However, that would be even more pain for
1914  * very little gain.
1915  *
1916  * If cookiep == NULL, this does both the suspend & resume.
1917  * Otherwise, it returns with the dataset "long held", and the cookie
1918  * should be passed into zil_resume().
1919  */
1920 int
1921 zil_suspend(const char *osname, void **cookiep)
1922 {
1923 	objset_t *os;
1924 	zilog_t *zilog;
1925 	const zil_header_t *zh;
1926 	int error;
1927 
1928 	error = dmu_objset_hold(osname, suspend_tag, &os);
1929 	if (error != 0)
1930 		return (error);
1931 	zilog = dmu_objset_zil(os);
1932 
1933 	mutex_enter(&zilog->zl_lock);
1934 	zh = zilog->zl_header;
1935 
1936 	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
1937 		mutex_exit(&zilog->zl_lock);
1938 		dmu_objset_rele(os, suspend_tag);
1939 		return (SET_ERROR(EBUSY));
1940 	}
1941 
1942 	/*
1943 	 * Don't put a long hold in the cases where we can avoid it.  This
1944 	 * is when there is no cookie, so we are doing a suspend & resume
1945 	 * (i.e. called from zil_vdev_offline()), and there's nothing to do
1946 	 * for the suspend because it's already suspended, or there's no ZIL.
1947 	 */
1948 	if (cookiep == NULL && !zilog->zl_suspending &&
1949 	    (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
1950 		mutex_exit(&zilog->zl_lock);
1951 		dmu_objset_rele(os, suspend_tag);
1952 		return (0);
1953 	}
1954 
1955 	dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
1956 	dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
1957 
1958 	zilog->zl_suspend++;
1959 
1960 	if (zilog->zl_suspend > 1) {
1961 		/*
1962 		 * Someone else is already suspending it.
1963 		 * Just wait for them to finish.
1964 		 */
1965 
1966 		while (zilog->zl_suspending)
1967 			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1968 		mutex_exit(&zilog->zl_lock);
1969 
1970 		if (cookiep == NULL)
1971 			zil_resume(os);
1972 		else
1973 			*cookiep = os;
1974 		return (0);
1975 	}
1976 
1977 	/*
1978 	 * If there is no pointer to an on-disk block, this ZIL must not
1979 	 * be active (e.g. filesystem not mounted), so there's nothing
1980 	 * to clean up.
1981 	 */
1982 	if (BP_IS_HOLE(&zh->zh_log)) {
1983 		ASSERT(cookiep != NULL); /* fast path already handled */
1984 
1985 		*cookiep = os;
1986 		mutex_exit(&zilog->zl_lock);
1987 		return (0);
1988 	}
1989 
1990 	zilog->zl_suspending = B_TRUE;
1991 	mutex_exit(&zilog->zl_lock);
1992 
1993 	zil_commit(zilog, 0);
1994 
1995 	zil_destroy(zilog, B_FALSE);
1996 
1997 	mutex_enter(&zilog->zl_lock);
1998 	zilog->zl_suspending = B_FALSE;
1999 	cv_broadcast(&zilog->zl_cv_suspend);
2000 	mutex_exit(&zilog->zl_lock);
2001 
2002 	if (cookiep == NULL)
2003 		zil_resume(os);
2004 	else
2005 		*cookiep = os;
2006 	return (0);
2007 }
2008 
2009 void
2010 zil_resume(void *cookie)
2011 {
2012 	objset_t *os = cookie;
2013 	zilog_t *zilog = dmu_objset_zil(os);
2014 
2015 	mutex_enter(&zilog->zl_lock);
2016 	ASSERT(zilog->zl_suspend != 0);
2017 	zilog->zl_suspend--;
2018 	mutex_exit(&zilog->zl_lock);
2019 	dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
2020 	dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
2021 }
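
/*
 * Illustrative sketch (not part of this file): the two zil_suspend()
 * calling conventions described above.  "osname" is a placeholder for the
 * dataset name supplied by the caller.
 *
 *	One-shot suspend & resume, e.g. from zil_vdev_offline():
 *
 *		(void) zil_suspend(osname, NULL);
 *
 *	Suspend now, resume later; the cookie keeps the dataset long-held:
 *
 *		void *cookie;
 *		if (zil_suspend(osname, &cookie) == 0) {
 *			...do whatever required an empty, quiesced log...
 *			zil_resume(cookie);
 *		}
 */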
2022 
2023 typedef struct zil_replay_arg {
2024 	zil_replay_func_t **zr_replay;
2025 	void		*zr_arg;
2026 	boolean_t	zr_byteswap;
2027 	char		*zr_lr;
2028 } zil_replay_arg_t;
2029 
2030 static int
2031 zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
2032 {
2033 	char name[ZFS_MAX_DATASET_NAME_LEN];
2034 
2035 	zilog->zl_replaying_seq--;	/* didn't actually replay this one */
2036 
2037 	dmu_objset_name(zilog->zl_os, name);
2038 
2039 	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
2040 	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
2041 	    (u_longlong_t)lr->lrc_seq,
2042 	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
2043 	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
2044 
2045 	return (error);
2046 }
2047 
2048 static int
2049 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
2050 {
2051 	zil_replay_arg_t *zr = zra;
2052 	const zil_header_t *zh = zilog->zl_header;
2053 	uint64_t reclen = lr->lrc_reclen;
2054 	uint64_t txtype = lr->lrc_txtype;
2055 	int error = 0;
2056 
2057 	zilog->zl_replaying_seq = lr->lrc_seq;
2058 
2059 	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
2060 		return (0);
2061 
2062 	if (lr->lrc_txg < claim_txg)		/* already committed */
2063 		return (0);
2064 
2065 	/* Strip case-insensitive bit, still present in log record */
2066 	txtype &= ~TX_CI;
2067 
2068 	if (txtype == 0 || txtype >= TX_MAX_TYPE)
2069 		return (zil_replay_error(zilog, lr, EINVAL));
2070 
2071 	/*
2072 	 * If this record type can be logged out of order, the object
2073 	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
2074 	 */
2075 	if (TX_OOO(txtype)) {
2076 		error = dmu_object_info(zilog->zl_os,
2077 		    ((lr_ooo_t *)lr)->lr_foid, NULL);
2078 		if (error == ENOENT || error == EEXIST)
2079 			return (0);
2080 	}
2081 
2082 	/*
2083 	 * Make a copy of the data so we can revise and extend it.
2084 	 */
2085 	bcopy(lr, zr->zr_lr, reclen);
2086 
2087 	/*
2088 	 * If this is a TX_WRITE with a blkptr, suck in the data.
2089 	 */
2090 	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
2091 		error = zil_read_log_data(zilog, (lr_write_t *)lr,
2092 		    zr->zr_lr + reclen);
2093 		if (error != 0)
2094 			return (zil_replay_error(zilog, lr, error));
2095 	}
2096 
2097 	/*
2098 	 * The log block containing this lr may have been byteswapped
2099 	 * so that we can easily examine common fields like lrc_txtype.
2100 	 * However, the log is a mix of different record types, and only the
2101 	 * replay vectors know how to byteswap their records.  Therefore, if
2102 	 * the lr was byteswapped, undo it before invoking the replay vector.
2103 	 */
2104 	if (zr->zr_byteswap)
2105 		byteswap_uint64_array(zr->zr_lr, reclen);
2106 
2107 	/*
2108 	 * We must now do two things atomically: replay this log record,
2109 	 * and update the log header sequence number to reflect the fact that
2110 	 * we did so. At the end of each replay function the sequence number
2111 	 * is updated if we are in replay mode.
2112 	 */
2113 	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
2114 	if (error != 0) {
2115 		/*
2116 		 * The DMU's dnode layer doesn't see removes until the txg
2117 		 * commits, so a subsequent claim can spuriously fail with
2118 		 * EEXIST. So if we receive any error, we try syncing out
2119 		 * any removes and then retry the transaction.  Note that we
2120 		 * specify B_FALSE for byteswap now, so we don't do it twice.
2121 		 */
2122 		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
2123 		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
2124 		if (error != 0)
2125 			return (zil_replay_error(zilog, lr, error));
2126 	}
2127 	return (0);
2128 }
2129 
2130 /* ARGSUSED */
2131 static int
2132 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
2133 {
2134 	zilog->zl_replay_blks++;
2135 
2136 	return (0);
2137 }
2138 
2139 /*
2140  * If this dataset has a non-empty intent log, replay it and destroy it.
2141  */
2142 void
2143 zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
2144 {
2145 	zilog_t *zilog = dmu_objset_zil(os);
2146 	const zil_header_t *zh = zilog->zl_header;
2147 	zil_replay_arg_t zr;
2148 
2149 	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
2150 		zil_destroy(zilog, B_TRUE);
2151 		return;
2152 	}
2153 
2154 	zr.zr_replay = replay_func;
2155 	zr.zr_arg = arg;
2156 	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
2157 	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
2158 
2159 	/*
2160 	 * Wait for in-progress removes to sync before starting replay.
2161 	 */
2162 	txg_wait_synced(zilog->zl_dmu_pool, 0);
2163 
2164 	zilog->zl_replay = B_TRUE;
2165 	zilog->zl_replay_time = ddi_get_lbolt();
2166 	ASSERT(zilog->zl_replay_blks == 0);
2167 	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
2168 	    zh->zh_claim_txg);
2169 	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
2170 
2171 	zil_destroy(zilog, B_FALSE);
2172 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
2173 	zilog->zl_replay = B_FALSE;
2174 }
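
/*
 * Illustrative sketch (not part of this file): how a hypothetical consumer
 * wires up replay.  "my_replay_vector" stands in for a TX_MAX_TYPE-sized
 * table of replay functions indexed by txtype (the ZPL passes
 * zfs_replay_vector), and "my_arg" is whatever private state those
 * functions need.
 *
 *	zil_replay_func_t *my_replay_vector[TX_MAX_TYPE];
 *	...fill in one handler per log record type (TX_CREATE, TX_WRITE, ...)...
 *
 *	zil_replay(os, my_arg, my_replay_vector);
 */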
2175 
2176 boolean_t
2177 zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
2178 {
2179 	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
2180 		return (B_TRUE);
2181 
2182 	if (zilog->zl_replay) {
2183 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
2184 		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
2185 		    zilog->zl_replaying_seq;
2186 		return (B_TRUE);
2187 	}
2188 
2189 	return (B_FALSE);
2190 }
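
/*
 * Illustrative sketch (not part of this file): logging code typically calls
 * zil_replaying() before building an itx, so that operations performed while
 * replaying (or with sync=disabled) are not logged again.  "zfs_log_foo" is
 * a hypothetical logging routine.
 *
 *	void
 *	zfs_log_foo(zilog_t *zilog, dmu_tx_t *tx, ...)
 *	{
 *		if (zil_replaying(zilog, tx))
 *			return;
 *		...zil_itx_create()/zil_itx_assign() as usual...
 *	}
 */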
2191 
2192 /* ARGSUSED */
2193 int
2194 zil_vdev_offline(const char *osname, void *arg)
2195 {
2196 	int error;
2197 
2198 	error = zil_suspend(osname, NULL);
2199 	if (error != 0)
2200 		return (SET_ERROR(EEXIST));
2201 	return (0);
2202 }
2203