xref: /titanic_50/usr/src/uts/common/fs/zfs/zil.c (revision 9a4611f412a6b1f7a0bc7d53d2bb046a95daa4bc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/zfs_context.h>
27 #include <sys/spa.h>
28 #include <sys/spa_impl.h>
29 #include <sys/dmu.h>
30 #include <sys/zap.h>
31 #include <sys/arc.h>
32 #include <sys/stat.h>
33 #include <sys/resource.h>
34 #include <sys/zil.h>
35 #include <sys/zil_impl.h>
36 #include <sys/dsl_dataset.h>
37 #include <sys/vdev.h>
38 #include <sys/dmu_tx.h>
39 
40 /*
41  * The zfs intent log (ZIL) saves transaction records of system calls
42  * that change the file system in memory with enough information
43  * to be able to replay them. These are stored in memory until
44  * either the DMU transaction group (txg) commits them to the stable pool
45  * and they can be discarded, or they are flushed to the stable log
46  * (also in the pool) due to an fsync, O_DSYNC or other synchronous
47  * requirement. In the event of a panic or power failure, those log
48  * records (transactions) are replayed.
49  *
50  * There is one ZIL per file system. Its on-disk (pool) format consists
51  * of 3 parts:
52  *
53  * 	- ZIL header
54  * 	- ZIL blocks
55  * 	- ZIL records
56  *
57  * A log record holds a system call transaction. Log blocks can
58  * hold many log records and the blocks are chained together.
59  * Each ZIL block contains a block pointer (blkptr_t) to the next
60  * ZIL block in the chain. The ZIL header points to the first
61  * block in the chain. Note there is not a fixed place in the pool
62  * to hold blocks. They are dynamically allocated and freed as
63  * needed from the blocks available. The sketch below shows the chain.
64  */
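/*
 * Editorial sketch (not part of the original source, added for
 * illustration): the chain described above, using only structures that
 * appear in this file.  The zil_header_t's zh_log blkptr names the first
 * log block; each log block ends with a zil_trailer_t whose zit_next_blk
 * names the next block, and whose zit_nused gives the bytes of log
 * records that precede the trailer.
 *
 *	zil_header_t           log block               log block
 *	+---------+     +-------------------+    +-------------------+
 *	| zh_log --+--->| log record        |    | log record        |
 *	+---------+     | log record        |    | ...               |
 *	                | ...   (zit_nused) |    |                   |
 *	                | zil_trailer_t:    |    | zil_trailer_t:    |
 *	                |  zit_next_blk ----+--->|  zit_next_blk ----+--> ...
 *	                +-------------------+    +-------------------+
 */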
65 
66 /*
67  * This global ZIL switch affects all pools
68  */
69 int zil_disable = 0;	/* disable intent logging */
70 
71 /*
72  * Tunable parameter for debugging or performance analysis.  Setting
73  * zfs_nocacheflush will cause corruption on power loss if a volatile
74  * out-of-order write cache is enabled.
75  */
76 boolean_t zfs_nocacheflush = B_FALSE;
77 
78 static kmem_cache_t *zil_lwb_cache;
79 
80 static int
81 zil_dva_compare(const void *x1, const void *x2)
82 {
83 	const dva_t *dva1 = x1;
84 	const dva_t *dva2 = x2;
85 
86 	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
87 		return (-1);
88 	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
89 		return (1);
90 
91 	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
92 		return (-1);
93 	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
94 		return (1);
95 
96 	return (0);
97 }
98 
99 static void
100 zil_dva_tree_init(avl_tree_t *t)
101 {
102 	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
103 	    offsetof(zil_dva_node_t, zn_node));
104 }
105 
106 static void
107 zil_dva_tree_fini(avl_tree_t *t)
108 {
109 	zil_dva_node_t *zn;
110 	void *cookie = NULL;
111 
112 	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
113 		kmem_free(zn, sizeof (zil_dva_node_t));
114 
115 	avl_destroy(t);
116 }
117 
118 static int
119 zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
120 {
121 	zil_dva_node_t *zn;
122 	avl_index_t where;
123 
124 	if (avl_find(t, dva, &where) != NULL)
125 		return (EEXIST);
126 
127 	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
128 	zn->zn_dva = *dva;
129 	avl_insert(t, zn, where);
130 
131 	return (0);
132 }
133 
134 static zil_header_t *
135 zil_header_in_syncing_context(zilog_t *zilog)
136 {
137 	return ((zil_header_t *)zilog->zl_header);
138 }
139 
140 static void
141 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
142 {
143 	zio_cksum_t *zc = &bp->blk_cksum;
144 
145 	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
146 	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
147 	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
148 	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
149 }
150 
151 /*
152  * Read a log block, make sure it's valid, and byteswap it if necessary.
153  */
154 static int
155 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
156 {
157 	blkptr_t blk = *bp;
158 	zbookmark_t zb;
159 	uint32_t aflags = ARC_WAIT;
160 	int error;
161 
162 	zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
163 	zb.zb_object = 0;
164 	zb.zb_level = -1;
165 	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
166 
167 	*abufpp = NULL;
168 
169 	/*
170 	 * We shouldn't be doing any scrubbing while we're doing log
171 	 * replay, so it's OK not to lock.
172 	 */
173 	error = arc_read_nolock(NULL, zilog->zl_spa, &blk,
174 	    arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
175 	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);
176 
177 	if (error == 0) {
178 		char *data = (*abufpp)->b_data;
179 		uint64_t blksz = BP_GET_LSIZE(bp);
180 		zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
181 		zio_cksum_t cksum = bp->blk_cksum;
182 
183 		/*
184 		 * Validate the checksummed log block.
185 		 *
186 		 * Sequence numbers should be... sequential.  The checksum
187 		 * verifier for the next block should be bp's checksum plus 1.
188 		 *
189 		 * Also check the log chain linkage and size used.
190 		 */
191 		cksum.zc_word[ZIL_ZC_SEQ]++;
192 
193 		if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum,
194 		    sizeof (cksum)) || BP_IS_HOLE(&ztp->zit_next_blk) ||
195 		    (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))) {
196 			error = ECKSUM;
197 		}
198 
199 		if (error) {
200 			VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
201 			*abufpp = NULL;
202 		}
203 	}
204 
205 	dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);
206 
207 	return (error);
208 }
209 
210 /*
211  * Parse the intent log, and call parse_func for each valid record within.
212  * Return the highest sequence number.
213  */
214 uint64_t
215 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
216     zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
217 {
218 	const zil_header_t *zh = zilog->zl_header;
219 	uint64_t claim_seq = zh->zh_claim_seq;
220 	uint64_t seq = 0;
221 	uint64_t max_seq = 0;
222 	blkptr_t blk = zh->zh_log;
223 	arc_buf_t *abuf;
224 	char *lrbuf, *lrp;
225 	zil_trailer_t *ztp;
226 	int reclen, error;
227 
228 	if (BP_IS_HOLE(&blk))
229 		return (max_seq);
230 
231 	/*
232 	 * Starting at the block pointed to by zh_log we read the log chain.
233 	 * For each block in the chain we strongly check that block to
234 	 * ensure its validity.  We stop when an invalid block is found.
235 	 * For each block pointer in the chain we call parse_blk_func().
236 	 * For each record in each valid block we call parse_lr_func().
237 	 * If the log has been claimed, stop if we encounter a sequence
238 	 * number greater than the highest claimed sequence number.
239 	 */
240 	zil_dva_tree_init(&zilog->zl_dva_tree);
241 	for (;;) {
242 		seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
243 
244 		if (claim_seq != 0 && seq > claim_seq)
245 			break;
246 
247 		ASSERT(max_seq < seq);
248 		max_seq = seq;
249 
250 		error = zil_read_log_block(zilog, &blk, &abuf);
251 
252 		if (parse_blk_func != NULL)
253 			parse_blk_func(zilog, &blk, arg, txg);
254 
255 		if (error)
256 			break;
257 
258 		lrbuf = abuf->b_data;
259 		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
260 		blk = ztp->zit_next_blk;
261 
262 		if (parse_lr_func == NULL) {
263 			VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
264 			continue;
265 		}
266 
267 		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
268 			lr_t *lr = (lr_t *)lrp;
269 			reclen = lr->lrc_reclen;
270 			ASSERT3U(reclen, >=, sizeof (lr_t));
271 			parse_lr_func(zilog, lr, arg, txg);
272 		}
273 		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
274 	}
275 	zil_dva_tree_fini(&zilog->zl_dva_tree);
276 
277 	return (max_seq);
278 }
279 
280 /* ARGSUSED */
281 static void
282 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
283 {
284 	spa_t *spa = zilog->zl_spa;
285 	int err;
286 
287 	/*
288 	 * Claim log block if not already committed and not already claimed.
289 	 */
290 	if (bp->blk_birth >= first_txg &&
291 	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
292 		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL,
293 		    ZIO_FLAG_MUSTSUCCEED));
294 		ASSERT(err == 0);
295 	}
296 }
297 
298 static void
299 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
300 {
301 	if (lrc->lrc_txtype == TX_WRITE) {
302 		lr_write_t *lr = (lr_write_t *)lrc;
303 		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
304 	}
305 }
306 
307 /* ARGSUSED */
308 static void
309 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
310 {
311 	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
312 }
313 
314 static void
315 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
316 {
317 	/*
318 	 * If we previously claimed it, we need to free it.
319 	 */
320 	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
321 		lr_write_t *lr = (lr_write_t *)lrc;
322 		blkptr_t *bp = &lr->lr_blkptr;
323 		if (bp->blk_birth >= claim_txg &&
324 		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
325 			(void) arc_free(NULL, zilog->zl_spa,
326 			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
327 		}
328 	}
329 }
330 
331 /*
332  * Create an on-disk intent log.
333  */
334 static void
335 zil_create(zilog_t *zilog)
336 {
337 	const zil_header_t *zh = zilog->zl_header;
338 	lwb_t *lwb;
339 	uint64_t txg = 0;
340 	dmu_tx_t *tx = NULL;
341 	blkptr_t blk;
342 	int error = 0;
343 
344 	/*
345 	 * Wait for any previous destroy to complete.
346 	 */
347 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
348 
349 	ASSERT(zh->zh_claim_txg == 0);
350 	ASSERT(zh->zh_replay_seq == 0);
351 
352 	blk = zh->zh_log;
353 
354 	/*
355 	 * If we don't already have an initial log block or we have one
356 	 * but it's the wrong endianness then allocate one.
357 	 */
358 	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
359 		tx = dmu_tx_create(zilog->zl_os);
360 		(void) dmu_tx_assign(tx, TXG_WAIT);
361 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
362 		txg = dmu_tx_get_txg(tx);
363 
364 		if (!BP_IS_HOLE(&blk)) {
365 			zio_free_blk(zilog->zl_spa, &blk, txg);
366 			BP_ZERO(&blk);
367 		}
368 
369 		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
370 		    NULL, txg);
371 
372 		if (error == 0)
373 			zil_init_log_chain(zilog, &blk);
374 	}
375 
376 	/*
377 	 * Allocate a log write buffer (lwb) for the first log block.
378 	 */
379 	if (error == 0) {
380 		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
381 		lwb->lwb_zilog = zilog;
382 		lwb->lwb_blk = blk;
383 		lwb->lwb_nused = 0;
384 		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
385 		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
386 		lwb->lwb_max_txg = txg;
387 		lwb->lwb_zio = NULL;
388 
389 		mutex_enter(&zilog->zl_lock);
390 		list_insert_tail(&zilog->zl_lwb_list, lwb);
391 		mutex_exit(&zilog->zl_lock);
392 	}
393 
394 	/*
395 	 * If we just allocated the first log block, commit our transaction
396 	 * and wait for zil_sync() to stuff the block pointer into zh_log.
397 	 * (zh is part of the MOS, so we cannot modify it in open context.)
398 	 */
399 	if (tx != NULL) {
400 		dmu_tx_commit(tx);
401 		txg_wait_synced(zilog->zl_dmu_pool, txg);
402 	}
403 
404 	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
405 }
406 
407 /*
408  * In one tx, free all log blocks and clear the log header.
409  * If keep_first is set, then we're replaying a log with no content.
410  * We want to keep the first block, however, so that the first
411  * synchronous transaction doesn't require a txg_wait_synced()
412  * in zil_create().  We don't need to txg_wait_synced() here either
413  * when keep_first is set, because both zil_create() and zil_destroy()
414  * will wait for any in-progress destroys to complete.
415  */
416 void
417 zil_destroy(zilog_t *zilog, boolean_t keep_first)
418 {
419 	const zil_header_t *zh = zilog->zl_header;
420 	lwb_t *lwb;
421 	dmu_tx_t *tx;
422 	uint64_t txg;
423 
424 	/*
425 	 * Wait for any previous destroy to complete.
426 	 */
427 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
428 
429 	if (BP_IS_HOLE(&zh->zh_log))
430 		return;
431 
432 	tx = dmu_tx_create(zilog->zl_os);
433 	(void) dmu_tx_assign(tx, TXG_WAIT);
434 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
435 	txg = dmu_tx_get_txg(tx);
436 
437 	mutex_enter(&zilog->zl_lock);
438 
439 	/*
440 	 * It is possible for the ZIL to get the previously mounted zilog
441 	 * structure of the same dataset if quickly remounted and the dbuf
442 	 * eviction has not completed. In this case we can see a
443 	 * non-empty lwb list and keep_first will be set. We fix this by
444 	 * clearing keep_first. This path is slower, but it's very rare.
445 	 */
446 	if (!list_is_empty(&zilog->zl_lwb_list) && keep_first)
447 		keep_first = B_FALSE;
448 
449 	ASSERT3U(zilog->zl_destroy_txg, <, txg);
450 	zilog->zl_destroy_txg = txg;
451 	zilog->zl_keep_first = keep_first;
452 
453 	if (!list_is_empty(&zilog->zl_lwb_list)) {
454 		ASSERT(zh->zh_claim_txg == 0);
455 		ASSERT(!keep_first);
456 		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
457 			list_remove(&zilog->zl_lwb_list, lwb);
458 			if (lwb->lwb_buf != NULL)
459 				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
460 			zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
461 			kmem_cache_free(zil_lwb_cache, lwb);
462 		}
463 	} else {
464 		if (!keep_first) {
465 			(void) zil_parse(zilog, zil_free_log_block,
466 			    zil_free_log_record, tx, zh->zh_claim_txg);
467 		}
468 	}
469 	mutex_exit(&zilog->zl_lock);
470 
471 	dmu_tx_commit(tx);
472 }
473 
474 /*
475  * return true if the initial log block is not valid
476  */
477 static boolean_t
478 zil_empty(zilog_t *zilog)
479 {
480 	const zil_header_t *zh = zilog->zl_header;
481 	arc_buf_t *abuf = NULL;
482 
483 	if (BP_IS_HOLE(&zh->zh_log))
484 		return (B_TRUE);
485 
486 	if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
487 		return (B_TRUE);
488 
489 	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
490 	return (B_FALSE);
491 }
492 
493 int
494 zil_claim(char *osname, void *txarg)
495 {
496 	dmu_tx_t *tx = txarg;
497 	uint64_t first_txg = dmu_tx_get_txg(tx);
498 	zilog_t *zilog;
499 	zil_header_t *zh;
500 	objset_t *os;
501 	int error;
502 
503 	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
504 	if (error) {
505 		cmn_err(CE_WARN, "can't open objset for %s", osname);
506 		return (0);
507 	}
508 
509 	zilog = dmu_objset_zil(os);
510 	zh = zil_header_in_syncing_context(zilog);
511 
512 	if (zilog->zl_spa->spa_log_state == SPA_LOG_CLEAR) {
513 		if (!BP_IS_HOLE(&zh->zh_log))
514 			zio_free_blk(zilog->zl_spa, &zh->zh_log, first_txg);
515 		BP_ZERO(&zh->zh_log);
516 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
517 	}
518 
519 	/*
520 	 * Record here whether the zil has any records to replay.
521 	 * If the header block pointer is null or the block points
522 	 * to the stubby then we know there are no valid log records.
523 	 * We use the header to store this state as the zilog gets
524 	 * freed later in dmu_objset_close().
525 	 * The flags (and the rest of the header fields) are cleared in
526 	 * zil_sync() as a result of a zil_destroy(), after replaying the log.
527 	 *
528 	 * Note, the intent log can be empty but still need the
529 	 * stubby to be claimed.
530 	 */
531 	if (!zil_empty(zilog)) {
532 		zh->zh_flags |= ZIL_REPLAY_NEEDED;
533 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
534 	}
535 
536 	/*
537 	 * Claim all log blocks if we haven't already done so, and remember
538 	 * the highest claimed sequence number.  This ensures that if we can
539 	 * read only part of the log now (e.g. due to a missing device),
540 	 * but we can read the entire log later, we will not try to replay
541 	 * or destroy beyond the last block we successfully claimed.
542 	 */
543 	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
544 	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
545 		zh->zh_claim_txg = first_txg;
546 		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
547 		    zil_claim_log_record, tx, first_txg);
548 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
549 	}
550 
551 	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
552 	dmu_objset_close(os);
553 	return (0);
554 }
555 
556 /*
557  * Check the log by walking the log chain.
558  * Checksum errors are ok as they indicate the end of the chain.
559  * Any other error (no device or read failure) is returned to the caller.
560  */
561 /* ARGSUSED */
562 int
563 zil_check_log_chain(char *osname, void *txarg)
564 {
565 	zilog_t *zilog;
566 	zil_header_t *zh;
567 	blkptr_t blk;
568 	arc_buf_t *abuf;
569 	objset_t *os;
570 	char *lrbuf;
571 	zil_trailer_t *ztp;
572 	int error;
573 
574 	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
575 	if (error) {
576 		cmn_err(CE_WARN, "can't open objset for %s", osname);
577 		return (0);
578 	}
579 
580 	zilog = dmu_objset_zil(os);
581 	zh = zil_header_in_syncing_context(zilog);
582 	blk = zh->zh_log;
583 	if (BP_IS_HOLE(&blk)) {
584 		dmu_objset_close(os);
585 		return (0); /* no chain */
586 	}
587 
588 	for (;;) {
589 		error = zil_read_log_block(zilog, &blk, &abuf);
590 		if (error)
591 			break;
592 		lrbuf = abuf->b_data;
593 		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
594 		blk = ztp->zit_next_blk;
595 		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
596 	}
597 	dmu_objset_close(os);
598 	if (error == ECKSUM)
599 		return (0); /* normal end of chain */
600 	return (error);
601 }
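/*
 * Editorial note (not part of the original source): zil_claim() and
 * zil_check_log_chain() above are shaped as dmu_objset_find() callbacks
 * (taking an objset name and an opaque argument), so a pool-open path can
 * apply them to every dataset.  A hedged sketch of such a caller, with
 * placeholder context:
 *
 *	(void) dmu_objset_find(spa_name(spa), zil_claim, tx,
 *	    DS_FIND_CHILDREN);
 */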
602 
603 static int
604 zil_vdev_compare(const void *x1, const void *x2)
605 {
606 	uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
607 	uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
608 
609 	if (v1 < v2)
610 		return (-1);
611 	if (v1 > v2)
612 		return (1);
613 
614 	return (0);
615 }
616 
617 void
618 zil_add_block(zilog_t *zilog, blkptr_t *bp)
619 {
620 	avl_tree_t *t = &zilog->zl_vdev_tree;
621 	avl_index_t where;
622 	zil_vdev_node_t *zv, zvsearch;
623 	int ndvas = BP_GET_NDVAS(bp);
624 	int i;
625 
626 	if (zfs_nocacheflush)
627 		return;
628 
629 	ASSERT(zilog->zl_writer);
630 
631 	/*
632 	 * Even though we're zl_writer, we still need a lock because the
633 	 * zl_get_data() callbacks may have dmu_sync() done callbacks
634 	 * that will run concurrently.
635 	 */
636 	mutex_enter(&zilog->zl_vdev_lock);
637 	for (i = 0; i < ndvas; i++) {
638 		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
639 		if (avl_find(t, &zvsearch, &where) == NULL) {
640 			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
641 			zv->zv_vdev = zvsearch.zv_vdev;
642 			avl_insert(t, zv, where);
643 		}
644 	}
645 	mutex_exit(&zilog->zl_vdev_lock);
646 }
647 
648 void
649 zil_flush_vdevs(zilog_t *zilog)
650 {
651 	spa_t *spa = zilog->zl_spa;
652 	avl_tree_t *t = &zilog->zl_vdev_tree;
653 	void *cookie = NULL;
654 	zil_vdev_node_t *zv;
655 	zio_t *zio;
656 
657 	ASSERT(zilog->zl_writer);
658 
659 	/*
660 	 * We don't need zl_vdev_lock here because we're the zl_writer,
661 	 * and all zl_get_data() callbacks are done.
662 	 */
663 	if (avl_numnodes(t) == 0)
664 		return;
665 
666 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
667 
668 	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
669 
670 	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
671 		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
672 		if (vd != NULL)
673 			zio_flush(zio, vd);
674 		kmem_free(zv, sizeof (*zv));
675 	}
676 
677 	/*
678 	 * Wait for all the flushes to complete.  Not all devices actually
679 	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
680 	 */
681 	(void) zio_wait(zio);
682 
683 	spa_config_exit(spa, SCL_STATE, FTAG);
684 }
685 
686 /*
687  * Function called when a log block write completes
688  */
689 static void
690 zil_lwb_write_done(zio_t *zio)
691 {
692 	lwb_t *lwb = zio->io_private;
693 	zilog_t *zilog = lwb->lwb_zilog;
694 
695 	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
696 	ASSERT(BP_GET_CHECKSUM(zio->io_bp) == ZIO_CHECKSUM_ZILOG);
697 	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
698 	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
699 	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
700 	ASSERT(!BP_IS_GANG(zio->io_bp));
701 	ASSERT(!BP_IS_HOLE(zio->io_bp));
702 	ASSERT(zio->io_bp->blk_fill == 0);
703 
704 	/*
705 	 * Ensure the lwb buffer pointer is cleared before releasing
706 	 * the txg. If we have had an allocation failure and
707 	 * the txg is waiting to sync then we want zil_sync()
708 	 * to remove the lwb so that it's not picked up as the next new
709 	 * one in zil_commit_writer(). zil_sync() will only remove
710 	 * the lwb if lwb_buf is null.
711 	 */
712 	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
713 	mutex_enter(&zilog->zl_lock);
714 	lwb->lwb_buf = NULL;
715 	if (zio->io_error)
716 		zilog->zl_log_error = B_TRUE;
717 
718 	/*
719 	 * Now that we've written this log block, we have a stable pointer
720 	 * to the next block in the chain, so it's OK to let the txg in
721 	 * which we allocated the next block sync. We still have the
722 	 * zl_lock to ensure zil_sync doesn't kmem free the lwb.
723 	 */
724 	txg_rele_to_sync(&lwb->lwb_txgh);
725 	mutex_exit(&zilog->zl_lock);
726 }
727 
728 /*
729  * Initialize the io for a log block.
730  */
731 static void
732 zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
733 {
734 	zbookmark_t zb;
735 
736 	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
737 	zb.zb_object = 0;
738 	zb.zb_level = -1;
739 	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
740 
741 	if (zilog->zl_root_zio == NULL) {
742 		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
743 		    ZIO_FLAG_CANFAIL);
744 	}
745 	if (lwb->lwb_zio == NULL) {
746 		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
747 		    0, &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz,
748 		    zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
749 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb);
750 	}
751 }
752 
753 /*
754  * Start a log block write and advance to the next log block.
755  * Calls are serialized.
756  */
757 static lwb_t *
758 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
759 {
760 	lwb_t *nlwb;
761 	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
762 	spa_t *spa = zilog->zl_spa;
763 	blkptr_t *bp = &ztp->zit_next_blk;
764 	uint64_t txg;
765 	uint64_t zil_blksz;
766 	int error;
767 
768 	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));
769 
770 	/*
771 	 * Allocate the next block and save its address in this block
772 	 * before writing it in order to establish the log chain.
773 	 * Note that if the allocation of nlwb synced before we wrote
774 	 * the block that points at it (lwb), we'd leak it if we crashed.
775 	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
776 	 */
777 	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
778 	txg_rele_to_quiesce(&lwb->lwb_txgh);
779 
780 	/*
781 	 * Pick a ZIL blocksize. We request a size that is the
782 	 * maximum of the previous used size, the current used size and
783 	 * the amount waiting in the queue.
784 	 */
785 	zil_blksz = MAX(zilog->zl_prev_used,
786 	    zilog->zl_cur_used + sizeof (*ztp));
787 	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
788 	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
789 	if (zil_blksz > ZIL_MAX_BLKSZ)
790 		zil_blksz = ZIL_MAX_BLKSZ;
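	/*
	 * Editorial worked example (not original source; ZIL_MIN_BLKSZ is
	 * assumed to be 4 KB here): if the largest of the three inputs above
	 * were 37000 bytes, zil_blksz would round up to 40960 bytes, and it
	 * would only be clamped if that exceeded ZIL_MAX_BLKSZ.
	 */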
791 
792 	BP_ZERO(bp);
793 	/* pass the old blkptr in order to spread log blocks across devs */
794 	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
795 	if (error) {
796 		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);
797 
798 		/*
799 		 * We dirty the dataset to ensure that zil_sync() will
800 		 * be called to remove this lwb from our zl_lwb_list.
801 		 * Failing to do so, may leave an lwb with a NULL lwb_buf
802 		 * hanging around on the zl_lwb_list.
803 		 */
804 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
805 		dmu_tx_commit(tx);
806 
807 		/*
808 		 * We've just experienced an allocation failure, so we
809 		 * terminate the current lwb and send it on its way.
810 		 */
811 		ztp->zit_pad = 0;
812 		ztp->zit_nused = lwb->lwb_nused;
813 		ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
814 		zio_nowait(lwb->lwb_zio);
815 
816 		/*
817 		 * By returning NULL the caller will call txg_wait_synced()
818 		 */
819 		return (NULL);
820 	}
821 
822 	ASSERT3U(bp->blk_birth, ==, txg);
823 	ztp->zit_pad = 0;
824 	ztp->zit_nused = lwb->lwb_nused;
825 	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
826 	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
827 	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
828 
829 	/*
830 	 * Allocate a new log write buffer (lwb).
831 	 */
832 	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
833 
834 	nlwb->lwb_zilog = zilog;
835 	nlwb->lwb_blk = *bp;
836 	nlwb->lwb_nused = 0;
837 	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
838 	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
839 	nlwb->lwb_max_txg = txg;
840 	nlwb->lwb_zio = NULL;
841 
842 	/*
843 	 * Put new lwb at the end of the log chain
844 	 */
845 	mutex_enter(&zilog->zl_lock);
846 	list_insert_tail(&zilog->zl_lwb_list, nlwb);
847 	mutex_exit(&zilog->zl_lock);
848 
849 	/* Record the block for later vdev flushing */
850 	zil_add_block(zilog, &lwb->lwb_blk);
851 
852 	/*
853 	 * kick off the write for the old log block
854 	 */
855 	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
856 	ASSERT(lwb->lwb_zio);
857 	zio_nowait(lwb->lwb_zio);
858 
859 	return (nlwb);
860 }
861 
862 static lwb_t *
863 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
864 {
865 	lr_t *lrc = &itx->itx_lr; /* common log record */
866 	lr_write_t *lr = (lr_write_t *)lrc;
867 	uint64_t txg = lrc->lrc_txg;
868 	uint64_t reclen = lrc->lrc_reclen;
869 	uint64_t dlen;
870 
871 	if (lwb == NULL)
872 		return (NULL);
873 	ASSERT(lwb->lwb_buf != NULL);
874 
875 	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
876 		dlen = P2ROUNDUP_TYPED(
877 		    lr->lr_length, sizeof (uint64_t), uint64_t);
878 	else
879 		dlen = 0;
880 
881 	zilog->zl_cur_used += (reclen + dlen);
882 
883 	zil_lwb_write_init(zilog, lwb);
884 
885 	/*
886 	 * If this record won't fit in the current log block, start a new one.
887 	 */
888 	if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
889 		lwb = zil_lwb_write_start(zilog, lwb);
890 		if (lwb == NULL)
891 			return (NULL);
892 		zil_lwb_write_init(zilog, lwb);
893 		ASSERT(lwb->lwb_nused == 0);
894 		if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
895 			txg_wait_synced(zilog->zl_dmu_pool, txg);
896 			return (lwb);
897 		}
898 	}
899 
900 	/*
901 	 * Update lrc_seq to be the log record sequence number (see zil.h).
902 	 * Then copy the record to the log buffer.
903 	 */
904 	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
905 	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
906 
907 	/*
908 	 * If it's a write, fetch the data or get its blkptr as appropriate.
909 	 */
910 	if (lrc->lrc_txtype == TX_WRITE) {
911 		if (txg > spa_freeze_txg(zilog->zl_spa))
912 			txg_wait_synced(zilog->zl_dmu_pool, txg);
913 		if (itx->itx_wr_state != WR_COPIED) {
914 			char *dbuf;
915 			int error;
916 
917 			/* alignment is guaranteed */
918 			lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
919 			if (dlen) {
920 				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
921 				dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
922 				lr->lr_common.lrc_reclen += dlen;
923 			} else {
924 				ASSERT(itx->itx_wr_state == WR_INDIRECT);
925 				dbuf = NULL;
926 			}
927 			error = zilog->zl_get_data(
928 			    itx->itx_private, lr, dbuf, lwb->lwb_zio);
929 			if (error == EIO) {
930 				txg_wait_synced(zilog->zl_dmu_pool, txg);
931 				return (lwb);
932 			}
933 			if (error) {
934 				ASSERT(error == ENOENT || error == EEXIST ||
935 				    error == EALREADY);
936 				return (lwb);
937 			}
938 		}
939 	}
940 
941 	lwb->lwb_nused += reclen + dlen;
942 	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
943 	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
944 	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);
945 
946 	return (lwb);
947 }
948 
949 itx_t *
950 zil_itx_create(uint64_t txtype, size_t lrsize)
951 {
952 	itx_t *itx;
953 
954 	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
955 
956 	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
957 	itx->itx_lr.lrc_txtype = txtype;
958 	itx->itx_lr.lrc_reclen = lrsize;
959 	itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
960 	itx->itx_lr.lrc_seq = 0;	/* defensive */
961 
962 	return (itx);
963 }
964 
965 uint64_t
966 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
967 {
968 	uint64_t seq;
969 
970 	ASSERT(itx->itx_lr.lrc_seq == 0);
971 
972 	mutex_enter(&zilog->zl_lock);
973 	list_insert_tail(&zilog->zl_itx_list, itx);
974 	zilog->zl_itx_list_sz += itx->itx_sod;
975 	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
976 	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
977 	mutex_exit(&zilog->zl_lock);
978 
979 	return (seq);
980 }
981 
982 /*
983  * Free up all in-memory intent log transactions that have now been synced.
984  */
985 static void
986 zil_itx_clean(zilog_t *zilog)
987 {
988 	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
989 	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
990 	list_t clean_list;
991 	itx_t *itx;
992 
993 	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
994 
995 	mutex_enter(&zilog->zl_lock);
996 	/* wait for a log writer to finish walking list */
997 	while (zilog->zl_writer) {
998 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
999 	}
1000 
1001 	/*
1002 	 * Move the sync'd log transactions to a separate list so we can call
1003 	 * kmem_free without holding the zl_lock.
1004 	 *
1005 	 * There is no need to set zl_writer as we don't drop zl_lock here
1006 	 */
1007 	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
1008 	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
1009 		list_remove(&zilog->zl_itx_list, itx);
1010 		zilog->zl_itx_list_sz -= itx->itx_sod;
1011 		list_insert_tail(&clean_list, itx);
1012 	}
1013 	cv_broadcast(&zilog->zl_cv_writer);
1014 	mutex_exit(&zilog->zl_lock);
1015 
1016 	/* destroy sync'd log transactions */
1017 	while ((itx = list_head(&clean_list)) != NULL) {
1018 		list_remove(&clean_list, itx);
1019 		kmem_free(itx, offsetof(itx_t, itx_lr)
1020 		    + itx->itx_lr.lrc_reclen);
1021 	}
1022 	list_destroy(&clean_list);
1023 }
1024 
1025 /*
1026  * If there are any in-memory intent log transactions which have now been
1027  * synced then start up a taskq to free them.
1028  */
1029 void
1030 zil_clean(zilog_t *zilog)
1031 {
1032 	itx_t *itx;
1033 
1034 	mutex_enter(&zilog->zl_lock);
1035 	itx = list_head(&zilog->zl_itx_list);
1036 	if ((itx != NULL) &&
1037 	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
1038 		(void) taskq_dispatch(zilog->zl_clean_taskq,
1039 		    (task_func_t *)zil_itx_clean, zilog, TQ_SLEEP);
1040 	}
1041 	mutex_exit(&zilog->zl_lock);
1042 }
1043 
1044 static void
1045 zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
1046 {
1047 	uint64_t txg;
1048 	uint64_t commit_seq = 0;
1049 	itx_t *itx, *itx_next = (itx_t *)-1;
1050 	lwb_t *lwb;
1051 	spa_t *spa;
1052 
1053 	zilog->zl_writer = B_TRUE;
1054 	ASSERT(zilog->zl_root_zio == NULL);
1055 	spa = zilog->zl_spa;
1056 
1057 	if (zilog->zl_suspend) {
1058 		lwb = NULL;
1059 	} else {
1060 		lwb = list_tail(&zilog->zl_lwb_list);
1061 		if (lwb == NULL) {
1062 			/*
1063 			 * Return if there's nothing to flush before we
1064 			 * dirty the fs by calling zil_create()
1065 			 */
1066 			if (list_is_empty(&zilog->zl_itx_list)) {
1067 				zilog->zl_writer = B_FALSE;
1068 				return;
1069 			}
1070 			mutex_exit(&zilog->zl_lock);
1071 			zil_create(zilog);
1072 			mutex_enter(&zilog->zl_lock);
1073 			lwb = list_tail(&zilog->zl_lwb_list);
1074 		}
1075 	}
1076 
1077 	/* Loop through in-memory log transactions filling log blocks. */
1078 	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
1079 	for (;;) {
1080 		/*
1081 		 * Find the next itx to push:
1082 		 * Push all transactions related to specified foid and all
1083 		 * other transactions except TX_WRITE, TX_TRUNCATE,
1084 		 * TX_SETATTR and TX_ACL for all other files.
1085 		 */
1086 		if (itx_next != (itx_t *)-1)
1087 			itx = itx_next;
1088 		else
1089 			itx = list_head(&zilog->zl_itx_list);
1090 		for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
1091 			if (foid == 0) /* push all foids? */
1092 				break;
1093 			if (itx->itx_sync) /* push all O_[D]SYNC */
1094 				break;
1095 			switch (itx->itx_lr.lrc_txtype) {
1096 			case TX_SETATTR:
1097 			case TX_WRITE:
1098 			case TX_TRUNCATE:
1099 			case TX_ACL:
1100 				/* lr_foid is at the same offset for these records */
1101 				if (((lr_write_t *)&itx->itx_lr)->lr_foid
1102 				    != foid) {
1103 					continue; /* skip this record */
1104 				}
1105 			}
1106 			break;
1107 		}
1108 		if (itx == NULL)
1109 			break;
1110 
1111 		if ((itx->itx_lr.lrc_seq > seq) &&
1112 		    ((lwb == NULL) || (lwb->lwb_nused == 0) ||
1113 		    (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) {
1114 			break;
1115 		}
1116 
1117 		/*
1118 		 * Save the next pointer.  Even though we soon drop
1119 		 * zl_lock all threads that may change the list
1120 		 * (another writer or zil_itx_clean) can't do so until
1121 		 * they have zl_writer.
1122 		 */
1123 		itx_next = list_next(&zilog->zl_itx_list, itx);
1124 		list_remove(&zilog->zl_itx_list, itx);
1125 		zilog->zl_itx_list_sz -= itx->itx_sod;
1126 		mutex_exit(&zilog->zl_lock);
1127 		txg = itx->itx_lr.lrc_txg;
1128 		ASSERT(txg);
1129 
1130 		if (txg > spa_last_synced_txg(spa) ||
1131 		    txg > spa_freeze_txg(spa))
1132 			lwb = zil_lwb_commit(zilog, itx, lwb);
1133 		kmem_free(itx, offsetof(itx_t, itx_lr)
1134 		    + itx->itx_lr.lrc_reclen);
1135 		mutex_enter(&zilog->zl_lock);
1136 	}
1137 	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1138 	/* determine commit sequence number */
1139 	itx = list_head(&zilog->zl_itx_list);
1140 	if (itx)
1141 		commit_seq = itx->itx_lr.lrc_seq;
1142 	else
1143 		commit_seq = zilog->zl_itx_seq;
1144 	mutex_exit(&zilog->zl_lock);
1145 
1146 	/* write the last block out */
1147 	if (lwb != NULL && lwb->lwb_zio != NULL)
1148 		lwb = zil_lwb_write_start(zilog, lwb);
1149 
1150 	zilog->zl_prev_used = zilog->zl_cur_used;
1151 	zilog->zl_cur_used = 0;
1152 
1153 	/*
1154 	 * Wait if necessary for the log blocks to be on stable storage.
1155 	 */
1156 	if (zilog->zl_root_zio) {
1157 		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
1158 		(void) zio_wait(zilog->zl_root_zio);
1159 		zilog->zl_root_zio = NULL;
1160 		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
1161 		zil_flush_vdevs(zilog);
1162 	}
1163 
1164 	if (zilog->zl_log_error || lwb == NULL) {
1165 		zilog->zl_log_error = 0;
1166 		txg_wait_synced(zilog->zl_dmu_pool, 0);
1167 	}
1168 
1169 	mutex_enter(&zilog->zl_lock);
1170 	zilog->zl_writer = B_FALSE;
1171 
1172 	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
1173 	zilog->zl_commit_seq = commit_seq;
1174 }
1175 
1176 /*
1177  * Push zfs transactions to stable storage up to the supplied sequence number.
1178  * If foid is 0, push out all transactions; otherwise push only those
1179  * for that file or those that might have been used to create that file.
1180  */
1181 void
1182 zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
1183 {
1184 	if (zilog == NULL || seq == 0)
1185 		return;
1186 
1187 	mutex_enter(&zilog->zl_lock);
1188 
1189 	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */
1190 
1191 	while (zilog->zl_writer) {
1192 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1193 		if (seq < zilog->zl_commit_seq) {
1194 			mutex_exit(&zilog->zl_lock);
1195 			return;
1196 		}
1197 	}
1198 	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
1199 	/* wake up others waiting on the commit */
1200 	cv_broadcast(&zilog->zl_cv_writer);
1201 	mutex_exit(&zilog->zl_lock);
1202 }
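/*
 * Editorial usage sketch (not part of the original source): one way a
 * caller could log a synchronous write with the interfaces above.  The
 * real callers live in the ZPL logging code; zfsvfs, foid, off, len and
 * tx are placeholder names.
 *
 *	itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *	lr_write_t *lr = (lr_write_t *)&itx->itx_lr;
 *	lr->lr_foid = foid;
 *	lr->lr_offset = off;
 *	lr->lr_length = len;
 *	itx->itx_wr_state = WR_INDIRECT;
 *	itx->itx_private = zfsvfs;
 *	itx->itx_sync = B_TRUE;
 *	uint64_t seq = zil_itx_assign(zilog, itx, tx);
 *	dmu_tx_commit(tx);
 *	...
 *	zil_commit(zilog, seq, foid);
 *
 * zil_itx_create() allocates the in-memory record, zil_itx_assign() queues
 * it against the open transaction's txg, and zil_commit() (e.g. from an
 * fsync handler) pushes the relevant queued records out to stable log
 * blocks.
 */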
1203 
1204 /*
1205  * Called in syncing context to free committed log blocks and update log header.
1206  */
1207 void
1208 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1209 {
1210 	zil_header_t *zh = zil_header_in_syncing_context(zilog);
1211 	uint64_t txg = dmu_tx_get_txg(tx);
1212 	spa_t *spa = zilog->zl_spa;
1213 	lwb_t *lwb;
1214 
1215 	/*
1216 	 * We don't zero out zl_destroy_txg, so make sure we don't try
1217 	 * to destroy it twice.
1218 	 */
1219 	if (spa_sync_pass(spa) != 1)
1220 		return;
1221 
1222 	mutex_enter(&zilog->zl_lock);
1223 
1224 	ASSERT(zilog->zl_stop_sync == 0);
1225 
1226 	zh->zh_replay_seq = zilog->zl_replayed_seq[txg & TXG_MASK];
1227 
1228 	if (zilog->zl_destroy_txg == txg) {
1229 		blkptr_t blk = zh->zh_log;
1230 
1231 		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1232 
1233 		bzero(zh, sizeof (zil_header_t));
1234 		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1235 
1236 		if (zilog->zl_keep_first) {
1237 			/*
1238 			 * If this block was part of log chain that couldn't
1239 			 * be claimed because a device was missing during
1240 			 * zil_claim(), but that device later returns,
1241 			 * then this block could erroneously appear valid.
1242 			 * To guard against this, assign a new GUID to the new
1243 			 * log chain so it doesn't matter what blk points to.
1244 			 */
1245 			zil_init_log_chain(zilog, &blk);
1246 			zh->zh_log = blk;
1247 		}
1248 	}
1249 
1250 	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1251 		zh->zh_log = lwb->lwb_blk;
1252 		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1253 			break;
1254 		list_remove(&zilog->zl_lwb_list, lwb);
1255 		zio_free_blk(spa, &lwb->lwb_blk, txg);
1256 		kmem_cache_free(zil_lwb_cache, lwb);
1257 
1258 		/*
1259 		 * If we don't have anything left in the lwb list then
1260 		 * we've had an allocation failure and we need to zero
1261 		 * out the zil_header blkptr so that we don't end
1262 		 * up freeing the same block twice.
1263 		 */
1264 		if (list_head(&zilog->zl_lwb_list) == NULL)
1265 			BP_ZERO(&zh->zh_log);
1266 	}
1267 	mutex_exit(&zilog->zl_lock);
1268 }
1269 
1270 void
1271 zil_init(void)
1272 {
1273 	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1274 	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1275 }
1276 
1277 void
1278 zil_fini(void)
1279 {
1280 	kmem_cache_destroy(zil_lwb_cache);
1281 }
1282 
1283 zilog_t *
1284 zil_alloc(objset_t *os, zil_header_t *zh_phys)
1285 {
1286 	zilog_t *zilog;
1287 
1288 	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1289 
1290 	zilog->zl_header = zh_phys;
1291 	zilog->zl_os = os;
1292 	zilog->zl_spa = dmu_objset_spa(os);
1293 	zilog->zl_dmu_pool = dmu_objset_pool(os);
1294 	zilog->zl_destroy_txg = TXG_INITIAL - 1;
1295 
1296 	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1297 
1298 	list_create(&zilog->zl_itx_list, sizeof (itx_t),
1299 	    offsetof(itx_t, itx_node));
1300 
1301 	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1302 	    offsetof(lwb_t, lwb_node));
1303 
1304 	mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1305 
1306 	avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1307 	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1308 
1309 	cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1310 	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
1311 
1312 	return (zilog);
1313 }
1314 
1315 void
1316 zil_free(zilog_t *zilog)
1317 {
1318 	lwb_t *lwb;
1319 
1320 	zilog->zl_stop_sync = 1;
1321 
1322 	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1323 		list_remove(&zilog->zl_lwb_list, lwb);
1324 		if (lwb->lwb_buf != NULL)
1325 			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1326 		kmem_cache_free(zil_lwb_cache, lwb);
1327 	}
1328 	list_destroy(&zilog->zl_lwb_list);
1329 
1330 	avl_destroy(&zilog->zl_vdev_tree);
1331 	mutex_destroy(&zilog->zl_vdev_lock);
1332 
1333 	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1334 	list_destroy(&zilog->zl_itx_list);
1335 	mutex_destroy(&zilog->zl_lock);
1336 
1337 	cv_destroy(&zilog->zl_cv_writer);
1338 	cv_destroy(&zilog->zl_cv_suspend);
1339 
1340 	kmem_free(zilog, sizeof (zilog_t));
1341 }
1342 
1343 /*
1344  * Open an intent log.
1345  */
1346 zilog_t *
1347 zil_open(objset_t *os, zil_get_data_t *get_data)
1348 {
1349 	zilog_t *zilog = dmu_objset_zil(os);
1350 
1351 	zilog->zl_get_data = get_data;
1352 	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1353 	    2, 2, TASKQ_PREPOPULATE);
1354 
1355 	return (zilog);
1356 }
1357 
1358 /*
1359  * Close an intent log.
1360  */
1361 void
1362 zil_close(zilog_t *zilog)
1363 {
1364 	/*
1365 	 * If the log isn't already committed, mark the objset dirty
1366 	 * (so zil_sync() will be called) and wait for that txg to sync.
1367 	 */
1368 	if (!zil_is_committed(zilog)) {
1369 		uint64_t txg;
1370 		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
1371 		(void) dmu_tx_assign(tx, TXG_WAIT);
1372 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1373 		txg = dmu_tx_get_txg(tx);
1374 		dmu_tx_commit(tx);
1375 		txg_wait_synced(zilog->zl_dmu_pool, txg);
1376 	}
1377 
1378 	taskq_destroy(zilog->zl_clean_taskq);
1379 	zilog->zl_clean_taskq = NULL;
1380 	zilog->zl_get_data = NULL;
1381 
1382 	zil_itx_clean(zilog);
1383 	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1384 }
1385 
1386 /*
1387  * Suspend an intent log.  While in suspended mode, we still honor
1388  * synchronous semantics, but we rely on txg_wait_synced() to do it.
1389  * We suspend the log briefly when taking a snapshot so that the snapshot
1390  * contains all the data it's supposed to, and has an empty intent log.
1391  */
1392 int
1393 zil_suspend(zilog_t *zilog)
1394 {
1395 	const zil_header_t *zh = zilog->zl_header;
1396 
1397 	mutex_enter(&zilog->zl_lock);
1398 	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
1399 		mutex_exit(&zilog->zl_lock);
1400 		return (EBUSY);
1401 	}
1402 	if (zilog->zl_suspend++ != 0) {
1403 		/*
1404 		 * Someone else already began a suspend.
1405 		 * Just wait for them to finish.
1406 		 */
1407 		while (zilog->zl_suspending)
1408 			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1409 		mutex_exit(&zilog->zl_lock);
1410 		return (0);
1411 	}
1412 	zilog->zl_suspending = B_TRUE;
1413 	mutex_exit(&zilog->zl_lock);
1414 
1415 	zil_commit(zilog, UINT64_MAX, 0);
1416 
1417 	/*
1418 	 * Wait for any in-flight log writes to complete.
1419 	 */
1420 	mutex_enter(&zilog->zl_lock);
1421 	while (zilog->zl_writer)
1422 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1423 	mutex_exit(&zilog->zl_lock);
1424 
1425 	zil_destroy(zilog, B_FALSE);
1426 
1427 	mutex_enter(&zilog->zl_lock);
1428 	zilog->zl_suspending = B_FALSE;
1429 	cv_broadcast(&zilog->zl_cv_suspend);
1430 	mutex_exit(&zilog->zl_lock);
1431 
1432 	return (0);
1433 }
1434 
1435 void
1436 zil_resume(zilog_t *zilog)
1437 {
1438 	mutex_enter(&zilog->zl_lock);
1439 	ASSERT(zilog->zl_suspend != 0);
1440 	zilog->zl_suspend--;
1441 	mutex_exit(&zilog->zl_lock);
1442 }
1443 
1444 typedef struct zil_replay_arg {
1445 	objset_t	*zr_os;
1446 	zil_replay_func_t **zr_replay;
1447 	void		*zr_arg;
1448 	boolean_t	zr_byteswap;
1449 	char		*zr_lrbuf;
1450 } zil_replay_arg_t;
1451 
1452 static void
1453 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1454 {
1455 	zil_replay_arg_t *zr = zra;
1456 	const zil_header_t *zh = zilog->zl_header;
1457 	uint64_t reclen = lr->lrc_reclen;
1458 	uint64_t txtype = lr->lrc_txtype;
1459 	char *name;
1460 	int pass, error;
1461 
1462 	if (!zilog->zl_replay)			/* giving up */
1463 		return;
1464 
1465 	if (lr->lrc_txg < claim_txg)		/* already committed */
1466 		return;
1467 
1468 	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
1469 		return;
1470 
1471 	/* Strip case-insensitive bit, still present in log record */
1472 	txtype &= ~TX_CI;
1473 
1474 	if (txtype == 0 || txtype >= TX_MAX_TYPE) {
1475 		error = EINVAL;
1476 		goto bad;
1477 	}
1478 
1479 	/*
1480 	 * Make a copy of the data so we can revise and extend it.
1481 	 */
1482 	bcopy(lr, zr->zr_lrbuf, reclen);
1483 
1484 	/*
1485 	 * The log block containing this lr may have been byteswapped
1486 	 * so that we can easily examine common fields like lrc_txtype.
1487 	 * However, the log is a mix of different data types, and only the
1488 	 * replay vectors know how to byteswap their records.  Therefore, if
1489 	 * the lr was byteswapped, undo it before invoking the replay vector.
1490 	 */
1491 	if (zr->zr_byteswap)
1492 		byteswap_uint64_array(zr->zr_lrbuf, reclen);
1493 
1494 	/*
1495 	 * If this is a TX_WRITE with a blkptr, suck in the data.
1496 	 */
1497 	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
1498 		lr_write_t *lrw = (lr_write_t *)lr;
1499 		blkptr_t *wbp = &lrw->lr_blkptr;
1500 		uint64_t wlen = lrw->lr_length;
1501 		char *wbuf = zr->zr_lrbuf + reclen;
1502 
1503 		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
1504 			bzero(wbuf, wlen);
1505 		} else {
1506 			/*
1507 			 * A subsequent write may have overwritten this block,
1508 			 * in which case wbp may have been freed and
1509 			 * reallocated, and our read of wbp may fail with a
1510 			 * checksum error.  We can safely ignore this because
1511 			 * the later write will provide the correct data.
1512 			 */
1513 			zbookmark_t zb;
1514 
1515 			zb.zb_objset = dmu_objset_id(zilog->zl_os);
1516 			zb.zb_object = lrw->lr_foid;
1517 			zb.zb_level = -1;
1518 			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);
1519 
1520 			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
1521 			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
1522 			    ZIO_PRIORITY_SYNC_READ,
1523 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
1524 			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
1525 		}
1526 	}
1527 
1528 	/*
1529 	 * We must now do two things atomically: replay this log record,
1530 	 * and update the log header sequence number to reflect the fact that
1531 	 * we did so. At the end of each replay function the sequence number
1532 	 * is updated if we are in replay mode.
1533 	 */
1534 	for (pass = 1; pass <= 2; pass++) {
1535 		zilog->zl_replaying_seq = lr->lrc_seq;
1536 		/* Only byteswap (if needed) on the 1st pass.  */
1537 		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
1538 		    zr->zr_byteswap && pass == 1);
1539 
1540 		if (!error)
1541 			return;
1542 
1543 		/*
1544 		 * The DMU's dnode layer doesn't see removes until the txg
1545 		 * commits, so a subsequent claim can spuriously fail with
1546 		 * EEXIST. Therefore, if we receive any error, we try syncing
1547 		 * out any removes and then retry the transaction.
1548 		 */
1549 		if (pass == 1)
1550 			txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
1551 	}
1552 
1553 bad:
1554 	ASSERT(error);
1555 	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
1556 	dmu_objset_name(zr->zr_os, name);
1557 	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
1558 	    "dataset %s, seq 0x%llx, txtype %llu %s\n",
1559 	    error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
1560 	    (lr->lrc_txtype & TX_CI) ? "CI" : "");
1561 	zilog->zl_replay = B_FALSE;
1562 	kmem_free(name, MAXNAMELEN);
1563 }
1564 
1565 /* ARGSUSED */
1566 static void
1567 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
1568 {
1569 	zilog->zl_replay_blks++;
1570 }
1571 
1572 /*
1573  * If this dataset has a non-empty intent log, replay it and destroy it.
1574  */
1575 void
1576 zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
1577 {
1578 	zilog_t *zilog = dmu_objset_zil(os);
1579 	const zil_header_t *zh = zilog->zl_header;
1580 	zil_replay_arg_t zr;
1581 
1582 	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
1583 		zil_destroy(zilog, B_TRUE);
1584 		return;
1585 	}
1586 
1587 	zr.zr_os = os;
1588 	zr.zr_replay = replay_func;
1589 	zr.zr_arg = arg;
1590 	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
1591 	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
1592 
1593 	/*
1594 	 * Wait for in-progress removes to sync before starting replay.
1595 	 */
1596 	txg_wait_synced(zilog->zl_dmu_pool, 0);
1597 
1598 	zilog->zl_replay = B_TRUE;
1599 	zilog->zl_replay_time = lbolt;
1600 	ASSERT(zilog->zl_replay_blks == 0);
1601 	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
1602 	    zh->zh_claim_txg);
1603 	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);
1604 
1605 	zil_destroy(zilog, B_FALSE);
1606 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
1607 	zilog->zl_replay = B_FALSE;
1608 }
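/*
 * Editorial sketch (not part of the original source): a consumer of
 * zil_replay() supplies one replay function per transaction type.  The
 * names below are placeholders; the ZPL keeps its real table in
 * zfs_replay.c.
 *
 *	static zil_replay_func_t *my_replay_vector[TX_MAX_TYPE];
 *
 *	my_replay_vector[TX_CREATE] = my_replay_create;
 *	my_replay_vector[TX_WRITE] = my_replay_write;
 *	my_replay_vector[TX_REMOVE] = my_replay_remove;
 *	...
 *	zil_replay(os, my_fs_data, my_replay_vector);
 *
 * Each vector receives a copy of the log record and a byteswap flag,
 * applies the operation to the objset, and returns 0 on success;
 * zil_replay_log_record() above retries once after a txg_wait_synced()
 * if a vector fails.
 */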
1609 
1610 /*
1611  * Report whether all transactions are committed
1612  */
1613 int
1614 zil_is_committed(zilog_t *zilog)
1615 {
1616 	lwb_t *lwb;
1617 	int ret;
1618 
1619 	mutex_enter(&zilog->zl_lock);
1620 	while (zilog->zl_writer)
1621 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1622 
1623 	/* recent unpushed intent log transactions? */
1624 	if (!list_is_empty(&zilog->zl_itx_list)) {
1625 		ret = B_FALSE;
1626 		goto out;
1627 	}
1628 
1629 	/* intent log never used? */
1630 	lwb = list_head(&zilog->zl_lwb_list);
1631 	if (lwb == NULL) {
1632 		ret = B_TRUE;
1633 		goto out;
1634 	}
1635 
1636 	/*
1637 	 * more than 1 log buffer means zil_sync() hasn't yet freed
1638 	 * entries after a txg has committed
1639 	 */
1640 	if (list_next(&zilog->zl_lwb_list, lwb)) {
1641 		ret = B_FALSE;
1642 		goto out;
1643 	}
1644 
1645 	ASSERT(zil_empty(zilog));
1646 	ret = B_TRUE;
1647 out:
1648 	cv_broadcast(&zilog->zl_cv_writer);
1649 	mutex_exit(&zilog->zl_lock);
1650 	return (ret);
1651 }
1652 
1653 /* ARGSUSED */
1654 int
1655 zil_vdev_offline(char *osname, void *arg)
1656 {
1657 	objset_t *os;
1658 	zilog_t *zilog;
1659 	int error;
1660 
1661 	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
1662 	if (error)
1663 		return (error);
1664 
1665 	zilog = dmu_objset_zil(os);
1666 	if (zil_suspend(zilog) != 0)
1667 		error = EEXIST;
1668 	else
1669 		zil_resume(zilog);
1670 	dmu_objset_close(os);
1671 	return (error);
1672 }
1673