xref: /titanic_41/usr/src/uts/common/fs/zfs/zil.c (revision 4b22b9337f359bfd063322244f5336cc7c6ffcfa)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/zfs_context.h>
29 #include <sys/spa.h>
30 #include <sys/dmu.h>
31 #include <sys/zap.h>
32 #include <sys/arc.h>
33 #include <sys/stat.h>
34 #include <sys/resource.h>
35 #include <sys/zil.h>
36 #include <sys/zil_impl.h>
37 #include <sys/dsl_dataset.h>
38 #include <sys/vdev.h>
39 #include <sys/dmu_tx.h>
40 
41 /*
42  * The zfs intent log (ZIL) saves, in memory, transaction records of
43  * system calls that change the file system, with enough information
44  * to be able to replay them. These are stored in memory until
45  * either the DMU transaction group (txg) commits them to the stable pool
46  * and they can be discarded, or they are flushed to the stable log
47  * (also in the pool) due to an fsync, O_DSYNC or other synchronous
48  * requirement. In the event of a panic or power failure, those log
49  * records (transactions) are replayed.
50  *
51  * There is one ZIL per file system. Its on-disk (pool) format consists
52  * of 3 parts:
53  *
54  * 	- ZIL header
55  * 	- ZIL blocks
56  * 	- ZIL records
57  *
58  * A log record holds a system call transaction. Log blocks can
59  * hold many log records and the blocks are chained together.
60  * Each ZIL block contains a block pointer (blkptr_t) to the next
61  * ZIL block in the chain. The ZIL header points to the first
62  * block in the chain. Note there is not a fixed place in the pool
63  * to hold blocks. They are dynamically allocated and freed as
64  * needed from the blocks available; the sketch below shows the chain.
65  */
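
/*
 * A rough sketch of the log chain described above (illustrative only):
 * the header's zh_log blkptr names the first log block; each block is
 * packed with log records (lr_t) and ends with a zil_trailer_t whose
 * zit_next_blk blkptr names the next block in the chain.
 *
 *   zil_header_t          log block              log block
 *   +----------+     +-----------------+    +-----------------+
 *   | zh_log  -+---->| lr | lr | ...   |    | lr | ...        |
 *   +----------+     |-----------------|    |-----------------|
 *                    | zil_trailer_t:  |    | zil_trailer_t:  |
 *                    |   zit_nused     |    |   zit_nused     |
 *                    |   zit_next_blk -+--->|   zit_next_blk -+--> ...
 *                    +-----------------+    +-----------------+
 */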
66 
67 /*
68  * This global ZIL switch affects all pools
69  */
70 int zil_disable = 0;	/* disable intent logging */
71 
72 /*
73  * Tunable parameter for debugging or performance analysis.  Setting
74  * zfs_nocacheflush will cause corruption on power loss if a volatile
75  * out-of-order write cache is enabled.
76  */
77 boolean_t zfs_nocacheflush = B_FALSE;
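
/*
 * Neither tunable is normally changed from its default.  As an
 * illustration only (standard Solaris /etc/system syntax; confirm the
 * exact names against your platform before relying on them):
 *
 *	set zfs:zil_disable = 1
 *	set zfs:zfs_nocacheflush = 1
 *
 * Both settings trade away the synchronous-write guarantees described
 * at the top of this file, so they are intended for debugging and
 * performance analysis rather than production use.
 */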
78 
79 static kmem_cache_t *zil_lwb_cache;
80 
81 static int
82 zil_dva_compare(const void *x1, const void *x2)
83 {
84 	const dva_t *dva1 = x1;
85 	const dva_t *dva2 = x2;
86 
87 	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
88 		return (-1);
89 	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
90 		return (1);
91 
92 	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
93 		return (-1);
94 	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
95 		return (1);
96 
97 	return (0);
98 }
99 
100 static void
101 zil_dva_tree_init(avl_tree_t *t)
102 {
103 	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
104 	    offsetof(zil_dva_node_t, zn_node));
105 }
106 
107 static void
108 zil_dva_tree_fini(avl_tree_t *t)
109 {
110 	zil_dva_node_t *zn;
111 	void *cookie = NULL;
112 
113 	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
114 		kmem_free(zn, sizeof (zil_dva_node_t));
115 
116 	avl_destroy(t);
117 }
118 
119 static int
120 zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
121 {
122 	zil_dva_node_t *zn;
123 	avl_index_t where;
124 
125 	if (avl_find(t, dva, &where) != NULL)
126 		return (EEXIST);
127 
128 	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
129 	zn->zn_dva = *dva;
130 	avl_insert(t, zn, where);
131 
132 	return (0);
133 }
134 
135 static zil_header_t *
136 zil_header_in_syncing_context(zilog_t *zilog)
137 {
138 	return ((zil_header_t *)zilog->zl_header);
139 }
140 
141 static void
142 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
143 {
144 	zio_cksum_t *zc = &bp->blk_cksum;
145 
146 	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
147 	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
148 	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
149 	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
150 }
151 
152 /*
153  * Read a log block, make sure it's valid, and byteswap it if necessary.
154  */
155 static int
156 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
157 {
158 	blkptr_t blk = *bp;
159 	zbookmark_t zb;
160 	uint32_t aflags = ARC_WAIT;
161 	int error;
162 
163 	zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
164 	zb.zb_object = 0;
165 	zb.zb_level = -1;
166 	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
167 
168 	*abufpp = NULL;
169 
170 	error = arc_read(NULL, zilog->zl_spa, &blk, byteswap_uint64_array,
171 	    arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
172 	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);
173 
174 	if (error == 0) {
175 		char *data = (*abufpp)->b_data;
176 		uint64_t blksz = BP_GET_LSIZE(bp);
177 		zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
178 		zio_cksum_t cksum = bp->blk_cksum;
179 
180 		/*
181 		 * Sequence numbers should be... sequential.  The checksum
182 		 * verifier for the next block should be bp's checksum plus 1.
183 		 */
184 		cksum.zc_word[ZIL_ZC_SEQ]++;
185 
186 		if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, sizeof (cksum)))
187 			error = ESTALE;
188 		else if (BP_IS_HOLE(&ztp->zit_next_blk))
189 			error = ENOENT;
190 		else if (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))
191 			error = EOVERFLOW;
192 
193 		if (error) {
194 			VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
195 			*abufpp = NULL;
196 		}
197 	}
198 
199 	dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);
200 
201 	return (error);
202 }
203 
204 /*
205  * Parse the intent log, and call parse_func for each valid record within.
206  * Return the highest sequence number.
207  */
208 uint64_t
209 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
210     zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
211 {
212 	const zil_header_t *zh = zilog->zl_header;
213 	uint64_t claim_seq = zh->zh_claim_seq;
214 	uint64_t seq = 0;
215 	uint64_t max_seq = 0;
216 	blkptr_t blk = zh->zh_log;
217 	arc_buf_t *abuf;
218 	char *lrbuf, *lrp;
219 	zil_trailer_t *ztp;
220 	int reclen, error;
221 
222 	if (BP_IS_HOLE(&blk))
223 		return (max_seq);
224 
225 	/*
226 	 * Starting at the block pointed to by zh_log we read the log chain.
227 	 * For each block in the chain we strongly check that block to
228 	 * ensure its validity.  We stop when an invalid block is found.
229 	 * For each block pointer in the chain we call parse_blk_func().
230 	 * For each record in each valid block we call parse_lr_func().
231 	 * If the log has been claimed, stop if we encounter a sequence
232 	 * number greater than the highest claimed sequence number.
233 	 */
234 	zil_dva_tree_init(&zilog->zl_dva_tree);
235 	for (;;) {
236 		seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
237 
238 		if (claim_seq != 0 && seq > claim_seq)
239 			break;
240 
241 		ASSERT(max_seq < seq);
242 		max_seq = seq;
243 
244 		error = zil_read_log_block(zilog, &blk, &abuf);
245 
246 		if (parse_blk_func != NULL)
247 			parse_blk_func(zilog, &blk, arg, txg);
248 
249 		if (error)
250 			break;
251 
252 		lrbuf = abuf->b_data;
253 		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
254 		blk = ztp->zit_next_blk;
255 
256 		if (parse_lr_func == NULL) {
257 			VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
258 			continue;
259 		}
260 
261 		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
262 			lr_t *lr = (lr_t *)lrp;
263 			reclen = lr->lrc_reclen;
264 			ASSERT3U(reclen, >=, sizeof (lr_t));
265 			parse_lr_func(zilog, lr, arg, txg);
266 		}
267 		VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
268 	}
269 	zil_dva_tree_fini(&zilog->zl_dva_tree);
270 
271 	return (max_seq);
272 }
273 
274 /* ARGSUSED */
275 static void
276 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
277 {
278 	spa_t *spa = zilog->zl_spa;
279 	int err;
280 
281 	/*
282 	 * Claim log block if not already committed and not already claimed.
283 	 */
284 	if (bp->blk_birth >= first_txg &&
285 	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
286 		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
287 		ASSERT(err == 0);
288 	}
289 }
290 
291 static void
292 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
293 {
294 	if (lrc->lrc_txtype == TX_WRITE) {
295 		lr_write_t *lr = (lr_write_t *)lrc;
296 		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
297 	}
298 }
299 
300 /* ARGSUSED */
301 static void
302 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
303 {
304 	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
305 }
306 
307 static void
308 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
309 {
310 	/*
311 	 * If we previously claimed it, we need to free it.
312 	 */
313 	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
314 		lr_write_t *lr = (lr_write_t *)lrc;
315 		blkptr_t *bp = &lr->lr_blkptr;
316 		if (bp->blk_birth >= claim_txg &&
317 		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
318 			(void) arc_free(NULL, zilog->zl_spa,
319 			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
320 		}
321 	}
322 }
323 
324 /*
325  * Create an on-disk intent log.
326  */
327 static void
328 zil_create(zilog_t *zilog)
329 {
330 	const zil_header_t *zh = zilog->zl_header;
331 	lwb_t *lwb;
332 	uint64_t txg = 0;
333 	dmu_tx_t *tx = NULL;
334 	blkptr_t blk;
335 	int error = 0;
336 
337 	/*
338 	 * Wait for any previous destroy to complete.
339 	 */
340 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
341 
342 	ASSERT(zh->zh_claim_txg == 0);
343 	ASSERT(zh->zh_replay_seq == 0);
344 
345 	blk = zh->zh_log;
346 
347 	/*
348 	 * If we don't already have an initial log block, allocate one now.
349 	 */
350 	if (BP_IS_HOLE(&blk)) {
351 		tx = dmu_tx_create(zilog->zl_os);
352 		(void) dmu_tx_assign(tx, TXG_WAIT);
353 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
354 		txg = dmu_tx_get_txg(tx);
355 
356 		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
357 		    NULL, txg);
358 
359 		if (error == 0)
360 			zil_init_log_chain(zilog, &blk);
361 	}
362 
363 	/*
364 	 * Allocate a log write buffer (lwb) for the first log block.
365 	 */
366 	if (error == 0) {
367 		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
368 		lwb->lwb_zilog = zilog;
369 		lwb->lwb_blk = blk;
370 		lwb->lwb_nused = 0;
371 		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
372 		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
373 		lwb->lwb_max_txg = txg;
374 		lwb->lwb_zio = NULL;
375 
376 		mutex_enter(&zilog->zl_lock);
377 		list_insert_tail(&zilog->zl_lwb_list, lwb);
378 		mutex_exit(&zilog->zl_lock);
379 	}
380 
381 	/*
382 	 * If we just allocated the first log block, commit our transaction
383 	 * and wait for zil_sync() to stuff the block pointer into zh_log.
384 	 * (zh is part of the MOS, so we cannot modify it in open context.)
385 	 */
386 	if (tx != NULL) {
387 		dmu_tx_commit(tx);
388 		txg_wait_synced(zilog->zl_dmu_pool, txg);
389 	}
390 
391 	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
392 }
393 
394 /*
395  * In one tx, free all log blocks and clear the log header.
396  * If keep_first is set, then we're replaying a log with no content.
397  * We want to keep the first block, however, so that the first
398  * synchronous transaction doesn't require a txg_wait_synced()
399  * in zil_create().  We don't need to txg_wait_synced() here either
400  * when keep_first is set, because both zil_create() and zil_destroy()
401  * will wait for any in-progress destroys to complete.
402  */
403 void
404 zil_destroy(zilog_t *zilog, boolean_t keep_first)
405 {
406 	const zil_header_t *zh = zilog->zl_header;
407 	lwb_t *lwb;
408 	dmu_tx_t *tx;
409 	uint64_t txg;
410 
411 	/*
412 	 * Wait for any previous destroy to complete.
413 	 */
414 	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
415 
416 	if (BP_IS_HOLE(&zh->zh_log))
417 		return;
418 
419 	tx = dmu_tx_create(zilog->zl_os);
420 	(void) dmu_tx_assign(tx, TXG_WAIT);
421 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
422 	txg = dmu_tx_get_txg(tx);
423 
424 	mutex_enter(&zilog->zl_lock);
425 
426 	ASSERT3U(zilog->zl_destroy_txg, <, txg);
427 	zilog->zl_destroy_txg = txg;
428 	zilog->zl_keep_first = keep_first;
429 
430 	if (!list_is_empty(&zilog->zl_lwb_list)) {
431 		ASSERT(zh->zh_claim_txg == 0);
432 		ASSERT(!keep_first);
433 		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
434 			list_remove(&zilog->zl_lwb_list, lwb);
435 			if (lwb->lwb_buf != NULL)
436 				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
437 			zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
438 			kmem_cache_free(zil_lwb_cache, lwb);
439 		}
440 	} else {
441 		if (!keep_first) {
442 			(void) zil_parse(zilog, zil_free_log_block,
443 			    zil_free_log_record, tx, zh->zh_claim_txg);
444 		}
445 	}
446 	mutex_exit(&zilog->zl_lock);
447 
448 	dmu_tx_commit(tx);
449 
450 	if (keep_first)			/* no need to wait in this case */
451 		return;
452 
453 	txg_wait_synced(zilog->zl_dmu_pool, txg);
454 	ASSERT(BP_IS_HOLE(&zh->zh_log));
455 }
456 
457 int
458 zil_claim(char *osname, void *txarg)
459 {
460 	dmu_tx_t *tx = txarg;
461 	uint64_t first_txg = dmu_tx_get_txg(tx);
462 	zilog_t *zilog;
463 	zil_header_t *zh;
464 	objset_t *os;
465 	int error;
466 
467 	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_STANDARD, &os);
468 	if (error) {
469 		cmn_err(CE_WARN, "can't process intent log for %s", osname);
470 		return (0);
471 	}
472 
473 	zilog = dmu_objset_zil(os);
474 	zh = zil_header_in_syncing_context(zilog);
475 
476 	/*
477 	 * Claim all log blocks if we haven't already done so, and remember
478 	 * the highest claimed sequence number.  This ensures that if we can
479 	 * read only part of the log now (e.g. due to a missing device),
480 	 * but we can read the entire log later, we will not try to replay
481 	 * or destroy beyond the last block we successfully claimed.
482 	 */
483 	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
484 	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
485 		zh->zh_claim_txg = first_txg;
486 		zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
487 		    zil_claim_log_record, tx, first_txg);
488 		dsl_dataset_dirty(dmu_objset_ds(os), tx);
489 	}
490 
491 	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
492 	dmu_objset_close(os);
493 	return (0);
494 }
495 
496 void
497 zil_add_vdev(zilog_t *zilog, uint64_t vdev)
498 {
499 	zil_vdev_t *zv, *new;
500 	uint64_t bmap_sz = sizeof (zilog->zl_vdev_bmap) << 3;
501 	uchar_t *cp;
502 
503 	if (zfs_nocacheflush)
504 		return;
505 
506 	if (vdev < bmap_sz) {
507 		cp = zilog->zl_vdev_bmap + (vdev / 8);
508 		atomic_or_8(cp, 1 << (vdev % 8));
509 	} else  {
510 		/*
511 		 * insert into ordered list
512 		 */
513 		mutex_enter(&zilog->zl_lock);
514 		for (zv = list_head(&zilog->zl_vdev_list); zv != NULL;
515 		    zv = list_next(&zilog->zl_vdev_list, zv)) {
516 			if (zv->vdev == vdev) {
517 				/* duplicate found - just return */
518 				mutex_exit(&zilog->zl_lock);
519 				return;
520 			}
521 			if (zv->vdev > vdev) {
522 				/* insert before this entry */
523 				new = kmem_alloc(sizeof (zil_vdev_t),
524 				    KM_SLEEP);
525 				new->vdev = vdev;
526 				list_insert_before(&zilog->zl_vdev_list,
527 				    zv, new);
528 				mutex_exit(&zilog->zl_lock);
529 				return;
530 			}
531 		}
532 		/* ran off end of list, insert at the end */
533 		ASSERT(zv == NULL);
534 		new = kmem_alloc(sizeof (zil_vdev_t), KM_SLEEP);
535 		new->vdev = vdev;
536 		list_insert_tail(&zilog->zl_vdev_list, new);
537 		mutex_exit(&zilog->zl_lock);
538 	}
539 }
540 
541 void
542 zil_flush_vdevs(zilog_t *zilog)
543 {
544 	zil_vdev_t *zv;
545 	zio_t *zio = NULL;
546 	spa_t *spa = zilog->zl_spa;
547 	uint64_t vdev;
548 	uint8_t b;
549 	int i, j;
550 
551 	ASSERT(zilog->zl_writer);
552 
553 	for (i = 0; i < sizeof (zilog->zl_vdev_bmap); i++) {
554 		b = zilog->zl_vdev_bmap[i];
555 		if (b == 0)
556 			continue;
557 		for (j = 0; j < 8; j++) {
558 			if (b & (1 << j)) {
559 				vdev = (i << 3) + j;
560 				zio_flush_vdev(spa, vdev, &zio);
561 			}
562 		}
563 		zilog->zl_vdev_bmap[i] = 0;
564 	}
565 
566 	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
567 		zio_flush_vdev(spa, zv->vdev, &zio);
568 		list_remove(&zilog->zl_vdev_list, zv);
569 		kmem_free(zv, sizeof (zil_vdev_t));
570 	}
571 	/*
572 	 * Wait for all the flushes to complete.  Not all devices actually
573 	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
574 	 */
575 	if (zio)
576 		(void) zio_wait(zio);
577 }
578 
579 /*
580  * Function called when a log block write completes
581  */
582 static void
583 zil_lwb_write_done(zio_t *zio)
584 {
585 	lwb_t *lwb = zio->io_private;
586 	zilog_t *zilog = lwb->lwb_zilog;
587 
588 	/*
589 	 * Now that we've written this log block, we have a stable pointer
590 	 * to the next block in the chain, so it's OK to let the txg in
591 	 * which we allocated the next block sync.
592 	 */
593 	txg_rele_to_sync(&lwb->lwb_txgh);
594 
595 	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
596 	mutex_enter(&zilog->zl_lock);
597 	lwb->lwb_buf = NULL;
598 	if (zio->io_error)
599 		zilog->zl_log_error = B_TRUE;
600 	mutex_exit(&zilog->zl_lock);
601 }
602 
603 /*
604  * Initialize the io for a log block.
605  *
606  * Note, we should not initialize the IO until we are about
607  * to use it, since zio_rewrite() does a spa_config_enter().
608  */
609 static void
610 zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
611 {
612 	zbookmark_t zb;
613 
614 	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
615 	zb.zb_object = 0;
616 	zb.zb_level = -1;
617 	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
618 
619 	if (zilog->zl_root_zio == NULL) {
620 		zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
621 		    ZIO_FLAG_CANFAIL);
622 	}
623 	if (lwb->lwb_zio == NULL) {
624 		lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
625 		    ZIO_CHECKSUM_ZILOG, 0, &lwb->lwb_blk, lwb->lwb_buf,
626 		    lwb->lwb_sz, zil_lwb_write_done, lwb,
627 		    ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_CANFAIL, &zb);
628 	}
629 }
630 
631 /*
632  * Start a log block write and advance to the next log block.
633  * Calls are serialized.
634  */
635 static lwb_t *
636 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
637 {
638 	lwb_t *nlwb;
639 	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
640 	spa_t *spa = zilog->zl_spa;
641 	blkptr_t *bp = &ztp->zit_next_blk;
642 	uint64_t txg;
643 	uint64_t zil_blksz;
644 	int error;
645 
646 	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));
647 
648 	/*
649 	 * Allocate the next block and save its address in this block
650 	 * before writing it in order to establish the log chain.
651 	 * Note that if the allocation of nlwb synced before we wrote
652 	 * the block that points at it (lwb), we'd leak it if we crashed.
653 	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
654 	 */
655 	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
656 	txg_rele_to_quiesce(&lwb->lwb_txgh);
657 
658 	/*
659 	 * Pick a ZIL blocksize. We request a size that is the
660 	 * maximum of the previous used size, the current used size and
661 	 * the amount waiting in the queue.
662 	 */
663 	zil_blksz = MAX(zilog->zl_prev_used,
664 	    zilog->zl_cur_used + sizeof (*ztp));
665 	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
666 	zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
667 	if (zil_blksz > ZIL_MAX_BLKSZ)
668 		zil_blksz = ZIL_MAX_BLKSZ;
669 
670 	BP_ZERO(bp);
671 	/* pass the old blkptr in order to spread log blocks across devs */
672 	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
673 	if (error) {
674 		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);
675 
676 		/*
677 		 * We dirty the dataset to ensure that zil_sync() will
678 		 * be called to remove this lwb from our zl_lwb_list.
679 		 * Failing to do so, may leave an lwb with a NULL lwb_buf
680 		 * hanging around on the zl_lwb_list.
681 		 */
682 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
683 		dmu_tx_commit(tx);
684 
685 		/*
686 		 * Since we've just experienced an allocation failure, we
687 		 * terminate the current lwb and send it on its way.
688 		 */
689 		ztp->zit_pad = 0;
690 		ztp->zit_nused = lwb->lwb_nused;
691 		ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
692 		zio_nowait(lwb->lwb_zio);
693 
694 		/*
695 		 * By returning NULL the caller will call txg_wait_synced().
696 		 */
697 		return (NULL);
698 	}
699 
700 	ASSERT3U(bp->blk_birth, ==, txg);
701 	ztp->zit_pad = 0;
702 	ztp->zit_nused = lwb->lwb_nused;
703 	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
704 	bp->blk_cksum = lwb->lwb_blk.blk_cksum;
705 	bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
706 
707 	/*
708 	 * Allocate a new log write buffer (lwb).
709 	 */
710 	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
711 
712 	nlwb->lwb_zilog = zilog;
713 	nlwb->lwb_blk = *bp;
714 	nlwb->lwb_nused = 0;
715 	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
716 	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
717 	nlwb->lwb_max_txg = txg;
718 	nlwb->lwb_zio = NULL;
719 
720 	/*
721 	 * Put new lwb at the end of the log chain
722 	 */
723 	mutex_enter(&zilog->zl_lock);
724 	list_insert_tail(&zilog->zl_lwb_list, nlwb);
725 	mutex_exit(&zilog->zl_lock);
726 
727 	/* Record the vdev for later flushing */
728 	zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(&(lwb->lwb_blk))));
729 
730 	/*
731 	 * kick off the write for the old log block
732 	 */
733 	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
734 	ASSERT(lwb->lwb_zio);
735 	zio_nowait(lwb->lwb_zio);
736 
737 	return (nlwb);
738 }
739 
740 static lwb_t *
741 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
742 {
743 	lr_t *lrc = &itx->itx_lr; /* common log record */
744 	lr_write_t *lr = (lr_write_t *)lrc;
745 	uint64_t txg = lrc->lrc_txg;
746 	uint64_t reclen = lrc->lrc_reclen;
747 	uint64_t dlen;
748 
749 	if (lwb == NULL)
750 		return (NULL);
751 	ASSERT(lwb->lwb_buf != NULL);
752 
753 	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
754 		dlen = P2ROUNDUP_TYPED(
755 		    lr->lr_length, sizeof (uint64_t), uint64_t);
756 	else
757 		dlen = 0;
758 
759 	zilog->zl_cur_used += (reclen + dlen);
760 
761 	zil_lwb_write_init(zilog, lwb);
762 
763 	/*
764 	 * If this record won't fit in the current log block, start a new one.
765 	 */
766 	if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
767 		lwb = zil_lwb_write_start(zilog, lwb);
768 		if (lwb == NULL)
769 			return (NULL);
770 		zil_lwb_write_init(zilog, lwb);
771 		ASSERT(lwb->lwb_nused == 0);
772 		if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
773 			txg_wait_synced(zilog->zl_dmu_pool, txg);
774 			return (lwb);
775 		}
776 	}
777 
778 	/*
779 	 * Update lrc_seq to be the log record sequence number (see zil.h),
780 	 * then copy the record to the log buffer.
781 	 */
782 	lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
783 	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
784 
785 	/*
786 	 * If it's a write, fetch the data or get its blkptr as appropriate.
787 	 */
788 	if (lrc->lrc_txtype == TX_WRITE) {
789 		if (txg > spa_freeze_txg(zilog->zl_spa))
790 			txg_wait_synced(zilog->zl_dmu_pool, txg);
791 		if (itx->itx_wr_state != WR_COPIED) {
792 			char *dbuf;
793 			int error;
794 
795 			/* alignment is guaranteed */
796 			lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
797 			if (dlen) {
798 				ASSERT(itx->itx_wr_state == WR_NEED_COPY);
799 				dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
800 				lr->lr_common.lrc_reclen += dlen;
801 			} else {
802 				ASSERT(itx->itx_wr_state == WR_INDIRECT);
803 				dbuf = NULL;
804 			}
805 			error = zilog->zl_get_data(
806 			    itx->itx_private, lr, dbuf, lwb->lwb_zio);
807 			if (error) {
808 				ASSERT(error == ENOENT || error == EEXIST ||
809 				    error == EALREADY);
810 				return (lwb);
811 			}
812 		}
813 	}
814 
815 	lwb->lwb_nused += reclen + dlen;
816 	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
817 	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
818 	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);
819 
820 	return (lwb);
821 }
822 
823 itx_t *
824 zil_itx_create(int txtype, size_t lrsize)
825 {
826 	itx_t *itx;
827 
828 	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
829 
830 	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
831 	itx->itx_lr.lrc_txtype = txtype;
832 	itx->itx_lr.lrc_reclen = lrsize;
833 	itx->itx_lr.lrc_seq = 0;	/* defensive */
834 
835 	return (itx);
836 }
837 
838 uint64_t
839 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
840 {
841 	uint64_t seq;
842 
843 	ASSERT(itx->itx_lr.lrc_seq == 0);
844 
845 	mutex_enter(&zilog->zl_lock);
846 	list_insert_tail(&zilog->zl_itx_list, itx);
847 	zilog->zl_itx_list_sz += itx->itx_lr.lrc_reclen;
848 	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
849 	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
850 	mutex_exit(&zilog->zl_lock);
851 
852 	return (seq);
853 }
854 
855 /*
856  * Free up all in-memory intent log transactions that have now been synced.
857  */
858 static void
859 zil_itx_clean(zilog_t *zilog)
860 {
861 	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
862 	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
863 	list_t clean_list;
864 	itx_t *itx;
865 
866 	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
867 
868 	mutex_enter(&zilog->zl_lock);
869 	/* wait for a log writer to finish walking list */
870 	while (zilog->zl_writer) {
871 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
872 	}
873 
874 	/*
875 	 * Move the sync'd log transactions to a separate list so we can call
876 	 * kmem_free without holding the zl_lock.
877 	 *
878 	 * There is no need to set zl_writer as we don't drop zl_lock here
879 	 */
880 	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
881 	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
882 		list_remove(&zilog->zl_itx_list, itx);
883 		zilog->zl_itx_list_sz -= itx->itx_lr.lrc_reclen;
884 		list_insert_tail(&clean_list, itx);
885 	}
886 	cv_broadcast(&zilog->zl_cv_writer);
887 	mutex_exit(&zilog->zl_lock);
888 
889 	/* destroy sync'd log transactions */
890 	while ((itx = list_head(&clean_list)) != NULL) {
891 		list_remove(&clean_list, itx);
892 		kmem_free(itx, offsetof(itx_t, itx_lr)
893 		    + itx->itx_lr.lrc_reclen);
894 	}
895 	list_destroy(&clean_list);
896 }
897 
898 /*
899  * If there are any in-memory intent log transactions which have now been
900  * synced, dispatch a task to the clean taskq to free them.
901  */
902 void
903 zil_clean(zilog_t *zilog)
904 {
905 	itx_t *itx;
906 
907 	mutex_enter(&zilog->zl_lock);
908 	itx = list_head(&zilog->zl_itx_list);
909 	if ((itx != NULL) &&
910 	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
911 		(void) taskq_dispatch(zilog->zl_clean_taskq,
912 		    (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
913 	}
914 	mutex_exit(&zilog->zl_lock);
915 }
916 
917 void
918 zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
919 {
920 	uint64_t txg;
921 	uint64_t reclen;
922 	uint64_t commit_seq = 0;
923 	itx_t *itx, *itx_next = (itx_t *)-1;
924 	lwb_t *lwb;
925 	spa_t *spa;
926 
927 	zilog->zl_writer = B_TRUE;
928 	zilog->zl_root_zio = NULL;
929 	spa = zilog->zl_spa;
930 
931 	if (zilog->zl_suspend) {
932 		lwb = NULL;
933 	} else {
934 		lwb = list_tail(&zilog->zl_lwb_list);
935 		if (lwb == NULL) {
936 			/*
937 			 * Return if there's nothing to flush before we
938 			 * dirty the fs by calling zil_create()
939 			 */
940 			if (list_is_empty(&zilog->zl_itx_list)) {
941 				zilog->zl_writer = B_FALSE;
942 				return;
943 			}
944 			mutex_exit(&zilog->zl_lock);
945 			zil_create(zilog);
946 			mutex_enter(&zilog->zl_lock);
947 			lwb = list_tail(&zilog->zl_lwb_list);
948 		}
949 	}
950 
951 	/* Loop through in-memory log transactions filling log blocks. */
952 	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
953 	for (;;) {
954 		/*
955 		 * Find the next itx to push:
956 		 * Push all transactions related to the specified foid, and
957 		 * all other transactions except TX_WRITE, TX_TRUNCATE,
958 		 * TX_SETATTR and TX_ACL records belonging to other files.
959 		 */
960 		if (itx_next != (itx_t *)-1)
961 			itx = itx_next;
962 		else
963 			itx = list_head(&zilog->zl_itx_list);
964 		for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
965 			if (foid == 0) /* push all foids? */
966 				break;
967 			if (itx->itx_sync) /* push all O_[D]SYNC */
968 				break;
969 			switch (itx->itx_lr.lrc_txtype) {
970 			case TX_SETATTR:
971 			case TX_WRITE:
972 			case TX_TRUNCATE:
973 			case TX_ACL:
974 				/* lr_foid is same offset for these records */
975 				if (((lr_write_t *)&itx->itx_lr)->lr_foid
976 				    != foid) {
977 					continue; /* skip this record */
978 				}
979 			}
980 			break;
981 		}
982 		if (itx == NULL)
983 			break;
984 
985 		reclen = itx->itx_lr.lrc_reclen;
986 		if ((itx->itx_lr.lrc_seq > seq) &&
987 		    ((lwb == NULL) || (lwb->lwb_nused == 0) ||
988 		    (lwb->lwb_nused + reclen > ZIL_BLK_DATA_SZ(lwb)))) {
989 			break;
990 		}
991 
992 		/*
993 		 * Save the next pointer.  Even though we soon drop
994 		 * zl_lock, all threads that may change the list
995 		 * (another writer or zil_itx_clean) can't do so until
996 		 * they have zl_writer.
997 		 */
998 		itx_next = list_next(&zilog->zl_itx_list, itx);
999 		list_remove(&zilog->zl_itx_list, itx);
1000 		mutex_exit(&zilog->zl_lock);
1001 		txg = itx->itx_lr.lrc_txg;
1002 		ASSERT(txg);
1003 
1004 		if (txg > spa_last_synced_txg(spa) ||
1005 		    txg > spa_freeze_txg(spa))
1006 			lwb = zil_lwb_commit(zilog, itx, lwb);
1007 		kmem_free(itx, offsetof(itx_t, itx_lr)
1008 		    + itx->itx_lr.lrc_reclen);
1009 		mutex_enter(&zilog->zl_lock);
1010 		zilog->zl_itx_list_sz -= reclen;
1011 	}
1012 	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1013 	/* determine commit sequence number */
1014 	itx = list_head(&zilog->zl_itx_list);
1015 	if (itx)
1016 		commit_seq = itx->itx_lr.lrc_seq;
1017 	else
1018 		commit_seq = zilog->zl_itx_seq;
1019 	mutex_exit(&zilog->zl_lock);
1020 
1021 	/* write the last block out */
1022 	if (lwb != NULL && lwb->lwb_zio != NULL)
1023 		lwb = zil_lwb_write_start(zilog, lwb);
1024 
1025 	zilog->zl_prev_used = zilog->zl_cur_used;
1026 	zilog->zl_cur_used = 0;
1027 
1028 	/*
1029 	 * Wait if necessary for the log blocks to be on stable storage.
1030 	 */
1031 	if (zilog->zl_root_zio) {
1032 		DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
1033 		(void) zio_wait(zilog->zl_root_zio);
1034 		DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
1035 		if (!zfs_nocacheflush)
1036 			zil_flush_vdevs(zilog);
1037 	}
1038 
1039 	if (zilog->zl_log_error || lwb == NULL) {
1040 		zilog->zl_log_error = 0;
1041 		txg_wait_synced(zilog->zl_dmu_pool, 0);
1042 	}
1043 
1044 	mutex_enter(&zilog->zl_lock);
1045 	zilog->zl_writer = B_FALSE;
1046 
1047 	ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
1048 	zilog->zl_commit_seq = commit_seq;
1049 }
1050 
1051 /*
1052  * Push zfs transactions to stable storage up to the supplied sequence number.
1053  * If foid is 0 push out all transactions, otherwise push only those
1054  * for that file or that might have been used to create that file.
1055  */
1056 void
1057 zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
1058 {
1059 	if (zilog == NULL || seq == 0)
1060 		return;
1061 
1062 	mutex_enter(&zilog->zl_lock);
1063 
1064 	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */
1065 
1066 	while (zilog->zl_writer) {
1067 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1068 		if (seq < zilog->zl_commit_seq) {
1069 			mutex_exit(&zilog->zl_lock);
1070 			return;
1071 		}
1072 	}
1073 	zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
1074 	/* wake up others waiting on the commit */
1075 	cv_broadcast(&zilog->zl_cv_writer);
1076 	mutex_exit(&zilog->zl_lock);
1077 }
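
/*
 * A sketch of how a caller is expected to use the itx/commit interface
 * (illustrative only; the real callers, e.g. in the zfs_log and
 * zfs_vnops code, differ in detail).  Records are built and assigned in
 * open context, then pushed on a synchronous request:
 *
 *	itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *	... fill in itx->itx_lr fields and itx->itx_wr_state ...
 *	seq = zil_itx_assign(zilog, itx, tx);	(open context, in the tx)
 *	...
 *	zil_commit(zilog, seq, foid);		(later, e.g. on fsync)
 *
 * zil_commit() returns once the records up to seq for foid (or for all
 * files, if foid == 0) are on stable log blocks, or once the containing
 * txg has synced.
 */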
1078 
1079 /*
1080  * Called in syncing context to free committed log blocks and update the log header.
1081  */
1082 void
1083 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1084 {
1085 	zil_header_t *zh = zil_header_in_syncing_context(zilog);
1086 	uint64_t txg = dmu_tx_get_txg(tx);
1087 	spa_t *spa = zilog->zl_spa;
1088 	lwb_t *lwb;
1089 
1090 	mutex_enter(&zilog->zl_lock);
1091 
1092 	ASSERT(zilog->zl_stop_sync == 0);
1093 
1094 	zh->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];
1095 
1096 	if (zilog->zl_destroy_txg == txg) {
1097 		blkptr_t blk = zh->zh_log;
1098 
1099 		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1100 		ASSERT(spa_sync_pass(spa) == 1);
1101 
1102 		bzero(zh, sizeof (zil_header_t));
1103 		bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));
1104 
1105 		if (zilog->zl_keep_first) {
1106 			/*
1107 			 * If this block was part of a log chain that couldn't
1108 			 * be claimed because a device was missing during
1109 			 * zil_claim(), but that device later returns,
1110 			 * then this block could erroneously appear valid.
1111 			 * To guard against this, assign a new GUID to the new
1112 			 * log chain so it doesn't matter what blk points to.
1113 			 */
1114 			zil_init_log_chain(zilog, &blk);
1115 			zh->zh_log = blk;
1116 		}
1117 	}
1118 
1119 	for (;;) {
1120 		lwb = list_head(&zilog->zl_lwb_list);
1121 		if (lwb == NULL) {
1122 			mutex_exit(&zilog->zl_lock);
1123 			return;
1124 		}
1125 		zh->zh_log = lwb->lwb_blk;
1126 		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1127 			break;
1128 		list_remove(&zilog->zl_lwb_list, lwb);
1129 		zio_free_blk(spa, &lwb->lwb_blk, txg);
1130 		kmem_cache_free(zil_lwb_cache, lwb);
1131 
1132 		/*
1133 		 * If we don't have anything left in the lwb list then
1134 		 * we've had an allocation failure and we need to zero
1135 		 * out the zil_header blkptr so that we don't end
1136 		 * up freeing the same block twice.
1137 		 */
1138 		if (list_head(&zilog->zl_lwb_list) == NULL)
1139 			BP_ZERO(&zh->zh_log);
1140 	}
1141 	mutex_exit(&zilog->zl_lock);
1142 }
1143 
1144 void
1145 zil_init(void)
1146 {
1147 	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1148 	    sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1149 }
1150 
1151 void
1152 zil_fini(void)
1153 {
1154 	kmem_cache_destroy(zil_lwb_cache);
1155 }
1156 
1157 zilog_t *
1158 zil_alloc(objset_t *os, zil_header_t *zh_phys)
1159 {
1160 	zilog_t *zilog;
1161 
1162 	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1163 
1164 	zilog->zl_header = zh_phys;
1165 	zilog->zl_os = os;
1166 	zilog->zl_spa = dmu_objset_spa(os);
1167 	zilog->zl_dmu_pool = dmu_objset_pool(os);
1168 	zilog->zl_destroy_txg = TXG_INITIAL - 1;
1169 
1170 	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1171 
1172 	list_create(&zilog->zl_itx_list, sizeof (itx_t),
1173 	    offsetof(itx_t, itx_node));
1174 
1175 	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1176 	    offsetof(lwb_t, lwb_node));
1177 
1178 	list_create(&zilog->zl_vdev_list, sizeof (zil_vdev_t),
1179 	    offsetof(zil_vdev_t, vdev_seq_node));
1180 
1181 	return (zilog);
1182 }
1183 
1184 void
1185 zil_free(zilog_t *zilog)
1186 {
1187 	lwb_t *lwb;
1188 	zil_vdev_t *zv;
1189 
1190 	zilog->zl_stop_sync = 1;
1191 
1192 	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1193 		list_remove(&zilog->zl_lwb_list, lwb);
1194 		if (lwb->lwb_buf != NULL)
1195 			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1196 		kmem_cache_free(zil_lwb_cache, lwb);
1197 	}
1198 	list_destroy(&zilog->zl_lwb_list);
1199 
1200 	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
1201 		list_remove(&zilog->zl_vdev_list, zv);
1202 		kmem_free(zv, sizeof (zil_vdev_t));
1203 	}
1204 	list_destroy(&zilog->zl_vdev_list);
1205 
1206 	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1207 	list_destroy(&zilog->zl_itx_list);
1208 	mutex_destroy(&zilog->zl_lock);
1209 
1210 	kmem_free(zilog, sizeof (zilog_t));
1211 }
1212 
1213 /*
1214  * Return true if the initial log block is not valid.
1215  */
1216 static int
1217 zil_empty(zilog_t *zilog)
1218 {
1219 	const zil_header_t *zh = zilog->zl_header;
1220 	arc_buf_t *abuf = NULL;
1221 
1222 	if (BP_IS_HOLE(&zh->zh_log))
1223 		return (1);
1224 
1225 	if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
1226 		return (1);
1227 
1228 	VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
1229 	return (0);
1230 }
1231 
1232 /*
1233  * Open an intent log.
1234  */
1235 zilog_t *
1236 zil_open(objset_t *os, zil_get_data_t *get_data)
1237 {
1238 	zilog_t *zilog = dmu_objset_zil(os);
1239 
1240 	zilog->zl_get_data = get_data;
1241 	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1242 	    2, 2, TASKQ_PREPOPULATE);
1243 
1244 	return (zilog);
1245 }
1246 
1247 /*
1248  * Close an intent log.
1249  */
1250 void
1251 zil_close(zilog_t *zilog)
1252 {
1253 	/*
1254 	 * If the log isn't already committed, mark the objset dirty
1255 	 * (so zil_sync() will be called) and wait for that txg to sync.
1256 	 */
1257 	if (!zil_is_committed(zilog)) {
1258 		uint64_t txg;
1259 		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
1260 		(void) dmu_tx_assign(tx, TXG_WAIT);
1261 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1262 		txg = dmu_tx_get_txg(tx);
1263 		dmu_tx_commit(tx);
1264 		txg_wait_synced(zilog->zl_dmu_pool, txg);
1265 	}
1266 
1267 	taskq_destroy(zilog->zl_clean_taskq);
1268 	zilog->zl_clean_taskq = NULL;
1269 	zilog->zl_get_data = NULL;
1270 
1271 	zil_itx_clean(zilog);
1272 	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1273 }
1274 
1275 /*
1276  * Suspend an intent log.  While in suspended mode, we still honor
1277  * synchronous semantics, but we rely on txg_wait_synced() to do it.
1278  * We suspend the log briefly when taking a snapshot so that the snapshot
1279  * contains all the data it's supposed to, and has an empty intent log.
1280  */
1281 int
1282 zil_suspend(zilog_t *zilog)
1283 {
1284 	const zil_header_t *zh = zilog->zl_header;
1285 
1286 	mutex_enter(&zilog->zl_lock);
1287 	if (zh->zh_claim_txg != 0) {		/* unplayed log */
1288 		mutex_exit(&zilog->zl_lock);
1289 		return (EBUSY);
1290 	}
1291 	if (zilog->zl_suspend++ != 0) {
1292 		/*
1293 		 * Someone else already began a suspend.
1294 		 * Just wait for them to finish.
1295 		 */
1296 		while (zilog->zl_suspending)
1297 			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1298 		ASSERT(BP_IS_HOLE(&zh->zh_log));
1299 		mutex_exit(&zilog->zl_lock);
1300 		return (0);
1301 	}
1302 	zilog->zl_suspending = B_TRUE;
1303 	mutex_exit(&zilog->zl_lock);
1304 
1305 	zil_commit(zilog, UINT64_MAX, 0);
1306 
1307 	/*
1308 	 * Wait for any in-flight log writes to complete.
1309 	 */
1310 	mutex_enter(&zilog->zl_lock);
1311 	while (zilog->zl_writer)
1312 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1313 	mutex_exit(&zilog->zl_lock);
1314 
1315 	zil_destroy(zilog, B_FALSE);
1316 
1317 	mutex_enter(&zilog->zl_lock);
1318 	ASSERT(BP_IS_HOLE(&zh->zh_log));
1319 	zilog->zl_suspending = B_FALSE;
1320 	cv_broadcast(&zilog->zl_cv_suspend);
1321 	mutex_exit(&zilog->zl_lock);
1322 
1323 	return (0);
1324 }
1325 
1326 void
1327 zil_resume(zilog_t *zilog)
1328 {
1329 	mutex_enter(&zilog->zl_lock);
1330 	ASSERT(zilog->zl_suspend != 0);
1331 	zilog->zl_suspend--;
1332 	mutex_exit(&zilog->zl_lock);
1333 }
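
/*
 * Illustrative pairing only (the real callers, e.g. in the snapshot
 * code, differ in detail): a caller suspends the log, performs the
 * operation that needs an empty intent log, then resumes it.
 *
 *	if (zil_suspend(zilog) != 0)
 *		return (EBUSY);		(log claimed but not yet replayed)
 *	... take the snapshot; its intent log is empty ...
 *	zil_resume(zilog);
 */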
1334 
1335 typedef struct zil_replay_arg {
1336 	objset_t	*zr_os;
1337 	zil_replay_func_t **zr_replay;
1338 	void		*zr_arg;
1339 	uint64_t	*zr_txgp;
1340 	boolean_t	zr_byteswap;
1341 	char		*zr_lrbuf;
1342 } zil_replay_arg_t;
1343 
1344 static void
1345 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1346 {
1347 	zil_replay_arg_t *zr = zra;
1348 	const zil_header_t *zh = zilog->zl_header;
1349 	uint64_t reclen = lr->lrc_reclen;
1350 	uint64_t txtype = lr->lrc_txtype;
1351 	char *name;
1352 	int pass, error, sunk;
1353 
1354 	if (zilog->zl_stop_replay)
1355 		return;
1356 
1357 	if (lr->lrc_txg < claim_txg)		/* already committed */
1358 		return;
1359 
1360 	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
1361 		return;
1362 
1363 	/*
1364 	 * Make a copy of the data so we can revise and extend it.
1365 	 */
1366 	bcopy(lr, zr->zr_lrbuf, reclen);
1367 
1368 	/*
1369 	 * The log block containing this lr may have been byteswapped
1370 	 * so that we can easily examine common fields like lrc_txtype.
1371 	 * However, the log is a mix of different data types, and only the
1372 	 * replay vectors know how to byteswap their records.  Therefore, if
1373 	 * the lr was byteswapped, undo it before invoking the replay vector.
1374 	 */
1375 	if (zr->zr_byteswap)
1376 		byteswap_uint64_array(zr->zr_lrbuf, reclen);
1377 
1378 	/*
1379 	 * If this is a TX_WRITE with a blkptr, suck in the data.
1380 	 */
1381 	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
1382 		lr_write_t *lrw = (lr_write_t *)lr;
1383 		blkptr_t *wbp = &lrw->lr_blkptr;
1384 		uint64_t wlen = lrw->lr_length;
1385 		char *wbuf = zr->zr_lrbuf + reclen;
1386 
1387 		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
1388 			bzero(wbuf, wlen);
1389 		} else {
1390 			/*
1391 			 * A subsequent write may have overwritten this block,
1392 			 * in which case wbp may have been freed and
1393 			 * reallocated, and our read of wbp may fail with a
1394 			 * checksum error.  We can safely ignore this because
1395 			 * the later write will provide the correct data.
1396 			 */
1397 			zbookmark_t zb;
1398 
1399 			zb.zb_objset = dmu_objset_id(zilog->zl_os);
1400 			zb.zb_object = lrw->lr_foid;
1401 			zb.zb_level = -1;
1402 			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);
1403 
1404 			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
1405 			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
1406 			    ZIO_PRIORITY_SYNC_READ,
1407 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
1408 			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
1409 		}
1410 	}
1411 
1412 	/*
1413 	 * We must now do two things atomically: replay this log record,
1414 	 * and update the log header to reflect the fact that we did so.
1415 	 * We use the DMU's ability to assign into a specific txg to do this.
1416 	 */
1417 	for (pass = 1, sunk = B_FALSE; /* CONSTANTCONDITION */; pass++) {
1418 		uint64_t replay_txg;
1419 		dmu_tx_t *replay_tx;
1420 
1421 		replay_tx = dmu_tx_create(zr->zr_os);
1422 		error = dmu_tx_assign(replay_tx, TXG_WAIT);
1423 		if (error) {
1424 			dmu_tx_abort(replay_tx);
1425 			break;
1426 		}
1427 
1428 		replay_txg = dmu_tx_get_txg(replay_tx);
1429 
1430 		if (txtype == 0 || txtype >= TX_MAX_TYPE) {
1431 			error = EINVAL;
1432 		} else {
1433 			/*
1434 			 * On the first pass, arrange for the replay vector
1435 			 * to fail its dmu_tx_assign().  That's the only way
1436 			 * to ensure that those code paths remain well tested.
1437 			 */
1438 			*zr->zr_txgp = replay_txg - (pass == 1);
1439 			error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
1440 			    zr->zr_byteswap);
1441 			*zr->zr_txgp = TXG_NOWAIT;
1442 		}
1443 
1444 		if (error == 0) {
1445 			dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
1446 			zilog->zl_replay_seq[replay_txg & TXG_MASK] =
1447 			    lr->lrc_seq;
1448 		}
1449 
1450 		dmu_tx_commit(replay_tx);
1451 
1452 		if (!error)
1453 			return;
1454 
1455 		/*
1456 		 * The DMU's dnode layer doesn't see removes until the txg
1457 		 * commits, so a subsequent claim can spuriously fail with
1458 		 * EEXIST.  So if we receive any error other than ERESTART,
1459 		 * we sync out any removes and then retry the
1460 		 * transaction.
1461 		 */
1462 		if (error != ERESTART && !sunk) {
1463 			txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
1464 			sunk = B_TRUE;
1465 			continue; /* retry */
1466 		}
1467 
1468 		if (error != ERESTART)
1469 			break;
1470 
1471 		if (pass != 1)
1472 			txg_wait_open(spa_get_dsl(zilog->zl_spa),
1473 			    replay_txg + 1);
1474 
1475 		dprintf("pass %d, retrying\n", pass);
1476 	}
1477 
1478 	ASSERT(error && error != ERESTART);
1479 	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
1480 	dmu_objset_name(zr->zr_os, name);
1481 	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
1482 	    "dataset %s, seq 0x%llx, txtype %llu\n",
1483 	    error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype);
1484 	zilog->zl_stop_replay = 1;
1485 	kmem_free(name, MAXNAMELEN);
1486 }
1487 
1488 /* ARGSUSED */
1489 static void
1490 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
1491 {
1492 	zilog->zl_replay_blks++;
1493 }
1494 
1495 /*
1496  * If this dataset has a non-empty intent log, replay it and destroy it.
1497  */
1498 void
1499 zil_replay(objset_t *os, void *arg, uint64_t *txgp,
1500 	zil_replay_func_t *replay_func[TX_MAX_TYPE])
1501 {
1502 	zilog_t *zilog = dmu_objset_zil(os);
1503 	const zil_header_t *zh = zilog->zl_header;
1504 	zil_replay_arg_t zr;
1505 
1506 	if (zil_empty(zilog)) {
1507 		zil_destroy(zilog, B_TRUE);
1508 		return;
1509 	}
1510 
1511 	zr.zr_os = os;
1512 	zr.zr_replay = replay_func;
1513 	zr.zr_arg = arg;
1514 	zr.zr_txgp = txgp;
1515 	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
1516 	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
1517 
1518 	/*
1519 	 * Wait for in-progress removes to sync before starting replay.
1520 	 */
1521 	txg_wait_synced(zilog->zl_dmu_pool, 0);
1522 
1523 	zilog->zl_stop_replay = 0;
1524 	zilog->zl_replay_time = lbolt;
1525 	ASSERT(zilog->zl_replay_blks == 0);
1526 	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
1527 	    zh->zh_claim_txg);
1528 	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);
1529 
1530 	zil_destroy(zilog, B_FALSE);
1531 }
1532 
1533 /*
1534  * Report whether all transactions are committed
1535  */
1536 int
1537 zil_is_committed(zilog_t *zilog)
1538 {
1539 	lwb_t *lwb;
1540 	int ret;
1541 
1542 	mutex_enter(&zilog->zl_lock);
1543 	while (zilog->zl_writer)
1544 		cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1545 
1546 	/* recent unpushed intent log transactions? */
1547 	if (!list_is_empty(&zilog->zl_itx_list)) {
1548 		ret = B_FALSE;
1549 		goto out;
1550 	}
1551 
1552 	/* intent log never used? */
1553 	lwb = list_head(&zilog->zl_lwb_list);
1554 	if (lwb == NULL) {
1555 		ret = B_TRUE;
1556 		goto out;
1557 	}
1558 
1559 	/*
1560 	 * more than 1 log buffer means zil_sync() hasn't yet freed
1561 	 * entries after a txg has committed
1562 	 */
1563 	if (list_next(&zilog->zl_lwb_list, lwb)) {
1564 		ret = B_FALSE;
1565 		goto out;
1566 	}
1567 
1568 	ASSERT(zil_empty(zilog));
1569 	ret = B_TRUE;
1570 out:
1571 	cv_broadcast(&zilog->zl_cv_writer);
1572 	mutex_exit(&zilog->zl_lock);
1573 	return (ret);
1574 }
1575