xref: /freebsd/sys/contrib/openzfs/module/zfs/zio.c (revision cb14a3fe5122c879eae1fb480ed7ce82a699ddb6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
24  * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
25  * Copyright (c) 2017, Intel Corporation.
26  * Copyright (c) 2019, Klara Inc.
27  * Copyright (c) 2019, Allan Jude
28  * Copyright (c) 2021, Datto, Inc.
29  */
30 
31 #include <sys/sysmacros.h>
32 #include <sys/zfs_context.h>
33 #include <sys/fm/fs/zfs.h>
34 #include <sys/spa.h>
35 #include <sys/txg.h>
36 #include <sys/spa_impl.h>
37 #include <sys/vdev_impl.h>
38 #include <sys/vdev_trim.h>
39 #include <sys/zio_impl.h>
40 #include <sys/zio_compress.h>
41 #include <sys/zio_checksum.h>
42 #include <sys/dmu_objset.h>
43 #include <sys/arc.h>
44 #include <sys/brt.h>
45 #include <sys/ddt.h>
46 #include <sys/blkptr.h>
47 #include <sys/zfeature.h>
48 #include <sys/dsl_scan.h>
49 #include <sys/metaslab_impl.h>
50 #include <sys/time.h>
51 #include <sys/trace_zfs.h>
52 #include <sys/abd.h>
53 #include <sys/dsl_crypt.h>
54 #include <cityhash.h>
55 
56 /*
57  * ==========================================================================
58  * I/O type descriptions
59  * ==========================================================================
60  */
61 const char *const zio_type_name[ZIO_TYPES] = {
62 	/*
63 	 * Note: the Linux kernel limits thread name length, so these
64 	 * names differ from upstream OpenZFS.
65 	 */
66 	"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl", "z_trim"
67 };
68 
69 int zio_dva_throttle_enabled = B_TRUE;
70 static int zio_deadman_log_all = B_FALSE;
71 
72 /*
73  * ==========================================================================
74  * I/O kmem caches
75  * ==========================================================================
76  */
77 static kmem_cache_t *zio_cache;
78 static kmem_cache_t *zio_link_cache;
79 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
80 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
81 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
82 static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
83 static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
84 #endif
85 
86 /* Mark IOs as "slow" if they take longer than 30 seconds */
87 static uint_t zio_slow_io_ms = (30 * MILLISEC);
88 
89 #define	BP_SPANB(indblkshift, level) \
90 	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
91 #define	COMPARE_META_LEVEL	0x80000000ul
92 /*
93  * The following actions directly affect the spa's sync-to-convergence logic.
94  * The values below define the sync pass when we start performing the action.
95  * Care should be taken when changing these values as they directly impact
96  * spa_sync() performance. Tuning these values may introduce subtle performance
97  * pathologies and should only be done in the context of performance analysis.
98  * These tunables will eventually be removed and replaced with #defines once
99  * enough analysis has been done to determine optimal values.
100  *
101  * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
102  * regular blocks are not deferred.
103  *
104  * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
105  * compression (including of metadata).  In practice, we don't have this
106  * many sync passes, so this has no effect.
107  *
108  * The original intent was that disabling compression would help the sync
109  * passes to converge. However, in practice disabling compression increases
110  * the average number of sync passes, because when we turn compression off,
111  * many blocks' sizes will change and thus we have to re-allocate (not
112  * overwrite) them. It also increases the number of 128KB allocations (e.g.
113  * for indirect blocks and spacemaps) because these will not be compressed.
114  * The 128K allocations are especially detrimental to performance on highly
115  * fragmented systems, which may have very few free segments of this size,
116  * and may need to load new metaslabs to satisfy 128K allocations.
117  */
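/*
 * Illustrative example of the deferred-free pass (not an additional
 * tunable): with the default zfs_sync_pass_deferred_free = 2, a free
 * issued during sync pass 2 or later of the currently-syncing txg is
 * appended to the deferred free bplist rather than freed immediately,
 * unless the log spacemap feature is active (see zio_free() below).
 */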
118 
119 /* defer frees starting in this pass */
120 uint_t zfs_sync_pass_deferred_free = 2;
121 
122 /* don't compress starting in this pass */
123 static uint_t zfs_sync_pass_dont_compress = 8;
124 
125 /* rewrite new bps starting in this pass */
126 static uint_t zfs_sync_pass_rewrite = 2;
127 
128 /*
129  * An allocating zio is one that either currently has the DVA allocate
130  * stage set or will have it later in its lifetime.
131  */
132 #define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
133 
134 /*
135  * Enable smaller cores by excluding metadata
136  * allocations as well.
137  */
138 int zio_exclude_metadata = 0;
139 static int zio_requeue_io_start_cut_in_line = 1;
140 
141 #ifdef ZFS_DEBUG
142 static const int zio_buf_debug_limit = 16384;
143 #else
144 static const int zio_buf_debug_limit = 0;
145 #endif
146 
147 static inline void __zio_execute(zio_t *zio);
148 
149 static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
150 
151 void
152 zio_init(void)
153 {
154 	size_t c;
155 
156 	zio_cache = kmem_cache_create("zio_cache",
157 	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
158 	zio_link_cache = kmem_cache_create("zio_link_cache",
159 	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
160 
161 	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
162 		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
163 		size_t align, cflags, data_cflags;
164 		char name[32];
165 
166 		/*
167 		 * Create a cache for each half-power-of-2 size, starting from
168 		 * SPA_MINBLOCKSIZE.  This gives a memory space efficiency of
169 		 * ~7/8, which is sufficient for the transient allocations that
170 		 * mostly use these caches.
171 		 */
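		/*
		 * Illustrative sketch (assuming SPA_MINBLOCKSIZE == 512): the
		 * checks below select sizes of 512, 1024, 1536, 2048, 3072,
		 * 4096, 6144, 8192, 12288, 16384, ... bytes, i.e. every power
		 * of two and every 3/2 multiple of a power of two.
		 */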
172 		size_t p2 = size;
173 		while (!ISP2(p2))
174 			p2 &= p2 - 1;
175 		if (!IS_P2ALIGNED(size, p2 / 2))
176 			continue;
177 
178 #ifndef _KERNEL
179 		/*
180 		 * If we are using watchpoints, put each buffer on its own page,
181 		 * to eliminate the performance overhead of trapping to the
182 		 * kernel when modifying a non-watched buffer that shares the
183 		 * page with a watched buffer.
184 		 */
185 		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
186 			continue;
187 #endif
188 
189 		if (IS_P2ALIGNED(size, PAGESIZE))
190 			align = PAGESIZE;
191 		else
192 			align = 1 << (highbit64(size ^ (size - 1)) - 1);
193 
194 		cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
195 		    KMC_NODEBUG : 0;
196 		data_cflags = KMC_NODEBUG;
197 		if (cflags == data_cflags) {
198 			/*
199 			 * Resulting kmem caches would be identical.
200 			 * Save memory by creating only one.
201 			 */
202 			(void) snprintf(name, sizeof (name),
203 			    "zio_buf_comb_%lu", (ulong_t)size);
204 			zio_buf_cache[c] = kmem_cache_create(name, size, align,
205 			    NULL, NULL, NULL, NULL, NULL, cflags);
206 			zio_data_buf_cache[c] = zio_buf_cache[c];
207 			continue;
208 		}
209 		(void) snprintf(name, sizeof (name), "zio_buf_%lu",
210 		    (ulong_t)size);
211 		zio_buf_cache[c] = kmem_cache_create(name, size, align,
212 		    NULL, NULL, NULL, NULL, NULL, cflags);
213 
214 		(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
215 		    (ulong_t)size);
216 		zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
217 		    NULL, NULL, NULL, NULL, NULL, data_cflags);
218 	}
219 
220 	while (--c != 0) {
221 		ASSERT(zio_buf_cache[c] != NULL);
222 		if (zio_buf_cache[c - 1] == NULL)
223 			zio_buf_cache[c - 1] = zio_buf_cache[c];
224 
225 		ASSERT(zio_data_buf_cache[c] != NULL);
226 		if (zio_data_buf_cache[c - 1] == NULL)
227 			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
228 	}
229 
230 	zio_inject_init();
231 
232 	lz4_init();
233 }
234 
235 void
236 zio_fini(void)
237 {
238 	size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
239 
240 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
241 	for (size_t i = 0; i < n; i++) {
242 		if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
243 			(void) printf("zio_fini: [%d] %llu != %llu\n",
244 			    (int)((i + 1) << SPA_MINBLOCKSHIFT),
245 			    (long long unsigned)zio_buf_cache_allocs[i],
246 			    (long long unsigned)zio_buf_cache_frees[i]);
247 	}
248 #endif
249 
250 	/*
251 	 * The same kmem cache can show up multiple times in both zio_buf_cache
252 	 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
253 	 * sort it out.
254 	 */
255 	for (size_t i = 0; i < n; i++) {
256 		kmem_cache_t *cache = zio_buf_cache[i];
257 		if (cache == NULL)
258 			continue;
259 		for (size_t j = i; j < n; j++) {
260 			if (cache == zio_buf_cache[j])
261 				zio_buf_cache[j] = NULL;
262 			if (cache == zio_data_buf_cache[j])
263 				zio_data_buf_cache[j] = NULL;
264 		}
265 		kmem_cache_destroy(cache);
266 	}
267 
268 	for (size_t i = 0; i < n; i++) {
269 		kmem_cache_t *cache = zio_data_buf_cache[i];
270 		if (cache == NULL)
271 			continue;
272 		for (size_t j = i; j < n; j++) {
273 			if (cache == zio_data_buf_cache[j])
274 				zio_data_buf_cache[j] = NULL;
275 		}
276 		kmem_cache_destroy(cache);
277 	}
278 
279 	for (size_t i = 0; i < n; i++) {
280 		VERIFY3P(zio_buf_cache[i], ==, NULL);
281 		VERIFY3P(zio_data_buf_cache[i], ==, NULL);
282 	}
283 
284 	kmem_cache_destroy(zio_link_cache);
285 	kmem_cache_destroy(zio_cache);
286 
287 	zio_inject_fini();
288 
289 	lz4_fini();
290 }
291 
292 /*
293  * ==========================================================================
294  * Allocate and free I/O buffers
295  * ==========================================================================
296  */
297 
298 #ifdef ZFS_DEBUG
299 static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
300 #endif
301 
302 /*
303  * Use empty space after the buffer to detect overflows.
304  *
305  * Since zio_init() creates kmem caches only for a certain set of buffer sizes,
306  * allocations of different sizes may have some unused space after the data.
307  * Filling part of that space with a known pattern on allocation and checking
308  * it on free should allow us to detect some buffer overflows.
309  */
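/*
 * Illustrative sketch (hypothetical size): a zio_buf_alloc(5000) is served
 * from a larger kmem cache, so part of the unused tail beyond byte 5000
 * (rounded up to ulong_t alignment) is filled with zio_buf_canary at
 * allocation time and re-checked on free; a write past the requested size
 * corrupts the pattern and triggers the PANIC in zio_buf_check_canary().
 */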
310 static void
311 zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
312 {
313 #ifdef ZFS_DEBUG
314 	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
315 	ulong_t *canary = p + off / sizeof (ulong_t);
316 	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
317 	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
318 	    cache[c] == cache[c + 1])
319 		asize = (c + 2) << SPA_MINBLOCKSHIFT;
320 	for (; off < asize; canary++, off += sizeof (ulong_t))
321 		*canary = zio_buf_canary;
322 #endif
323 }
324 
325 static void
326 zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
327 {
328 #ifdef ZFS_DEBUG
329 	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
330 	ulong_t *canary = p + off / sizeof (ulong_t);
331 	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
332 	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
333 	    cache[c] == cache[c + 1])
334 		asize = (c + 2) << SPA_MINBLOCKSHIFT;
335 	for (; off < asize; canary++, off += sizeof (ulong_t)) {
336 		if (unlikely(*canary != zio_buf_canary)) {
337 			PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
338 			    p, size, (canary - p) * sizeof (ulong_t),
339 			    *canary, zio_buf_canary);
340 		}
341 	}
342 #endif
343 }
344 
345 /*
346  * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
347  * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
348  * useful to inspect ZFS metadata, but if possible, we should avoid keeping
349  * excess / transient data in-core during a crashdump.
350  */
351 void *
352 zio_buf_alloc(size_t size)
353 {
354 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
355 
356 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
357 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
358 	atomic_add_64(&zio_buf_cache_allocs[c], 1);
359 #endif
360 
361 	void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
362 	zio_buf_put_canary(p, size, zio_buf_cache, c);
363 	return (p);
364 }
365 
366 /*
367  * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
368  * crashdump if the kernel panics.  This limits the amount of ZFS data that
369  * shows up in a kernel crashdump, reducing the amount of kernel heap dumped
370  * to disk when the kernel panics.
371  */
372 void *
373 zio_data_buf_alloc(size_t size)
374 {
375 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
376 
377 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
378 
379 	void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
380 	zio_buf_put_canary(p, size, zio_data_buf_cache, c);
381 	return (p);
382 }
383 
384 void
385 zio_buf_free(void *buf, size_t size)
386 {
387 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
388 
389 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
390 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
391 	atomic_add_64(&zio_buf_cache_frees[c], 1);
392 #endif
393 
394 	zio_buf_check_canary(buf, size, zio_buf_cache, c);
395 	kmem_cache_free(zio_buf_cache[c], buf);
396 }
397 
398 void
399 zio_data_buf_free(void *buf, size_t size)
400 {
401 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
402 
403 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
404 
405 	zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
406 	kmem_cache_free(zio_data_buf_cache[c], buf);
407 }
408 
409 static void
410 zio_abd_free(void *abd, size_t size)
411 {
412 	(void) size;
413 	abd_free((abd_t *)abd);
414 }
415 
416 /*
417  * ==========================================================================
418  * Push and pop I/O transform buffers
419  * ==========================================================================
420  */
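/*
 * A transform temporarily substitutes a different data buffer for a zio:
 * zio_push_transform() saves the current io_abd/io_size on a per-zio stack
 * and installs the new buffer, while zio_pop_transforms() unwinds the stack,
 * invoking each transform callback (e.g. zio_decompress) with the saved
 * buffer and size, freeing the substituted buffer when zt_bufsize != 0, and
 * restoring the original io_abd/io_size.
 */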
421 void
422 zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
423     zio_transform_func_t *transform)
424 {
425 	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
426 
427 	zt->zt_orig_abd = zio->io_abd;
428 	zt->zt_orig_size = zio->io_size;
429 	zt->zt_bufsize = bufsize;
430 	zt->zt_transform = transform;
431 
432 	zt->zt_next = zio->io_transform_stack;
433 	zio->io_transform_stack = zt;
434 
435 	zio->io_abd = data;
436 	zio->io_size = size;
437 }
438 
439 void
440 zio_pop_transforms(zio_t *zio)
441 {
442 	zio_transform_t *zt;
443 
444 	while ((zt = zio->io_transform_stack) != NULL) {
445 		if (zt->zt_transform != NULL)
446 			zt->zt_transform(zio,
447 			    zt->zt_orig_abd, zt->zt_orig_size);
448 
449 		if (zt->zt_bufsize != 0)
450 			abd_free(zio->io_abd);
451 
452 		zio->io_abd = zt->zt_orig_abd;
453 		zio->io_size = zt->zt_orig_size;
454 		zio->io_transform_stack = zt->zt_next;
455 
456 		kmem_free(zt, sizeof (zio_transform_t));
457 	}
458 }
459 
460 /*
461  * ==========================================================================
462  * I/O transform callbacks for subblocks, decompression, and decryption
463  * ==========================================================================
464  */
465 static void
466 zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
467 {
468 	ASSERT(zio->io_size > size);
469 
470 	if (zio->io_type == ZIO_TYPE_READ)
471 		abd_copy(data, zio->io_abd, size);
472 }
473 
474 static void
475 zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
476 {
477 	if (zio->io_error == 0) {
478 		void *tmp = abd_borrow_buf(data, size);
479 		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
480 		    zio->io_abd, tmp, zio->io_size, size,
481 		    &zio->io_prop.zp_complevel);
482 		abd_return_buf_copy(data, tmp, size);
483 
484 		if (zio_injection_enabled && ret == 0)
485 			ret = zio_handle_fault_injection(zio, EINVAL);
486 
487 		if (ret != 0)
488 			zio->io_error = SET_ERROR(EIO);
489 	}
490 }
491 
492 static void
493 zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
494 {
495 	int ret;
496 	void *tmp;
497 	blkptr_t *bp = zio->io_bp;
498 	spa_t *spa = zio->io_spa;
499 	uint64_t dsobj = zio->io_bookmark.zb_objset;
500 	uint64_t lsize = BP_GET_LSIZE(bp);
501 	dmu_object_type_t ot = BP_GET_TYPE(bp);
502 	uint8_t salt[ZIO_DATA_SALT_LEN];
503 	uint8_t iv[ZIO_DATA_IV_LEN];
504 	uint8_t mac[ZIO_DATA_MAC_LEN];
505 	boolean_t no_crypt = B_FALSE;
506 
507 	ASSERT(BP_USES_CRYPT(bp));
508 	ASSERT3U(size, !=, 0);
509 
510 	if (zio->io_error != 0)
511 		return;
512 
513 	/*
514 	 * Verify the cksum of MACs stored in an indirect bp. It will always
515 	 * be possible to verify this since it does not require an encryption
516 	 * key.
517 	 */
518 	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
519 		zio_crypt_decode_mac_bp(bp, mac);
520 
521 		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
522 			/*
523 			 * We haven't decompressed the data yet, but
524 			 * zio_crypt_do_indirect_mac_checksum() requires
525 			 * decompressed data to be able to parse out the MACs
526 			 * from the indirect block. We decompress it now and
527 			 * throw away the result after we are finished.
528 			 */
529 			tmp = zio_buf_alloc(lsize);
530 			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
531 			    zio->io_abd, tmp, zio->io_size, lsize,
532 			    &zio->io_prop.zp_complevel);
533 			if (ret != 0) {
534 				ret = SET_ERROR(EIO);
535 				goto error;
536 			}
537 			ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
538 			    tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
539 			zio_buf_free(tmp, lsize);
540 		} else {
541 			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
542 			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
543 		}
544 		abd_copy(data, zio->io_abd, size);
545 
546 		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
547 			ret = zio_handle_decrypt_injection(spa,
548 			    &zio->io_bookmark, ot, ECKSUM);
549 		}
550 		if (ret != 0)
551 			goto error;
552 
553 		return;
554 	}
555 
556 	/*
557 	 * If this is an authenticated block, just check the MAC. It would be
558 	 * nice to separate this out into its own flag, but when this was done,
559 	 * we had run out of bits in what is now zio_flag_t. Future cleanup
560 	 * could make this a flag bit.
561 	 */
562 	if (BP_IS_AUTHENTICATED(bp)) {
563 		if (ot == DMU_OT_OBJSET) {
564 			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
565 			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
566 		} else {
567 			zio_crypt_decode_mac_bp(bp, mac);
568 			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
569 			    zio->io_abd, size, mac);
570 			if (zio_injection_enabled && ret == 0) {
571 				ret = zio_handle_decrypt_injection(spa,
572 				    &zio->io_bookmark, ot, ECKSUM);
573 			}
574 		}
575 		abd_copy(data, zio->io_abd, size);
576 
577 		if (ret != 0)
578 			goto error;
579 
580 		return;
581 	}
582 
583 	zio_crypt_decode_params_bp(bp, salt, iv);
584 
585 	if (ot == DMU_OT_INTENT_LOG) {
586 		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
587 		zio_crypt_decode_mac_zil(tmp, mac);
588 		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
589 	} else {
590 		zio_crypt_decode_mac_bp(bp, mac);
591 	}
592 
593 	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
594 	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
595 	    zio->io_abd, &no_crypt);
596 	if (no_crypt)
597 		abd_copy(data, zio->io_abd, size);
598 
599 	if (ret != 0)
600 		goto error;
601 
602 	return;
603 
604 error:
605 	/* assert that the key was found unless this was speculative */
606 	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
607 
608 	/*
609 	 * If there was a decryption / authentication error return EIO as
610 	 * the io_error. If this was not a speculative zio, create an ereport.
611 	 */
612 	if (ret == ECKSUM) {
613 		zio->io_error = SET_ERROR(EIO);
614 		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
615 			spa_log_error(spa, &zio->io_bookmark,
616 			    BP_GET_LOGICAL_BIRTH(zio->io_bp));
617 			(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
618 			    spa, NULL, &zio->io_bookmark, zio, 0);
619 		}
620 	} else {
621 		zio->io_error = ret;
622 	}
623 }
624 
625 /*
626  * ==========================================================================
627  * I/O parent/child relationships and pipeline interlocks
628  * ==========================================================================
629  */
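/*
 * Usage sketch for the walkers below (illustrative; see zio_unique_parent()
 * for a real caller): the cursor must be initialized to NULL, e.g.
 *
 *	zio_link_t *zl = NULL;
 *	zio_t *pio;
 *	while ((pio = zio_walk_parents(cio, &zl)) != NULL)
 *		...;			(visit each parent of cio)
 */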
630 zio_t *
631 zio_walk_parents(zio_t *cio, zio_link_t **zl)
632 {
633 	list_t *pl = &cio->io_parent_list;
634 
635 	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
636 	if (*zl == NULL)
637 		return (NULL);
638 
639 	ASSERT((*zl)->zl_child == cio);
640 	return ((*zl)->zl_parent);
641 }
642 
643 zio_t *
644 zio_walk_children(zio_t *pio, zio_link_t **zl)
645 {
646 	list_t *cl = &pio->io_child_list;
647 
648 	ASSERT(MUTEX_HELD(&pio->io_lock));
649 
650 	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
651 	if (*zl == NULL)
652 		return (NULL);
653 
654 	ASSERT((*zl)->zl_parent == pio);
655 	return ((*zl)->zl_child);
656 }
657 
658 zio_t *
659 zio_unique_parent(zio_t *cio)
660 {
661 	zio_link_t *zl = NULL;
662 	zio_t *pio = zio_walk_parents(cio, &zl);
663 
664 	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
665 	return (pio);
666 }
667 
668 void
669 zio_add_child(zio_t *pio, zio_t *cio)
670 {
671 	/*
672 	 * Logical I/Os can have logical, gang, or vdev children.
673 	 * Gang I/Os can have gang or vdev children.
674 	 * Vdev I/Os can only have vdev children.
675 	 * The following ASSERT captures all of these constraints.
676 	 */
677 	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
678 
679 	/* Parent should not have READY stage if child doesn't have it. */
680 	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
681 	    (cio->io_child_type != ZIO_CHILD_VDEV),
682 	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);
683 
684 	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
685 	zl->zl_parent = pio;
686 	zl->zl_child = cio;
687 
688 	mutex_enter(&pio->io_lock);
689 	mutex_enter(&cio->io_lock);
690 
691 	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
692 
693 	uint64_t *countp = pio->io_children[cio->io_child_type];
694 	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
695 		countp[w] += !cio->io_state[w];
696 
697 	list_insert_head(&pio->io_child_list, zl);
698 	list_insert_head(&cio->io_parent_list, zl);
699 
700 	mutex_exit(&cio->io_lock);
701 	mutex_exit(&pio->io_lock);
702 }
703 
704 void
705 zio_add_child_first(zio_t *pio, zio_t *cio)
706 {
707 	/*
708 	 * Logical I/Os can have logical, gang, or vdev children.
709 	 * Gang I/Os can have gang or vdev children.
710 	 * Vdev I/Os can only have vdev children.
711 	 * The following ASSERT captures all of these constraints.
712 	 */
713 	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
714 
715 	/* Parent should not have READY stage if child doesn't have it. */
716 	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
717 	    (cio->io_child_type != ZIO_CHILD_VDEV),
718 	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);
719 
720 	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
721 	zl->zl_parent = pio;
722 	zl->zl_child = cio;
723 
724 	ASSERT(list_is_empty(&cio->io_parent_list));
725 	list_insert_head(&cio->io_parent_list, zl);
726 
727 	mutex_enter(&pio->io_lock);
728 
729 	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
730 
731 	uint64_t *countp = pio->io_children[cio->io_child_type];
732 	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
733 		countp[w] += !cio->io_state[w];
734 
735 	list_insert_head(&pio->io_child_list, zl);
736 
737 	mutex_exit(&pio->io_lock);
738 }
739 
740 static void
741 zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
742 {
743 	ASSERT(zl->zl_parent == pio);
744 	ASSERT(zl->zl_child == cio);
745 
746 	mutex_enter(&pio->io_lock);
747 	mutex_enter(&cio->io_lock);
748 
749 	list_remove(&pio->io_child_list, zl);
750 	list_remove(&cio->io_parent_list, zl);
751 
752 	mutex_exit(&cio->io_lock);
753 	mutex_exit(&pio->io_lock);
754 	kmem_cache_free(zio_link_cache, zl);
755 }
756 
757 static boolean_t
758 zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
759 {
760 	boolean_t waiting = B_FALSE;
761 
762 	mutex_enter(&zio->io_lock);
763 	ASSERT(zio->io_stall == NULL);
764 	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
765 		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
766 			continue;
767 
768 		uint64_t *countp = &zio->io_children[c][wait];
769 		if (*countp != 0) {
770 			zio->io_stage >>= 1;
771 			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
772 			zio->io_stall = countp;
773 			waiting = B_TRUE;
774 			break;
775 		}
776 	}
777 	mutex_exit(&zio->io_lock);
778 	return (waiting);
779 }
780 
781 __attribute__((always_inline))
782 static inline void
783 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
784     zio_t **next_to_executep)
785 {
786 	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
787 	int *errorp = &pio->io_child_error[zio->io_child_type];
788 
789 	mutex_enter(&pio->io_lock);
790 	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
791 		*errorp = zio_worst_error(*errorp, zio->io_error);
792 	pio->io_reexecute |= zio->io_reexecute;
793 	ASSERT3U(*countp, >, 0);
794 
795 	(*countp)--;
796 
797 	if (*countp == 0 && pio->io_stall == countp) {
798 		zio_taskq_type_t type =
799 		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
800 		    ZIO_TASKQ_INTERRUPT;
801 		pio->io_stall = NULL;
802 		mutex_exit(&pio->io_lock);
803 
804 		/*
805 		 * If we can tell the caller to execute this parent next, do
806 		 * so. We only do this if the parent's zio type matches the
807 		 * child's type. Otherwise dispatch the parent zio in its
808 		 * own taskq.
809 		 *
810 		 * Having the caller execute the parent when possible reduces
811 		 * locking on the zio taskq's, reduces context switch
812 		 * overhead, and has no recursion penalty.  Note that one
813 		 * read from disk typically causes at least 3 zio's: a
814 		 * zio_null(), the logical zio_read(), and then a physical
815 		 * zio.  When the physical ZIO completes, we are able to call
816 		 * zio_done() on all 3 of these zio's from one invocation of
817 		 * zio_execute() by returning the parent back to
818 		 * zio_execute().  Since the parent isn't executed until this
819 		 * thread returns back to zio_execute(), the caller should do
820 		 * so promptly.
821 		 *
822 		 * In other cases, dispatching the parent prevents
823 		 * overflowing the stack when we have deeply nested
824 		 * parent-child relationships, as we do with the "mega zio"
825 		 * of writes for spa_sync(), and the chain of ZIL blocks.
826 		 */
827 		if (next_to_executep != NULL && *next_to_executep == NULL &&
828 		    pio->io_type == zio->io_type) {
829 			*next_to_executep = pio;
830 		} else {
831 			zio_taskq_dispatch(pio, type, B_FALSE);
832 		}
833 	} else {
834 		mutex_exit(&pio->io_lock);
835 	}
836 }
837 
838 static void
839 zio_inherit_child_errors(zio_t *zio, enum zio_child c)
840 {
841 	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
842 		zio->io_error = zio->io_child_error[c];
843 }
844 
845 int
846 zio_bookmark_compare(const void *x1, const void *x2)
847 {
848 	const zio_t *z1 = x1;
849 	const zio_t *z2 = x2;
850 
851 	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
852 		return (-1);
853 	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
854 		return (1);
855 
856 	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
857 		return (-1);
858 	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
859 		return (1);
860 
861 	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
862 		return (-1);
863 	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
864 		return (1);
865 
866 	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
867 		return (-1);
868 	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
869 		return (1);
870 
871 	if (z1 < z2)
872 		return (-1);
873 	if (z1 > z2)
874 		return (1);
875 
876 	return (0);
877 }
878 
879 /*
880  * ==========================================================================
881  * Create the various types of I/O (read, write, free, etc)
882  * ==========================================================================
883  */
884 static zio_t *
885 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
886     abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
887     void *private, zio_type_t type, zio_priority_t priority,
888     zio_flag_t flags, vdev_t *vd, uint64_t offset,
889     const zbookmark_phys_t *zb, enum zio_stage stage,
890     enum zio_stage pipeline)
891 {
892 	zio_t *zio;
893 
894 	IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
895 	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
896 	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
897 
898 	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
899 	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
900 	ASSERT(vd || stage == ZIO_STAGE_OPEN);
901 
902 	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
903 
904 	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
905 	memset(zio, 0, sizeof (zio_t));
906 
907 	mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
908 	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
909 
910 	list_create(&zio->io_parent_list, sizeof (zio_link_t),
911 	    offsetof(zio_link_t, zl_parent_node));
912 	list_create(&zio->io_child_list, sizeof (zio_link_t),
913 	    offsetof(zio_link_t, zl_child_node));
914 	metaslab_trace_init(&zio->io_alloc_list);
915 
916 	if (vd != NULL)
917 		zio->io_child_type = ZIO_CHILD_VDEV;
918 	else if (flags & ZIO_FLAG_GANG_CHILD)
919 		zio->io_child_type = ZIO_CHILD_GANG;
920 	else if (flags & ZIO_FLAG_DDT_CHILD)
921 		zio->io_child_type = ZIO_CHILD_DDT;
922 	else
923 		zio->io_child_type = ZIO_CHILD_LOGICAL;
924 
925 	if (bp != NULL) {
926 		if (type != ZIO_TYPE_WRITE ||
927 		    zio->io_child_type == ZIO_CHILD_DDT) {
928 			zio->io_bp_copy = *bp;
929 			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
930 		} else {
931 			zio->io_bp = (blkptr_t *)bp;
932 		}
933 		zio->io_bp_orig = *bp;
934 		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
935 			zio->io_logical = zio;
936 		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
937 			pipeline |= ZIO_GANG_STAGES;
938 	}
939 
940 	zio->io_spa = spa;
941 	zio->io_txg = txg;
942 	zio->io_done = done;
943 	zio->io_private = private;
944 	zio->io_type = type;
945 	zio->io_priority = priority;
946 	zio->io_vd = vd;
947 	zio->io_offset = offset;
948 	zio->io_orig_abd = zio->io_abd = data;
949 	zio->io_orig_size = zio->io_size = psize;
950 	zio->io_lsize = lsize;
951 	zio->io_orig_flags = zio->io_flags = flags;
952 	zio->io_orig_stage = zio->io_stage = stage;
953 	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
954 	zio->io_pipeline_trace = ZIO_STAGE_OPEN;
955 	zio->io_allocator = ZIO_ALLOCATOR_NONE;
956 
957 	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
958 	    (pipeline & ZIO_STAGE_READY) == 0;
959 	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
960 
961 	if (zb != NULL)
962 		zio->io_bookmark = *zb;
963 
964 	if (pio != NULL) {
965 		zio->io_metaslab_class = pio->io_metaslab_class;
966 		if (zio->io_logical == NULL)
967 			zio->io_logical = pio->io_logical;
968 		if (zio->io_child_type == ZIO_CHILD_GANG)
969 			zio->io_gang_leader = pio->io_gang_leader;
970 		zio_add_child_first(pio, zio);
971 	}
972 
973 	taskq_init_ent(&zio->io_tqent);
974 
975 	return (zio);
976 }
977 
978 void
979 zio_destroy(zio_t *zio)
980 {
981 	metaslab_trace_fini(&zio->io_alloc_list);
982 	list_destroy(&zio->io_parent_list);
983 	list_destroy(&zio->io_child_list);
984 	mutex_destroy(&zio->io_lock);
985 	cv_destroy(&zio->io_cv);
986 	kmem_cache_free(zio_cache, zio);
987 }
988 
989 /*
990  * A ZIO intended to sit between others.  It provides synchronization at the
991  * READY and DONE pipeline stages and calls the respective callbacks.
992  */
993 zio_t *
994 zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
995     void *private, zio_flag_t flags)
996 {
997 	zio_t *zio;
998 
999 	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
1000 	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
1001 	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
1002 
1003 	return (zio);
1004 }
1005 
1006 /*
1007  * A ZIO intended to be the root of a tree.  Unlike a null ZIO, it does not
1008  * have a READY pipeline stage (it is ready on creation), so it should not be
1009  * used as a child of any ZIO that may need to wait for its grandchildren's
1010  * READY stage (i.e. any other ZIO type).
1011  */
1012 zio_t *
1013 zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
1014 {
1015 	zio_t *zio;
1016 
1017 	zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
1018 	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
1019 	    ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);
1020 
1021 	return (zio);
1022 }
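/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a root ZIO is commonly used to collect and wait for a set of children:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for each block pointer of interest:
 *		zio_nowait(zio_read(rio, spa, bp, abd, size, done, priv,
 *		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb));
 *	int error = zio_wait(rio);
 */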
1023 
1024 static int
1025 zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
1026     enum blk_verify_flag blk_verify, const char *fmt, ...)
1027 {
1028 	va_list adx;
1029 	char buf[256];
1030 
1031 	va_start(adx, fmt);
1032 	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
1033 	va_end(adx);
1034 
1035 	zfs_dbgmsg("bad blkptr at %px: "
1036 	    "DVA[0]=%#llx/%#llx "
1037 	    "DVA[1]=%#llx/%#llx "
1038 	    "DVA[2]=%#llx/%#llx "
1039 	    "prop=%#llx "
1040 	    "pad=%#llx,%#llx "
1041 	    "phys_birth=%#llx "
1042 	    "birth=%#llx "
1043 	    "fill=%#llx "
1044 	    "cksum=%#llx/%#llx/%#llx/%#llx",
1045 	    bp,
1046 	    (long long)bp->blk_dva[0].dva_word[0],
1047 	    (long long)bp->blk_dva[0].dva_word[1],
1048 	    (long long)bp->blk_dva[1].dva_word[0],
1049 	    (long long)bp->blk_dva[1].dva_word[1],
1050 	    (long long)bp->blk_dva[2].dva_word[0],
1051 	    (long long)bp->blk_dva[2].dva_word[1],
1052 	    (long long)bp->blk_prop,
1053 	    (long long)bp->blk_pad[0],
1054 	    (long long)bp->blk_pad[1],
1055 	    (long long)BP_GET_PHYSICAL_BIRTH(bp),
1056 	    (long long)BP_GET_LOGICAL_BIRTH(bp),
1057 	    (long long)bp->blk_fill,
1058 	    (long long)bp->blk_cksum.zc_word[0],
1059 	    (long long)bp->blk_cksum.zc_word[1],
1060 	    (long long)bp->blk_cksum.zc_word[2],
1061 	    (long long)bp->blk_cksum.zc_word[3]);
1062 	switch (blk_verify) {
1063 	case BLK_VERIFY_HALT:
1064 		zfs_panic_recover("%s: %s", spa_name(spa), buf);
1065 		break;
1066 	case BLK_VERIFY_LOG:
1067 		zfs_dbgmsg("%s: %s", spa_name(spa), buf);
1068 		break;
1069 	case BLK_VERIFY_ONLY:
1070 		break;
1071 	}
1072 
1073 	return (1);
1074 }
1075 
1076 /*
1077  * Verify the block pointer fields contain reasonable values.  This means
1078  * it only contains known object types, checksum/compression identifiers,
1079  * block sizes within the maximum allowed limits, valid DVAs, etc.
1080  *
1081  * If everything checks out B_TRUE is returned.  The blk_verify argument
1082  * controls the behavior when an invalid field is detected.
1083  *
1084  * Values for blk_verify_flag:
1085  *   BLK_VERIFY_ONLY: evaluate the block
1086  *   BLK_VERIFY_LOG: evaluate the block and log problems
1087  *   BLK_VERIFY_HALT: call zfs_panic_recover on error
1088  *
1089  * Values for blk_config_flag:
1090  *   BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
1091  *   BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
1092  *   obtained for reader
1093  *   BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
1094  *   performance
1095  */
1096 boolean_t
1097 zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
1098     enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
1099 {
1100 	int errors = 0;
1101 
1102 	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
1103 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1104 		    "blkptr at %px has invalid TYPE %llu",
1105 		    bp, (longlong_t)BP_GET_TYPE(bp));
1106 	}
1107 	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
1108 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1109 		    "blkptr at %px has invalid CHECKSUM %llu",
1110 		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
1111 	}
1112 	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
1113 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1114 		    "blkptr at %px has invalid COMPRESS %llu",
1115 		    bp, (longlong_t)BP_GET_COMPRESS(bp));
1116 	}
1117 	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
1118 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1119 		    "blkptr at %px has invalid LSIZE %llu",
1120 		    bp, (longlong_t)BP_GET_LSIZE(bp));
1121 	}
1122 	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
1123 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1124 		    "blkptr at %px has invalid PSIZE %llu",
1125 		    bp, (longlong_t)BP_GET_PSIZE(bp));
1126 	}
1127 
1128 	if (BP_IS_EMBEDDED(bp)) {
1129 		if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
1130 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1131 			    "blkptr at %px has invalid ETYPE %llu",
1132 			    bp, (longlong_t)BPE_GET_ETYPE(bp));
1133 		}
1134 	}
1135 
1136 	/*
1137 	 * Do not verify individual DVAs if the config is not trusted. This
1138 	 * will be done once the zio is executed in vdev_mirror_map_alloc.
1139 	 */
1140 	if (!spa->spa_trust_config)
1141 		return (errors == 0);
1142 
1143 	switch (blk_config) {
1144 	case BLK_CONFIG_HELD:
1145 		ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
1146 		break;
1147 	case BLK_CONFIG_NEEDED:
1148 		spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
1149 		break;
1150 	case BLK_CONFIG_SKIP:
1151 		return (errors == 0);
1152 	default:
1153 		panic("invalid blk_config %u", blk_config);
1154 	}
1155 
1156 	/*
1157 	 * Pool-specific checks.
1158 	 *
1159 	 * Note: it would be nice to verify that the logical birth
1160 	 * and physical birth are not too large.  However,
1161 	 * spa_freeze() allows the birth time of log blocks (and
1162 	 * dmu_sync()-ed blocks that are in the log) to be arbitrarily
1163 	 * large.
1164 	 */
1165 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
1166 		const dva_t *dva = &bp->blk_dva[i];
1167 		uint64_t vdevid = DVA_GET_VDEV(dva);
1168 
1169 		if (vdevid >= spa->spa_root_vdev->vdev_children) {
1170 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1171 			    "blkptr at %px DVA %u has invalid VDEV %llu",
1172 			    bp, i, (longlong_t)vdevid);
1173 			continue;
1174 		}
1175 		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1176 		if (vd == NULL) {
1177 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1178 			    "blkptr at %px DVA %u has invalid VDEV %llu",
1179 			    bp, i, (longlong_t)vdevid);
1180 			continue;
1181 		}
1182 		if (vd->vdev_ops == &vdev_hole_ops) {
1183 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1184 			    "blkptr at %px DVA %u has hole VDEV %llu",
1185 			    bp, i, (longlong_t)vdevid);
1186 			continue;
1187 		}
1188 		if (vd->vdev_ops == &vdev_missing_ops) {
1189 			/*
1190 			 * "missing" vdevs are valid during import, but we
1191 			 * don't have their detailed info (e.g. asize), so
1192 			 * we can't perform any more checks on them.
1193 			 */
1194 			continue;
1195 		}
1196 		uint64_t offset = DVA_GET_OFFSET(dva);
1197 		uint64_t asize = DVA_GET_ASIZE(dva);
1198 		if (DVA_GET_GANG(dva))
1199 			asize = vdev_gang_header_asize(vd);
1200 		if (offset + asize > vd->vdev_asize) {
1201 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1202 			    "blkptr at %px DVA %u has invalid OFFSET %llu",
1203 			    bp, i, (longlong_t)offset);
1204 		}
1205 	}
1206 	if (blk_config == BLK_CONFIG_NEEDED)
1207 		spa_config_exit(spa, SCL_VDEV, bp);
1208 
1209 	return (errors == 0);
1210 }
1211 
1212 boolean_t
1213 zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
1214 {
1215 	(void) bp;
1216 	uint64_t vdevid = DVA_GET_VDEV(dva);
1217 
1218 	if (vdevid >= spa->spa_root_vdev->vdev_children)
1219 		return (B_FALSE);
1220 
1221 	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1222 	if (vd == NULL)
1223 		return (B_FALSE);
1224 
1225 	if (vd->vdev_ops == &vdev_hole_ops)
1226 		return (B_FALSE);
1227 
1228 	if (vd->vdev_ops == &vdev_missing_ops) {
1229 		return (B_FALSE);
1230 	}
1231 
1232 	uint64_t offset = DVA_GET_OFFSET(dva);
1233 	uint64_t asize = DVA_GET_ASIZE(dva);
1234 
1235 	if (DVA_GET_GANG(dva))
1236 		asize = vdev_gang_header_asize(vd);
1237 	if (offset + asize > vd->vdev_asize)
1238 		return (B_FALSE);
1239 
1240 	return (B_TRUE);
1241 }
1242 
1243 zio_t *
1244 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
1245     abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
1246     zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
1247 {
1248 	zio_t *zio;
1249 
1250 	zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp,
1251 	    data, size, size, done, private,
1252 	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
1253 	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
1254 	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
1255 
1256 	return (zio);
1257 }
1258 
1259 zio_t *
1260 zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
1261     abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
1262     zio_done_func_t *ready, zio_done_func_t *children_ready,
1263     zio_done_func_t *done, void *private, zio_priority_t priority,
1264     zio_flag_t flags, const zbookmark_phys_t *zb)
1265 {
1266 	zio_t *zio;
1267 
1268 	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
1269 	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
1270 	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
1271 	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
1272 	    DMU_OT_IS_VALID(zp->zp_type) &&
1273 	    zp->zp_level < 32 &&
1274 	    zp->zp_copies > 0 &&
1275 	    zp->zp_copies <= spa_max_replication(spa));
1276 
1277 	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
1278 	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
1279 	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
1280 	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
1281 
1282 	zio->io_ready = ready;
1283 	zio->io_children_ready = children_ready;
1284 	zio->io_prop = *zp;
1285 
1286 	/*
1287 	 * Data can be NULL if we are going to call zio_write_override() to
1288 	 * provide the already-allocated BP.  But we may need the data to
1289 	 * verify a dedup hit (if requested).  In this case, don't try to
1290 	 * dedup (just take the already-allocated BP verbatim). Encrypted
1291 	 * dedup blocks need data as well so we also disable dedup in this
1292 	 * case.
1293 	 */
1294 	if (data == NULL &&
1295 	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
1296 		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
1297 	}
1298 
1299 	return (zio);
1300 }
1301 
1302 zio_t *
1303 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
1304     uint64_t size, zio_done_func_t *done, void *private,
1305     zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
1306 {
1307 	zio_t *zio;
1308 
1309 	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
1310 	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
1311 	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
1312 
1313 	return (zio);
1314 }
1315 
1316 void
1317 zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
1318     boolean_t brtwrite)
1319 {
1320 	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
1321 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1322 	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
1323 	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
1324 	ASSERT(!brtwrite || !nopwrite);
1325 
1326 	/*
1327 	 * We must reset the io_prop to match the values that existed
1328 	 * when the bp was first written by dmu_sync(), keeping in mind
1329 	 * that nopwrite and dedup are mutually exclusive.
1330 	 */
1331 	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
1332 	zio->io_prop.zp_nopwrite = nopwrite;
1333 	zio->io_prop.zp_brtwrite = brtwrite;
1334 	zio->io_prop.zp_copies = copies;
1335 	zio->io_bp_override = bp;
1336 }
1337 
1338 void
1339 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
1340 {
1341 
1342 	(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
1343 
1344 	/*
1345 	 * The check for EMBEDDED is a performance optimization.  We
1346 	 * process the free here (by ignoring it) rather than
1347 	 * putting it on the list and then processing it in zio_free_sync().
1348 	 */
1349 	if (BP_IS_EMBEDDED(bp))
1350 		return;
1351 
1352 	/*
1353 	 * Frees that are for the currently-syncing txg, are not going to be
1354 	 * deferred, and which will not need to do a read (i.e. not GANG or
1355 	 * DEDUP), can be processed immediately.  Otherwise, put them on the
1356 	 * in-memory list for later processing.
1357 	 *
1358 	 * Note that we only defer frees after zfs_sync_pass_deferred_free
1359 	 * when the log space map feature is disabled. [see relevant comment
1360 	 * in spa_sync_iterate_to_convergence()]
1361 	 */
1362 	if (BP_IS_GANG(bp) ||
1363 	    BP_GET_DEDUP(bp) ||
1364 	    txg != spa->spa_syncing_txg ||
1365 	    (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
1366 	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
1367 	    brt_maybe_exists(spa, bp)) {
1368 		metaslab_check_free(spa, bp);
1369 		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
1370 	} else {
1371 		VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
1372 	}
1373 }
1374 
1375 /*
1376  * To improve performance, this function may return NULL if we were able
1377  * to do the free immediately.  This avoids the cost of creating a zio
1378  * (and linking it to the parent, etc).
1379  */
1380 zio_t *
1381 zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1382     zio_flag_t flags)
1383 {
1384 	ASSERT(!BP_IS_HOLE(bp));
1385 	ASSERT(spa_syncing_txg(spa) == txg);
1386 
1387 	if (BP_IS_EMBEDDED(bp))
1388 		return (NULL);
1389 
1390 	metaslab_check_free(spa, bp);
1391 	arc_freed(spa, bp);
1392 	dsl_scan_freed(spa, bp);
1393 
1394 	if (BP_IS_GANG(bp) ||
1395 	    BP_GET_DEDUP(bp) ||
1396 	    brt_maybe_exists(spa, bp)) {
1397 		/*
1398 		 * GANG, DEDUP and BRT blocks can induce a read (for the gang
1399 		 * block header, the DDT or the BRT), so issue them
1400 		 * asynchronously so that this thread is not tied up.
1401 		 */
1402 		enum zio_stage stage =
1403 		    ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
1404 
1405 		return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1406 		    BP_GET_PSIZE(bp), NULL, NULL,
1407 		    ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
1408 		    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
1409 	} else {
1410 		metaslab_free(spa, bp, txg, B_FALSE);
1411 		return (NULL);
1412 	}
1413 }
1414 
1415 zio_t *
1416 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1417     zio_done_func_t *done, void *private, zio_flag_t flags)
1418 {
1419 	zio_t *zio;
1420 
1421 	(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
1422 	    BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
1423 
1424 	if (BP_IS_EMBEDDED(bp))
1425 		return (zio_null(pio, spa, NULL, NULL, NULL, 0));
1426 
1427 	/*
1428 	 * A claim is an allocation of a specific block.  Claims are needed
1429 	 * to support immediate writes in the intent log.  The issue is that
1430 	 * immediate writes contain committed data, but in a txg that was
1431 	 * *not* committed.  Upon opening the pool after an unclean shutdown,
1432 	 * the intent log claims all blocks that contain immediate write data
1433 	 * so that the SPA knows they're in use.
1434 	 *
1435 	 * All claims *must* be resolved in the first txg -- before the SPA
1436 	 * starts allocating blocks -- so that nothing is allocated twice.
1437 	 * If txg == 0 we just verify that the block is claimable.
1438 	 */
1439 	ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
1440 	    spa_min_claim_txg(spa));
1441 	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
1442 	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(8) */
1443 
1444 	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1445 	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
1446 	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
1447 	ASSERT0(zio->io_queued_timestamp);
1448 
1449 	return (zio);
1450 }
1451 
1452 zio_t *
1453 zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
1454     zio_done_func_t *done, void *private, zio_flag_t flags)
1455 {
1456 	zio_t *zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
1457 	    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
1458 	    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
1459 	zio->io_cmd = cmd;
1460 	return (zio);
1461 }
1462 
1463 zio_t *
1464 zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1465     zio_done_func_t *done, void *private, zio_priority_t priority,
1466     zio_flag_t flags, enum trim_flag trim_flags)
1467 {
1468 	zio_t *zio;
1469 
1470 	ASSERT0(vd->vdev_children);
1471 	ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
1472 	ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
1473 	ASSERT3U(size, !=, 0);
1474 
1475 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
1476 	    private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
1477 	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
1478 	zio->io_trim_flags = trim_flags;
1479 
1480 	return (zio);
1481 }
1482 
1483 zio_t *
1484 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1485     abd_t *data, int checksum, zio_done_func_t *done, void *private,
1486     zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1487 {
1488 	zio_t *zio;
1489 
1490 	ASSERT(vd->vdev_children == 0);
1491 	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1492 	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1493 	ASSERT3U(offset + size, <=, vd->vdev_psize);
1494 
1495 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1496 	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1497 	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
1498 
1499 	zio->io_prop.zp_checksum = checksum;
1500 
1501 	return (zio);
1502 }
1503 
1504 zio_t *
1505 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1506     abd_t *data, int checksum, zio_done_func_t *done, void *private,
1507     zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1508 {
1509 	zio_t *zio;
1510 
1511 	ASSERT(vd->vdev_children == 0);
1512 	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1513 	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1514 	ASSERT3U(offset + size, <=, vd->vdev_psize);
1515 
1516 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1517 	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1518 	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
1519 
1520 	zio->io_prop.zp_checksum = checksum;
1521 
1522 	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
1523 		/*
1524 		 * zec checksums are necessarily destructive -- they modify
1525 		 * the end of the write buffer to hold the verifier/checksum.
1526 		 * Therefore, we must make a local copy in case the data is
1527 		 * being written to multiple places in parallel.
1528 		 */
1529 		abd_t *wbuf = abd_alloc_sametype(data, size);
1530 		abd_copy(wbuf, data, size);
1531 
1532 		zio_push_transform(zio, wbuf, size, size, NULL);
1533 	}
1534 
1535 	return (zio);
1536 }
1537 
1538 /*
1539  * Create a child I/O to do some work for us.
1540  */
1541 zio_t *
1542 zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
1543     abd_t *data, uint64_t size, int type, zio_priority_t priority,
1544     zio_flag_t flags, zio_done_func_t *done, void *private)
1545 {
1546 	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
1547 	zio_t *zio;
1548 
1549 	/*
1550 	 * vdev child I/Os do not propagate their error to the parent.
1551 	 * Therefore, for correct operation the caller *must* check for
1552 	 * and handle the error in the child i/o's done callback.
1553 	 * The only exceptions are i/os that we don't care about
1554 	 * (OPTIONAL or REPAIR).
1555 	 */
1556 	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
1557 	    done != NULL);
1558 
1559 	if (type == ZIO_TYPE_READ && bp != NULL) {
1560 		/*
1561 		 * If we have the bp, then the child should perform the
1562 		 * checksum and the parent need not.  This pushes error
1563 		 * detection as close to the leaves as possible and
1564 		 * eliminates redundant checksums in the interior nodes.
1565 		 */
1566 		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
1567 		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
1568 	}
1569 
1570 	if (vd->vdev_ops->vdev_op_leaf) {
1571 		ASSERT0(vd->vdev_children);
1572 		offset += VDEV_LABEL_START_SIZE;
1573 	}
1574 
1575 	flags |= ZIO_VDEV_CHILD_FLAGS(pio);
1576 
1577 	/*
1578 	 * If we've decided to do a repair, the write is not speculative --
1579 	 * even if the original read was.
1580 	 */
1581 	if (flags & ZIO_FLAG_IO_REPAIR)
1582 		flags &= ~ZIO_FLAG_SPECULATIVE;
1583 
1584 	/*
1585 	 * If we're creating a child I/O that is not associated with a
1586 	 * top-level vdev, then the child zio is not an allocating I/O.
1587 	 * If this is a retried I/O then we ignore it since we will
1588 	 * have already processed the original allocating I/O.
1589 	 */
1590 	if (flags & ZIO_FLAG_IO_ALLOCATING &&
1591 	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
1592 		ASSERT(pio->io_metaslab_class != NULL);
1593 		ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
1594 		ASSERT(type == ZIO_TYPE_WRITE);
1595 		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
1596 		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
1597 		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
1598 		    pio->io_child_type == ZIO_CHILD_GANG);
1599 
1600 		flags &= ~ZIO_FLAG_IO_ALLOCATING;
1601 	}
1602 
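	/*
	 * Note: ZIO_STAGE_VDEV_IO_START >> 1 is the pipeline stage just
	 * before VDEV_IO_START (i.e. ZIO_STAGE_READY), so the child begins
	 * execution at the vdev I/O stages and is considered READY at
	 * creation.
	 */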
1603 	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
1604 	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
1605 	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
1606 	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
1607 
1608 	return (zio);
1609 }
1610 
1611 zio_t *
1612 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
1613     zio_type_t type, zio_priority_t priority, zio_flag_t flags,
1614     zio_done_func_t *done, void *private)
1615 {
1616 	zio_t *zio;
1617 
1618 	ASSERT(vd->vdev_ops->vdev_op_leaf);
1619 
1620 	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
1621 	    data, size, size, done, private, type, priority,
1622 	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
1623 	    vd, offset, NULL,
1624 	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
1625 
1626 	return (zio);
1627 }
1628 
1629 void
1630 zio_flush(zio_t *pio, vdev_t *vd)
1631 {
1632 	if (vd->vdev_nowritecache)
1633 		return;
1634 	if (vd->vdev_children == 0) {
1635 		zio_nowait(zio_ioctl(pio, vd->vdev_spa, vd,
1636 		    DKIOCFLUSHWRITECACHE, NULL, NULL, ZIO_FLAG_CANFAIL |
1637 		    ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
1638 	} else {
1639 		for (uint64_t c = 0; c < vd->vdev_children; c++)
1640 			zio_flush(pio, vd->vdev_child[c]);
1641 	}
1642 }
1643 
1644 void
1645 zio_shrink(zio_t *zio, uint64_t size)
1646 {
1647 	ASSERT3P(zio->io_executor, ==, NULL);
1648 	ASSERT3U(zio->io_orig_size, ==, zio->io_size);
1649 	ASSERT3U(size, <=, zio->io_size);
1650 
1651 	/*
1652 	 * We don't shrink for raidz because of problems with the
1653 	 * reconstruction when reading back less than the block size.
1654 	 * Note, BP_IS_RAIDZ() assumes no compression.
1655 	 */
1656 	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1657 	if (!BP_IS_RAIDZ(zio->io_bp)) {
1658 		/* we are not doing a raw write */
1659 		ASSERT3U(zio->io_size, ==, zio->io_lsize);
1660 		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
1661 	}
1662 }
1663 
1664 /*
1665  * Round the provided allocation size up to a value that can be allocated
1666  * by at least some vdev(s) in the pool with minimal or no additional
1667  * padding, and without extra space usage on others.
1668  */
1669 static uint64_t
1670 zio_roundup_alloc_size(spa_t *spa, uint64_t size)
1671 {
1672 	if (size > spa->spa_min_alloc)
1673 		return (roundup(size, spa->spa_gcd_alloc));
1674 	return (spa->spa_min_alloc);
1675 }
1676 
1677 /*
1678  * ==========================================================================
1679  * Prepare to read and write logical blocks
1680  * ==========================================================================
1681  */
1682 
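/*
 * Prepare a logical read: push decompression and/or decryption transforms
 * as indicated by the block pointer, decode embedded data blocks in place,
 * and switch to the DDT read pipeline for deduplicated blocks.
 */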
1683 static zio_t *
1684 zio_read_bp_init(zio_t *zio)
1685 {
1686 	blkptr_t *bp = zio->io_bp;
1687 	uint64_t psize =
1688 	    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
1689 
1690 	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1691 
1692 	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
1693 	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
1694 	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1695 		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1696 		    psize, psize, zio_decompress);
1697 	}
1698 
1699 	if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
1700 	    BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
1701 	    zio->io_child_type == ZIO_CHILD_LOGICAL) {
1702 		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1703 		    psize, psize, zio_decrypt);
1704 	}
1705 
1706 	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
1707 		int psize = BPE_GET_PSIZE(bp);
1708 		void *data = abd_borrow_buf(zio->io_abd, psize);
1709 
1710 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1711 		decode_embedded_bp_compressed(bp, data);
1712 		abd_return_buf_copy(zio->io_abd, data, psize);
1713 	} else {
1714 		ASSERT(!BP_IS_EMBEDDED(bp));
1715 	}
1716 
1717 	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
1718 		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
1719 
1720 	return (zio);
1721 }
1722 
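/*
 * If this allocating write carries an override block pointer (e.g. set up
 * by dmu_sync() or a BRT write), adopt it and short-circuit the pipeline
 * where possible: embedded, nopwrite and dedup-able blocks need no new
 * allocation.
 */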
1723 static zio_t *
1724 zio_write_bp_init(zio_t *zio)
1725 {
1726 	if (!IO_IS_ALLOCATING(zio))
1727 		return (zio);
1728 
1729 	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1730 
1731 	if (zio->io_bp_override) {
1732 		blkptr_t *bp = zio->io_bp;
1733 		zio_prop_t *zp = &zio->io_prop;
1734 
1735 		ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg);
1736 
1737 		*bp = *zio->io_bp_override;
1738 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1739 
1740 		if (zp->zp_brtwrite)
1741 			return (zio);
1742 
1743 		ASSERT(!BP_GET_DEDUP(zio->io_bp_override));
1744 
1745 		if (BP_IS_EMBEDDED(bp))
1746 			return (zio);
1747 
1748 		/*
1749 		 * If we've been overridden and nopwrite is set then
1750 		 * set the flag accordingly to indicate that a nopwrite
1751 		 * has already occurred.
1752 		 */
1753 		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
1754 			ASSERT(!zp->zp_dedup);
1755 			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
1756 			zio->io_flags |= ZIO_FLAG_NOPWRITE;
1757 			return (zio);
1758 		}
1759 
1760 		ASSERT(!zp->zp_nopwrite);
1761 
1762 		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
1763 			return (zio);
1764 
1765 		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
1766 		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
1767 
1768 		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
1769 		    !zp->zp_encrypt) {
1770 			BP_SET_DEDUP(bp, 1);
1771 			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
1772 			return (zio);
1773 		}
1774 
1775 		/*
1776 		 * We were unable to handle this as an override bp, treat
1777 		 * it as a regular write I/O.
1778 		 */
1779 		zio->io_bp_override = NULL;
1780 		*bp = zio->io_bp_orig;
1781 		zio->io_pipeline = zio->io_orig_pipeline;
1782 	}
1783 
1784 	return (zio);
1785 }
1786 
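/*
 * Compress the write payload (unless this is a raw write), possibly
 * converting it to an embedded block pointer or rounding it up to the
 * pool's allocation size, then fill in the block pointer properties and
 * select the appropriate write pipeline.
 */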
1787 static zio_t *
1788 zio_write_compress(zio_t *zio)
1789 {
1790 	spa_t *spa = zio->io_spa;
1791 	zio_prop_t *zp = &zio->io_prop;
1792 	enum zio_compress compress = zp->zp_compress;
1793 	blkptr_t *bp = zio->io_bp;
1794 	uint64_t lsize = zio->io_lsize;
1795 	uint64_t psize = zio->io_size;
1796 	uint32_t pass = 1;
1797 
1798 	/*
1799 	 * If our children haven't all reached the ready stage,
1800 	 * wait for them and then repeat this pipeline stage.
1801 	 */
1802 	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
1803 	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
1804 		return (NULL);
1805 	}
1806 
1807 	if (!IO_IS_ALLOCATING(zio))
1808 		return (zio);
1809 
1810 	if (zio->io_children_ready != NULL) {
1811 		/*
1812 		 * Now that all our children are ready, run the callback
1813 		 * associated with this zio in case it wants to modify the
1814 		 * data to be written.
1815 		 */
1816 		ASSERT3U(zp->zp_level, >, 0);
1817 		zio->io_children_ready(zio);
1818 	}
1819 
1820 	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1821 	ASSERT(zio->io_bp_override == NULL);
1822 
1823 	if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) {
1824 		/*
1825 		 * We're rewriting an existing block, which means we're
1826 		 * working on behalf of spa_sync().  For spa_sync() to
1827 		 * converge, it must eventually be the case that we don't
1828 		 * have to allocate new blocks.  But compression changes
1829 		 * the blocksize, which forces a reallocate, and makes
1830 		 * convergence take longer.  Therefore, after the first
1831 		 * few passes, stop compressing to ensure convergence.
1832 		 */
1833 		pass = spa_sync_pass(spa);
1834 
1835 		ASSERT(zio->io_txg == spa_syncing_txg(spa));
1836 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1837 		ASSERT(!BP_GET_DEDUP(bp));
1838 
1839 		if (pass >= zfs_sync_pass_dont_compress)
1840 			compress = ZIO_COMPRESS_OFF;
1841 
1842 		/* Make sure someone doesn't change their mind on overwrites */
1843 		ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) ||
1844 		    MIN(zp->zp_copies, spa_max_replication(spa))
1845 		    == BP_GET_NDVAS(bp));
1846 	}
1847 
1848 	/* If it's a compressed write that is not raw, compress the buffer. */
1849 	if (compress != ZIO_COMPRESS_OFF &&
1850 	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1851 		void *cbuf = NULL;
1852 		psize = zio_compress_data(compress, zio->io_abd, &cbuf, lsize,
1853 		    zp->zp_complevel);
1854 		if (psize == 0) {
1855 			compress = ZIO_COMPRESS_OFF;
1856 		} else if (psize >= lsize) {
1857 			compress = ZIO_COMPRESS_OFF;
1858 			if (cbuf != NULL)
1859 				zio_buf_free(cbuf, lsize);
1860 		} else if (!zp->zp_dedup && !zp->zp_encrypt &&
1861 		    psize <= BPE_PAYLOAD_SIZE &&
1862 		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
1863 		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
1864 			encode_embedded_bp_compressed(bp,
1865 			    cbuf, compress, lsize, psize);
1866 			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
1867 			BP_SET_TYPE(bp, zio->io_prop.zp_type);
1868 			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
1869 			zio_buf_free(cbuf, lsize);
1870 			BP_SET_LOGICAL_BIRTH(bp, zio->io_txg);
1871 			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1872 			ASSERT(spa_feature_is_active(spa,
1873 			    SPA_FEATURE_EMBEDDED_DATA));
1874 			return (zio);
1875 		} else {
1876 			/*
1877 			 * Round compressed size up to the minimum allocation
1878 			 * size of the smallest-ashift device, and zero the
1879 			 * tail. This ensures that the compressed size of the
1880 			 * BP (and thus the compressratio property) is correct,
1881 			 * in that we charge for the padding used to fill out
1882 			 * the last sector.
1883 			 */
1884 			size_t rounded = (size_t)zio_roundup_alloc_size(spa,
1885 			    psize);
1886 			if (rounded >= lsize) {
1887 				compress = ZIO_COMPRESS_OFF;
1888 				zio_buf_free(cbuf, lsize);
1889 				psize = lsize;
1890 			} else {
1891 				abd_t *cdata = abd_get_from_buf(cbuf, lsize);
1892 				abd_take_ownership_of_buf(cdata, B_TRUE);
1893 				abd_zero_off(cdata, psize, rounded - psize);
1894 				psize = rounded;
1895 				zio_push_transform(zio, cdata,
1896 				    psize, lsize, NULL);
1897 			}
1898 		}
1899 
1900 		/*
1901 		 * We were unable to handle this as an override bp, treat
1902 		 * it as a regular write I/O.
1903 		 */
1904 		zio->io_bp_override = NULL;
1905 		*bp = zio->io_bp_orig;
1906 		zio->io_pipeline = zio->io_orig_pipeline;
1907 
1908 	} else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
1909 	    zp->zp_type == DMU_OT_DNODE) {
1910 		/*
1911 		 * The DMU actually relies on the zio layer's compression
1912 		 * to free metadnode blocks that have had all contained
1913 		 * dnodes freed. As a result, even when doing a raw
1914 		 * receive, we must check whether the block can be compressed
1915 		 * to a hole.
1916 		 */
1917 		psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
1918 		    zio->io_abd, NULL, lsize, zp->zp_complevel);
1919 		if (psize == 0 || psize >= lsize)
1920 			compress = ZIO_COMPRESS_OFF;
1921 	} else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
1922 	    !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
1923 		/*
1924 		 * If we are raw receiving an encrypted dataset we should not
1925 		 * take this codepath because it will change the on-disk block
1926 		 * and decryption will fail.
1927 		 */
1928 		size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize),
1929 		    lsize);
1930 
1931 		if (rounded != psize) {
1932 			abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
1933 			abd_zero_off(cdata, psize, rounded - psize);
1934 			abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
1935 			psize = rounded;
1936 			zio_push_transform(zio, cdata,
1937 			    psize, rounded, NULL);
1938 		}
1939 	} else {
1940 		ASSERT3U(psize, !=, 0);
1941 	}
1942 
1943 	/*
1944 	 * The final pass of spa_sync() must be all rewrites, but the first
1945 	 * few passes offer a trade-off: allocating blocks defers convergence,
1946 	 * but newly allocated blocks are sequential, so they can be written
1947 	 * to disk faster.  Therefore, we allow the first few passes of
1948 	 * spa_sync() to allocate new blocks, but force rewrites after that.
1949 	 * There should only be a handful of blocks after pass 1 in any case.
1950 	 */
1951 	if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg &&
1952 	    BP_GET_PSIZE(bp) == psize &&
1953 	    pass >= zfs_sync_pass_rewrite) {
1954 		VERIFY3U(psize, !=, 0);
1955 		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
1956 
1957 		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
1958 		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
1959 	} else {
1960 		BP_ZERO(bp);
1961 		zio->io_pipeline = ZIO_WRITE_PIPELINE;
1962 	}
1963 
1964 	if (psize == 0) {
1965 		if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 &&
1966 		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
1967 			BP_SET_LSIZE(bp, lsize);
1968 			BP_SET_TYPE(bp, zp->zp_type);
1969 			BP_SET_LEVEL(bp, zp->zp_level);
1970 			BP_SET_BIRTH(bp, zio->io_txg, 0);
1971 		}
1972 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1973 	} else {
1974 		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
1975 		BP_SET_LSIZE(bp, lsize);
1976 		BP_SET_TYPE(bp, zp->zp_type);
1977 		BP_SET_LEVEL(bp, zp->zp_level);
1978 		BP_SET_PSIZE(bp, psize);
1979 		BP_SET_COMPRESS(bp, compress);
1980 		BP_SET_CHECKSUM(bp, zp->zp_checksum);
1981 		BP_SET_DEDUP(bp, zp->zp_dedup);
1982 		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
1983 		if (zp->zp_dedup) {
1984 			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1985 			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1986 			ASSERT(!zp->zp_encrypt ||
1987 			    DMU_OT_IS_ENCRYPTED(zp->zp_type));
1988 			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
1989 		}
1990 		if (zp->zp_nopwrite) {
1991 			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1992 			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1993 			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
1994 		}
1995 	}
1996 	return (zio);
1997 }
1998 
1999 static zio_t *
2000 zio_free_bp_init(zio_t *zio)
2001 {
2002 	blkptr_t *bp = zio->io_bp;
2003 
2004 	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
2005 		if (BP_GET_DEDUP(bp))
2006 			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
2007 	}
2008 
2009 	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
2010 
2011 	return (zio);
2012 }
2013 
2014 /*
2015  * ==========================================================================
2016  * Execute the I/O pipeline
2017  * ==========================================================================
2018  */
2019 
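/*
 * Dispatch the zio to one of the pool's per-I/O-type taskqs for
 * asynchronous execution, preferring the high priority taskq for
 * ZIO_PRIORITY_NOW and ZIO_PRIORITY_SYNC_WRITE zios when available.
 */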
2020 static void
2021 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
2022 {
2023 	spa_t *spa = zio->io_spa;
2024 	zio_type_t t = zio->io_type;
2025 	int flags = (cutinline ? TQ_FRONT : 0);
2026 
2027 	/*
2028 	 * If we're a config writer or a probe, the normal issue and
2029 	 * interrupt threads may all be blocked waiting for the config lock.
2030 	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
2031 	 */
2032 	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
2033 		t = ZIO_TYPE_NULL;
2034 
2035 	/*
2036 	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
2037 	 */
2038 	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
2039 		t = ZIO_TYPE_NULL;
2040 
2041 	/*
2042 	 * If this is a high priority I/O, then use the high priority taskq if
2043 	 * available.
2044 	 */
2045 	if ((zio->io_priority == ZIO_PRIORITY_NOW ||
2046 	    zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
2047 	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
2048 		q++;
2049 
2050 	ASSERT3U(q, <, ZIO_TASKQ_TYPES);
2051 
2052 	/*
2053 	 * NB: We are assuming that the zio can only be dispatched
2054 	 * to a single taskq at a time.  It would be a grievous error
2055 	 * to dispatch the zio to another taskq at the same time.
2056 	 */
2057 	ASSERT(taskq_empty_ent(&zio->io_tqent));
2058 	spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
2059 	    &zio->io_tqent, zio);
2060 }
2061 
2062 static boolean_t
2063 zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
2064 {
2065 	spa_t *spa = zio->io_spa;
2066 
2067 	taskq_t *tq = taskq_of_curthread();
2068 
2069 	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
2070 		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
2071 		uint_t i;
2072 		for (i = 0; i < tqs->stqs_count; i++) {
2073 			if (tqs->stqs_taskq[i] == tq)
2074 				return (B_TRUE);
2075 		}
2076 	}
2077 
2078 	return (B_FALSE);
2079 }
2080 
2081 static zio_t *
2082 zio_issue_async(zio_t *zio)
2083 {
2084 	ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio));
2085 	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
2086 	return (NULL);
2087 }
2088 
2089 void
2090 zio_interrupt(void *zio)
2091 {
2092 	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
2093 }
2094 
2095 void
2096 zio_delay_interrupt(zio_t *zio)
2097 {
2098 	/*
2099 	 * The timeout_generic() function isn't defined in userspace, so
2100 	 * rather than trying to implement the function, the zio delay
2101 	 * functionality has been disabled for userspace builds.
2102 	 */
2103 
2104 #ifdef _KERNEL
2105 	/*
2106 	 * If io_target_timestamp is zero, then no delay has been registered
2107 	 * for this IO, thus jump to the end of this function and "skip" the
2108 	 * for this IO, so we "skip" the delay and issue it directly to
2109 	 * the zio layer.
2110 	if (zio->io_target_timestamp != 0) {
2111 		hrtime_t now = gethrtime();
2112 
2113 		if (now >= zio->io_target_timestamp) {
2114 			/*
2115 			 * This IO has already taken longer than the target
2116 			 * delay to complete, so we don't want to delay it
2117 			 * any longer; we "miss" the delay and issue it
2118 			 * directly to the zio layer. This is likely due to
2119 			 * the target latency being set to a value less than
2120 			 * the underlying hardware can satisfy (e.g. delay
2121 			 * set to 1ms, but the disks take 10ms to complete an
2122 			 * IO request).
2123 			 */
2124 
2125 			DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
2126 			    hrtime_t, now);
2127 
2128 			zio_interrupt(zio);
2129 		} else {
2130 			taskqid_t tid;
2131 			hrtime_t diff = zio->io_target_timestamp - now;
2132 			clock_t expire_at_tick = ddi_get_lbolt() +
2133 			    NSEC_TO_TICK(diff);
2134 
2135 			DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
2136 			    hrtime_t, now, hrtime_t, diff);
2137 
2138 			if (NSEC_TO_TICK(diff) == 0) {
2139 				/* Our delay is less than a jiffy - just spin */
2140 				zfs_sleep_until(zio->io_target_timestamp);
2141 				zio_interrupt(zio);
2142 			} else {
2143 				/*
2144 				 * Use taskq_dispatch_delay() in the place of
2145 				 * OpenZFS's timeout_generic().
2146 				 */
2147 				tid = taskq_dispatch_delay(system_taskq,
2148 				    zio_interrupt, zio, TQ_NOSLEEP,
2149 				    expire_at_tick);
2150 				if (tid == TASKQID_INVALID) {
2151 					/*
2152 					 * Couldn't allocate a task.  Just
2153 					 * finish the zio without a delay.
2154 					 */
2155 					zio_interrupt(zio);
2156 				}
2157 			}
2158 		}
2159 		return;
2160 	}
2161 #endif
2162 	DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
2163 	zio_interrupt(zio);
2164 }
2165 
2166 static void
2167 zio_deadman_impl(zio_t *pio, int ziodepth)
2168 {
2169 	zio_t *cio, *cio_next;
2170 	zio_link_t *zl = NULL;
2171 	vdev_t *vd = pio->io_vd;
2172 
2173 	if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
2174 		vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
2175 		zbookmark_phys_t *zb = &pio->io_bookmark;
2176 		uint64_t delta = gethrtime() - pio->io_timestamp;
2177 		uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
2178 
2179 		zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
2180 		    "delta=%llu queued=%llu io=%llu "
2181 		    "path=%s "
2182 		    "last=%llu type=%d "
2183 		    "priority=%d flags=0x%llx stage=0x%x "
2184 		    "pipeline=0x%x pipeline-trace=0x%x "
2185 		    "objset=%llu object=%llu "
2186 		    "level=%llu blkid=%llu "
2187 		    "offset=%llu size=%llu "
2188 		    "error=%d",
2189 		    ziodepth, pio, pio->io_timestamp,
2190 		    (u_longlong_t)delta, pio->io_delta, pio->io_delay,
2191 		    vd ? vd->vdev_path : "NULL",
2192 		    vq ? vq->vq_io_complete_ts : 0, pio->io_type,
2193 		    pio->io_priority, (u_longlong_t)pio->io_flags,
2194 		    pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
2195 		    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
2196 		    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
2197 		    (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
2198 		    pio->io_error);
2199 		(void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
2200 		    pio->io_spa, vd, zb, pio, 0);
2201 
2202 		if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
2203 		    taskq_empty_ent(&pio->io_tqent)) {
2204 			zio_interrupt(pio);
2205 		}
2206 	}
2207 
2208 	mutex_enter(&pio->io_lock);
2209 	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2210 		cio_next = zio_walk_children(pio, &zl);
2211 		zio_deadman_impl(cio, ziodepth + 1);
2212 	}
2213 	mutex_exit(&pio->io_lock);
2214 }
2215 
2216 /*
2217  * Log the critical information describing this zio and all of its children
2218  * using the zfs_dbgmsg() interface then post deadman event for the ZED.
2219  * using the zfs_dbgmsg() interface, then post a deadman event for the ZED.
2220 void
2221 zio_deadman(zio_t *pio, const char *tag)
2222 {
2223 	spa_t *spa = pio->io_spa;
2224 	char *name = spa_name(spa);
2225 
2226 	if (!zfs_deadman_enabled || spa_suspended(spa))
2227 		return;
2228 
2229 	zio_deadman_impl(pio, 0);
2230 
2231 	switch (spa_get_deadman_failmode(spa)) {
2232 	case ZIO_FAILURE_MODE_WAIT:
2233 		zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
2234 		break;
2235 
2236 	case ZIO_FAILURE_MODE_CONTINUE:
2237 		zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
2238 		break;
2239 
2240 	case ZIO_FAILURE_MODE_PANIC:
2241 		fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
2242 		break;
2243 	}
2244 }
2245 
2246 /*
2247  * Execute the I/O pipeline until one of the following occurs:
2248  * (1) the I/O completes; (2) the pipeline stalls waiting for
2249  * dependent child I/Os; (3) the I/O issues, so we're waiting
2250  * for an I/O completion interrupt; (4) the I/O is delegated by
2251  * vdev-level caching or aggregation; (5) the I/O is deferred
2252  * due to vdev-level queueing; (6) the I/O is handed off to
2253  * another thread.  In all cases, the pipeline stops whenever
2254  * there's no CPU work; it never burns a thread in cv_wait_io().
2255  *
2256  * There's no locking on io_stage because there's no legitimate way
2257  * for multiple threads to be attempting to process the same I/O.
2258  */
2259 static zio_pipe_stage_t *zio_pipeline[];
2260 
2261 /*
2262  * zio_execute() is a wrapper around the static function
2263  * __zio_execute() so that we can force  __zio_execute() to be
2264  * __zio_execute() so that we can force __zio_execute() to be
2265  * inlined.  This reduces stack overhead, which is important
2266  * code paths.  zio_execute() itself cannot be inlined because
2267  * it is externally visible.
2268  */
2269 void
2270 zio_execute(void *zio)
2271 {
2272 	fstrans_cookie_t cookie;
2273 
2274 	cookie = spl_fstrans_mark();
2275 	__zio_execute(zio);
2276 	spl_fstrans_unmark(cookie);
2277 }
2278 
2279 /*
2280  * Used to determine whether the stack in the current context is large
2281  * enough to allow zio_execute() to be called recursively.  A minimum
2282  * stack size of 16K is required to avoid needing to re-dispatch the zio.
2283  */
2284 static boolean_t
2285 zio_execute_stack_check(zio_t *zio)
2286 {
2287 #if !defined(HAVE_LARGE_STACKS)
2288 	dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
2289 
2290 	/* Executing in txg_sync_thread() context. */
2291 	if (dp && curthread == dp->dp_tx.tx_sync_thread)
2292 		return (B_TRUE);
2293 
2294 	/* Pool initialization outside of zio_taskq context. */
2295 	if (dp && spa_is_initializing(dp->dp_spa) &&
2296 	    !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
2297 	    !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
2298 		return (B_TRUE);
2299 #else
2300 	(void) zio;
2301 #endif /* HAVE_LARGE_STACKS */
2302 
2303 	return (B_FALSE);
2304 }
2305 
2306 __attribute__((always_inline))
2307 static inline void
2308 __zio_execute(zio_t *zio)
2309 {
2310 	ASSERT3U(zio->io_queued_timestamp, >, 0);
2311 
2312 	while (zio->io_stage < ZIO_STAGE_DONE) {
2313 		enum zio_stage pipeline = zio->io_pipeline;
2314 		enum zio_stage stage = zio->io_stage;
2315 
2316 		zio->io_executor = curthread;
2317 
2318 		ASSERT(!MUTEX_HELD(&zio->io_lock));
2319 		ASSERT(ISP2(stage));
2320 		ASSERT(zio->io_stall == NULL);
2321 
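		/*
		 * Advance 'stage' to the next stage bit that is actually
		 * present in this zio's pipeline.
		 */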
2322 		do {
2323 			stage <<= 1;
2324 		} while ((stage & pipeline) == 0);
2325 
2326 		ASSERT(stage <= ZIO_STAGE_DONE);
2327 
2328 		/*
2329 		 * If we are in interrupt context and this pipeline stage
2330 		 * will grab a config lock that is held across I/O,
2331 		 * or may wait for an I/O that needs an interrupt thread
2332 		 * to complete, issue async to avoid deadlock.
2333 		 *
2334 		 * For VDEV_IO_START, we cut in line so that the io will
2335 		 * be sent to disk promptly.
2336 		 */
2337 		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
2338 		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
2339 			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2340 			    zio_requeue_io_start_cut_in_line : B_FALSE;
2341 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2342 			return;
2343 		}
2344 
2345 		/*
2346 		 * If the current context doesn't have large enough stacks
2347 		 * the zio must be issued asynchronously to prevent overflow.
2348 		 */
2349 		if (zio_execute_stack_check(zio)) {
2350 			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2351 			    zio_requeue_io_start_cut_in_line : B_FALSE;
2352 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2353 			return;
2354 		}
2355 
2356 		zio->io_stage = stage;
2357 		zio->io_pipeline_trace |= zio->io_stage;
2358 
2359 		/*
2360 		 * The zio pipeline stage returns the next zio to execute
2361 		 * (typically the same as this one), or NULL if we should
2362 		 * stop.
2363 		 */
2364 		zio = zio_pipeline[highbit64(stage) - 1](zio);
2365 
2366 		if (zio == NULL)
2367 			return;
2368 	}
2369 }
2370 
2371 
2372 /*
2373  * ==========================================================================
2374  * Initiate I/O, either sync or async
2375  * ==========================================================================
2376  */
2377 int
2378 zio_wait(zio_t *zio)
2379 {
2380 	/*
2381 	 * Some routines, like zio_free_sync(), may return a NULL zio
2382 	 * to avoid the performance overhead of creating and then destroying
2383 	 * an unneeded zio.  For the callers' simplicity, we accept a NULL
2384 	 * zio and ignore it.
2385 	 */
2386 	if (zio == NULL)
2387 		return (0);
2388 
2389 	long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
2390 	int error;
2391 
2392 	ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
2393 	ASSERT3P(zio->io_executor, ==, NULL);
2394 
2395 	zio->io_waiter = curthread;
2396 	ASSERT0(zio->io_queued_timestamp);
2397 	zio->io_queued_timestamp = gethrtime();
2398 
2399 	if (zio->io_type == ZIO_TYPE_WRITE) {
2400 		spa_select_allocator(zio);
2401 	}
2402 	__zio_execute(zio);
2403 
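	/*
	 * Wait for the zio to complete.  If it takes longer than the
	 * deadman ziotime threshold, log it via zio_deadman() and keep
	 * waiting.
	 */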
2404 	mutex_enter(&zio->io_lock);
2405 	while (zio->io_executor != NULL) {
2406 		error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
2407 		    ddi_get_lbolt() + timeout);
2408 
2409 		if (zfs_deadman_enabled && error == -1 &&
2410 		    gethrtime() - zio->io_queued_timestamp >
2411 		    spa_deadman_ziotime(zio->io_spa)) {
2412 			mutex_exit(&zio->io_lock);
2413 			timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
2414 			zio_deadman(zio, FTAG);
2415 			mutex_enter(&zio->io_lock);
2416 		}
2417 	}
2418 	mutex_exit(&zio->io_lock);
2419 
2420 	error = zio->io_error;
2421 	zio_destroy(zio);
2422 
2423 	return (error);
2424 }
2425 
2426 void
2427 zio_nowait(zio_t *zio)
2428 {
2429 	/*
2430 	 * See comment in zio_wait().
2431 	 */
2432 	if (zio == NULL)
2433 		return;
2434 
2435 	ASSERT3P(zio->io_executor, ==, NULL);
2436 
2437 	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
2438 	    list_is_empty(&zio->io_parent_list)) {
2439 		zio_t *pio;
2440 
2441 		/*
2442 		 * This is a logical async I/O with no parent to wait for it.
2443 		 * We add it to the spa_async_zio_root "Godfather" I/O, which
2444 		 * will ensure it completes prior to unloading the pool.
2445 		 */
2446 		spa_t *spa = zio->io_spa;
2447 		pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
2448 
2449 		zio_add_child(pio, zio);
2450 	}
2451 
2452 	ASSERT0(zio->io_queued_timestamp);
2453 	zio->io_queued_timestamp = gethrtime();
2454 	if (zio->io_type == ZIO_TYPE_WRITE) {
2455 		spa_select_allocator(zio);
2456 	}
2457 	__zio_execute(zio);
2458 }
2459 
2460 /*
2461  * ==========================================================================
2462  * Reexecute, cancel, or suspend/resume failed I/O
2463  * ==========================================================================
2464  */
2465 
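/*
 * Reset a previously failed logical zio (and, recursively, all of its
 * children) back to its original state and run it through the pipeline
 * again.
 */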
2466 static void
2467 zio_reexecute(void *arg)
2468 {
2469 	zio_t *pio = arg;
2470 	zio_t *cio, *cio_next, *gio;
2471 
2472 	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
2473 	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
2474 	ASSERT(pio->io_gang_leader == NULL);
2475 	ASSERT(pio->io_gang_tree == NULL);
2476 
2477 	mutex_enter(&pio->io_lock);
2478 	pio->io_flags = pio->io_orig_flags;
2479 	pio->io_stage = pio->io_orig_stage;
2480 	pio->io_pipeline = pio->io_orig_pipeline;
2481 	pio->io_reexecute = 0;
2482 	pio->io_flags |= ZIO_FLAG_REEXECUTED;
2483 	pio->io_pipeline_trace = 0;
2484 	pio->io_error = 0;
2485 	pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) ||
2486 	    (pio->io_pipeline & ZIO_STAGE_READY) == 0;
2487 	pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE);
2488 	zio_link_t *zl = NULL;
2489 	while ((gio = zio_walk_parents(pio, &zl)) != NULL) {
2490 		for (int w = 0; w < ZIO_WAIT_TYPES; w++) {
2491 			gio->io_children[pio->io_child_type][w] +=
2492 			    !pio->io_state[w];
2493 		}
2494 	}
2495 	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
2496 		pio->io_child_error[c] = 0;
2497 
2498 	if (IO_IS_ALLOCATING(pio))
2499 		BP_ZERO(pio->io_bp);
2500 
2501 	/*
2502 	 * As we reexecute pio's children, new children could be created.
2503 	 * New children go to the head of pio's io_child_list, however,
2504 	 * so we will (correctly) not reexecute them.  The key is that
2505 	 * the remainder of pio's io_child_list, from 'cio_next' onward,
2506 	 * cannot be affected by any side effects of reexecuting 'cio'.
2507 	 */
2508 	zl = NULL;
2509 	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2510 		cio_next = zio_walk_children(pio, &zl);
2511 		mutex_exit(&pio->io_lock);
2512 		zio_reexecute(cio);
2513 		mutex_enter(&pio->io_lock);
2514 	}
2515 	mutex_exit(&pio->io_lock);
2516 
2517 	/*
2518 	 * Now that all children have been reexecuted, execute the parent.
2519 	 * We don't reexecute "The Godfather" I/O here as it's the
2520 	 * responsibility of the caller to wait on it.
2521 	 */
2522 	if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
2523 		pio->io_queued_timestamp = gethrtime();
2524 		__zio_execute(pio);
2525 	}
2526 }
2527 
2528 void
2529 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
2530 {
2531 	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
2532 		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
2533 		    "failure and the failure mode property for this pool "
2534 		    "is set to panic.", spa_name(spa));
2535 
2536 	cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O "
2537 	    "failure and has been suspended.\n", spa_name(spa));
2538 
2539 	(void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
2540 	    NULL, NULL, 0);
2541 
2542 	mutex_enter(&spa->spa_suspend_lock);
2543 
2544 	if (spa->spa_suspend_zio_root == NULL)
2545 		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
2546 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2547 		    ZIO_FLAG_GODFATHER);
2548 
2549 	spa->spa_suspended = reason;
2550 
2551 	if (zio != NULL) {
2552 		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
2553 		ASSERT(zio != spa->spa_suspend_zio_root);
2554 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2555 		ASSERT(zio_unique_parent(zio) == NULL);
2556 		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
2557 		zio_add_child(spa->spa_suspend_zio_root, zio);
2558 	}
2559 
2560 	mutex_exit(&spa->spa_suspend_lock);
2561 }
2562 
2563 int
2564 zio_resume(spa_t *spa)
2565 {
2566 	zio_t *pio;
2567 
2568 	/*
2569 	 * Reexecute all previously suspended i/o.
2570 	 */
2571 	mutex_enter(&spa->spa_suspend_lock);
2572 	spa->spa_suspended = ZIO_SUSPEND_NONE;
2573 	cv_broadcast(&spa->spa_suspend_cv);
2574 	pio = spa->spa_suspend_zio_root;
2575 	spa->spa_suspend_zio_root = NULL;
2576 	mutex_exit(&spa->spa_suspend_lock);
2577 
2578 	if (pio == NULL)
2579 		return (0);
2580 
2581 	zio_reexecute(pio);
2582 	return (zio_wait(pio));
2583 }
2584 
2585 void
2586 zio_resume_wait(spa_t *spa)
2587 {
2588 	mutex_enter(&spa->spa_suspend_lock);
2589 	while (spa_suspended(spa))
2590 		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
2591 	mutex_exit(&spa->spa_suspend_lock);
2592 }
2593 
2594 /*
2595  * ==========================================================================
2596  * Gang blocks.
2597  *
2598  * A gang block is a collection of small blocks that looks to the DMU
2599  * like one large block.  When zio_dva_allocate() cannot find a block
2600  * of the requested size, due to either severe fragmentation or the pool
2601  * being nearly full, it calls zio_write_gang_block() to construct the
2602  * block from smaller fragments.
2603  *
2604  * A gang block consists of a gang header (zio_gbh_phys_t) and up to
2605  * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
2606  * an indirect block: it's an array of block pointers.  It consumes
2607  * only one sector and hence is allocatable regardless of fragmentation.
2608  * The gang header's bps point to its gang members, which hold the data.
2609  *
2610  * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
2611  * as the verifier to ensure uniqueness of the SHA256 checksum.
2612  * Critically, the gang block bp's blk_cksum is the checksum of the data,
2613  * not the gang header.  This ensures that data block signatures (needed for
2614  * deduplication) are independent of how the block is physically stored.
2615  *
2616  * Gang blocks can be nested: a gang member may itself be a gang block.
2617  * Thus every gang block is a tree in which root and all interior nodes are
2618  * gang headers, and the leaves are normal blocks that contain user data.
2619  * The root of the gang tree is called the gang leader.
2620  *
2621  * To perform any operation (read, rewrite, free, claim) on a gang block,
2622  * zio_gang_assemble() first assembles the gang tree (minus data leaves)
2623  * in the io_gang_tree field of the original logical i/o by recursively
2624  * reading the gang leader and all gang headers below it.  This yields
2625  * an in-core tree containing the contents of every gang header and the
2626  * bps for every constituent of the gang block.
2627  *
2628  * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
2629  * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
2630  * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
2631  * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
2632  * zio_read_gang() is a wrapper around zio_read() that omits reading gang
2633  * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
2634  * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
2635  * of the gang header plus zio_checksum_compute() of the data to update the
2636  * gang header's blk_cksum as described above.
2637  *
2638  * The two-phase assemble/issue model solves the problem of partial failure --
2639  * what if you'd freed part of a gang block but then couldn't read the
2640  * gang header for another part?  Assembling the entire gang tree first
2641  * ensures that all the necessary gang header I/O has succeeded before
2642  * starting the actual work of free, claim, or write.  Once the gang tree
2643  * is assembled, free and claim are in-memory operations that cannot fail.
2644  *
2645  * In the event that a gang write fails, zio_dva_unallocate() walks the
2646  * gang tree to immediately free (i.e. insert back into the space map)
2647  * everything we've allocated.  This ensures that we don't get ENOSPC
2648  * errors during repeated suspend/resume cycles due to a flaky device.
2649  *
2650  * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
2651  * the gang tree, we won't modify the block, so we can safely defer the free
2652  * (knowing that the block is still intact).  If we *can* assemble the gang
2653  * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
2654  * each constituent bp and we can allocate a new block on the next sync pass.
2655  *
2656  * In all cases, the gang tree allows complete recovery from partial failure.
2657  * ==========================================================================
2658  */
2659 
2660 static void
2661 zio_gang_issue_func_done(zio_t *zio)
2662 {
2663 	abd_free(zio->io_abd);
2664 }
2665 
2666 static zio_t *
2667 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2668     uint64_t offset)
2669 {
2670 	if (gn != NULL)
2671 		return (pio);
2672 
2673 	return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
2674 	    BP_GET_PSIZE(bp), zio_gang_issue_func_done,
2675 	    NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2676 	    &pio->io_bookmark));
2677 }
2678 
2679 static zio_t *
2680 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2681     uint64_t offset)
2682 {
2683 	zio_t *zio;
2684 
2685 	if (gn != NULL) {
2686 		abd_t *gbh_abd =
2687 		    abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2688 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2689 		    gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
2690 		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2691 		    &pio->io_bookmark);
2692 		/*
2693 		 * As we rewrite each gang header, the pipeline will compute
2694 		 * a new gang block header checksum for it; but no one will
2695 		 * compute a new data checksum, so we do that here.  The one
2696 		 * exception is the gang leader: the pipeline already computed
2697 		 * its data checksum because that stage precedes gang assembly.
2698 		 * (Presently, nothing actually uses interior data checksums;
2699 		 * this is just good hygiene.)
2700 		 */
2701 		if (gn != pio->io_gang_leader->io_gang_tree) {
2702 			abd_t *buf = abd_get_offset(data, offset);
2703 
2704 			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
2705 			    buf, BP_GET_PSIZE(bp));
2706 
2707 			abd_free(buf);
2708 		}
2709 		/*
2710 		 * If we are here to damage data for testing purposes,
2711 		 * leave the GBH alone so that we can detect the damage.
2712 		 */
2713 		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
2714 			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
2715 	} else {
2716 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2717 		    abd_get_offset(data, offset), BP_GET_PSIZE(bp),
2718 		    zio_gang_issue_func_done, NULL, pio->io_priority,
2719 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2720 	}
2721 
2722 	return (zio);
2723 }
2724 
2725 static zio_t *
2726 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2727     uint64_t offset)
2728 {
2729 	(void) gn, (void) data, (void) offset;
2730 
2731 	zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
2732 	    ZIO_GANG_CHILD_FLAGS(pio));
2733 	if (zio == NULL) {
2734 		zio = zio_null(pio, pio->io_spa,
2735 		    NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
2736 	}
2737 	return (zio);
2738 }
2739 
2740 static zio_t *
2741 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2742     uint64_t offset)
2743 {
2744 	(void) gn, (void) data, (void) offset;
2745 	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
2746 	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
2747 }
2748 
2749 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
2750 	NULL,
2751 	zio_read_gang,
2752 	zio_rewrite_gang,
2753 	zio_free_gang,
2754 	zio_claim_gang,
2755 	NULL
2756 };
2757 
2758 static void zio_gang_tree_assemble_done(zio_t *zio);
2759 
2760 static zio_gang_node_t *
2761 zio_gang_node_alloc(zio_gang_node_t **gnpp)
2762 {
2763 	zio_gang_node_t *gn;
2764 
2765 	ASSERT(*gnpp == NULL);
2766 
2767 	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
2768 	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
2769 	*gnpp = gn;
2770 
2771 	return (gn);
2772 }
2773 
2774 static void
2775 zio_gang_node_free(zio_gang_node_t **gnpp)
2776 {
2777 	zio_gang_node_t *gn = *gnpp;
2778 
2779 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2780 		ASSERT(gn->gn_child[g] == NULL);
2781 
2782 	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2783 	kmem_free(gn, sizeof (*gn));
2784 	*gnpp = NULL;
2785 }
2786 
2787 static void
2788 zio_gang_tree_free(zio_gang_node_t **gnpp)
2789 {
2790 	zio_gang_node_t *gn = *gnpp;
2791 
2792 	if (gn == NULL)
2793 		return;
2794 
2795 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2796 		zio_gang_tree_free(&gn->gn_child[g]);
2797 
2798 	zio_gang_node_free(gnpp);
2799 }
2800 
2801 static void
2802 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
2803 {
2804 	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
2805 	abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2806 
2807 	ASSERT(gio->io_gang_leader == gio);
2808 	ASSERT(BP_IS_GANG(bp));
2809 
2810 	zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
2811 	    zio_gang_tree_assemble_done, gn, gio->io_priority,
2812 	    ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
2813 }
2814 
2815 static void
2816 zio_gang_tree_assemble_done(zio_t *zio)
2817 {
2818 	zio_t *gio = zio->io_gang_leader;
2819 	zio_gang_node_t *gn = zio->io_private;
2820 	blkptr_t *bp = zio->io_bp;
2821 
2822 	ASSERT(gio == zio_unique_parent(zio));
2823 	ASSERT(list_is_empty(&zio->io_child_list));
2824 
2825 	if (zio->io_error)
2826 		return;
2827 
2828 	/* this ABD was created from a linear buf in zio_gang_tree_assemble */
2829 	if (BP_SHOULD_BYTESWAP(bp))
2830 		byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
2831 
2832 	ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
2833 	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
2834 	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2835 
2836 	abd_free(zio->io_abd);
2837 
2838 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2839 		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2840 		if (!BP_IS_GANG(gbp))
2841 			continue;
2842 		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
2843 	}
2844 }
2845 
2846 static void
2847 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
2848     uint64_t offset)
2849 {
2850 	zio_t *gio = pio->io_gang_leader;
2851 	zio_t *zio;
2852 
2853 	ASSERT(BP_IS_GANG(bp) == !!gn);
2854 	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
2855 	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
2856 
2857 	/*
2858 	 * If you're a gang header, your data is in gn->gn_gbh.
2859 	 * If you're a gang member, your data is in 'data' and gn == NULL.
2860 	 */
2861 	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
2862 
2863 	if (gn != NULL) {
2864 		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2865 
2866 		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2867 			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2868 			if (BP_IS_HOLE(gbp))
2869 				continue;
2870 			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
2871 			    offset);
2872 			offset += BP_GET_PSIZE(gbp);
2873 		}
2874 	}
2875 
2876 	if (gn == gio->io_gang_tree)
2877 		ASSERT3U(gio->io_size, ==, offset);
2878 
2879 	if (zio != pio)
2880 		zio_nowait(zio);
2881 }
2882 
2883 static zio_t *
2884 zio_gang_assemble(zio_t *zio)
2885 {
2886 	blkptr_t *bp = zio->io_bp;
2887 
2888 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
2889 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2890 
2891 	zio->io_gang_leader = zio;
2892 
2893 	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
2894 
2895 	return (zio);
2896 }
2897 
2898 static zio_t *
2899 zio_gang_issue(zio_t *zio)
2900 {
2901 	blkptr_t *bp = zio->io_bp;
2902 
2903 	if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
2904 		return (NULL);
2905 	}
2906 
2907 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
2908 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2909 
2910 	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
2911 		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
2912 		    0);
2913 	else
2914 		zio_gang_tree_free(&zio->io_gang_tree);
2915 
2916 	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2917 
2918 	return (zio);
2919 }
2920 
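/*
 * Propagate the gang parent's allocator and write-issue taskq to a gang
 * child so that every zio in a gang tree uses the same allocator.
 */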
2921 static void
2922 zio_gang_inherit_allocator(zio_t *pio, zio_t *cio)
2923 {
2924 	cio->io_allocator = pio->io_allocator;
2925 	cio->io_wr_iss_tq = pio->io_wr_iss_tq;
2926 }
2927 
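/*
 * Once a gang member has been allocated, fold its allocated size into the
 * corresponding DVAs of the parent gang header's block pointer.
 */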
2928 static void
2929 zio_write_gang_member_ready(zio_t *zio)
2930 {
2931 	zio_t *pio = zio_unique_parent(zio);
2932 	dva_t *cdva = zio->io_bp->blk_dva;
2933 	dva_t *pdva = pio->io_bp->blk_dva;
2934 	uint64_t asize;
2935 	zio_t *gio __maybe_unused = zio->io_gang_leader;
2936 
2937 	if (BP_IS_HOLE(zio->io_bp))
2938 		return;
2939 
2940 	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
2941 
2942 	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
2943 	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
2944 	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
2945 	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
2946 	VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
2947 
2948 	mutex_enter(&pio->io_lock);
2949 	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
2950 		ASSERT(DVA_GET_GANG(&pdva[d]));
2951 		asize = DVA_GET_ASIZE(&pdva[d]);
2952 		asize += DVA_GET_ASIZE(&cdva[d]);
2953 		DVA_SET_ASIZE(&pdva[d], asize);
2954 	}
2955 	mutex_exit(&pio->io_lock);
2956 }
2957 
2958 static void
2959 zio_write_gang_done(zio_t *zio)
2960 {
2961 	/*
2962 	 * The io_abd field will be NULL for a zio with no data.  The io_flags
2963 	 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
2964 	 * check for it here as it is cleared in zio_ready.
2965 	 */
2966 	if (zio->io_abd != NULL)
2967 		abd_free(zio->io_abd);
2968 }
2969 
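/*
 * Allocate a gang header for 'pio' and split its payload into up to
 * SPA_GBH_NBLKPTRS gang members, each written as a child zio.  See the
 * gang block comment above for the overall scheme.
 */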
2970 static zio_t *
2971 zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
2972 {
2973 	spa_t *spa = pio->io_spa;
2974 	blkptr_t *bp = pio->io_bp;
2975 	zio_t *gio = pio->io_gang_leader;
2976 	zio_t *zio;
2977 	zio_gang_node_t *gn, **gnpp;
2978 	zio_gbh_phys_t *gbh;
2979 	abd_t *gbh_abd;
2980 	uint64_t txg = pio->io_txg;
2981 	uint64_t resid = pio->io_size;
2982 	uint64_t lsize;
2983 	int copies = gio->io_prop.zp_copies;
2984 	zio_prop_t zp;
2985 	int error;
2986 	boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
2987 
2988 	/*
2989 	 * If one copy was requested, store 2 copies of the GBH, so that we
2990 	 * can still traverse all the data (e.g. to free or scrub) even if a
2991 	 * block is damaged.  Note that we can't store 3 copies of the GBH in
2992 	 * all cases, e.g. with encryption, which uses DVA[2] for the IV+salt.
2993 	 */
2994 	int gbh_copies = copies;
2995 	if (gbh_copies == 1) {
2996 		gbh_copies = MIN(2, spa_max_replication(spa));
2997 	}
2998 
2999 	ASSERT(ZIO_HAS_ALLOCATOR(pio));
3000 	int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
3001 	if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3002 		ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3003 		ASSERT(has_data);
3004 
3005 		flags |= METASLAB_ASYNC_ALLOC;
3006 		VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
3007 		    mca_alloc_slots, pio));
3008 
3009 		/*
3010 		 * The logical zio has already placed a reservation for
3011 		 * 'copies' allocation slots but gang blocks may require
3012 		 * additional copies. These additional copies
3013 		 * (i.e. gbh_copies - copies) are guaranteed to succeed
3014 		 * since metaslab_class_throttle_reserve() always allows
3015 		 * additional reservations for gang blocks.
3016 		 */
3017 		VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
3018 		    pio->io_allocator, pio, flags));
3019 	}
3020 
3021 	error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
3022 	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
3023 	    &pio->io_alloc_list, pio, pio->io_allocator);
3024 	if (error) {
3025 		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3026 			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3027 			ASSERT(has_data);
3028 
3029 			/*
3030 			 * If we failed to allocate the gang block header then
3031 			 * we remove any additional allocation reservations that
3032 			 * we placed here. The original reservation will
3033 			 * be removed when the logical I/O goes to the ready
3034 			 * stage.
3035 			 */
3036 			metaslab_class_throttle_unreserve(mc,
3037 			    gbh_copies - copies, pio->io_allocator, pio);
3038 		}
3039 
3040 		pio->io_error = error;
3041 		return (pio);
3042 	}
3043 
3044 	if (pio == gio) {
3045 		gnpp = &gio->io_gang_tree;
3046 	} else {
3047 		gnpp = pio->io_private;
3048 		ASSERT(pio->io_ready == zio_write_gang_member_ready);
3049 	}
3050 
3051 	gn = zio_gang_node_alloc(gnpp);
3052 	gbh = gn->gn_gbh;
3053 	memset(gbh, 0, SPA_GANGBLOCKSIZE);
3054 	gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
3055 
3056 	/*
3057 	 * Create the gang header.
3058 	 */
3059 	zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
3060 	    zio_write_gang_done, NULL, pio->io_priority,
3061 	    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
3062 
3063 	zio_gang_inherit_allocator(pio, zio);
3064 
3065 	/*
3066 	 * Create and nowait the gang children.
3067 	 */
3068 	for (int g = 0; resid != 0; resid -= lsize, g++) {
3069 		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
3070 		    SPA_MINBLOCKSIZE);
3071 		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
3072 
3073 		zp.zp_checksum = gio->io_prop.zp_checksum;
3074 		zp.zp_compress = ZIO_COMPRESS_OFF;
3075 		zp.zp_complevel = gio->io_prop.zp_complevel;
3076 		zp.zp_type = DMU_OT_NONE;
3077 		zp.zp_level = 0;
3078 		zp.zp_copies = gio->io_prop.zp_copies;
3079 		zp.zp_dedup = B_FALSE;
3080 		zp.zp_dedup_verify = B_FALSE;
3081 		zp.zp_nopwrite = B_FALSE;
3082 		zp.zp_encrypt = gio->io_prop.zp_encrypt;
3083 		zp.zp_byteorder = gio->io_prop.zp_byteorder;
3084 		memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
3085 		memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
3086 		memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
3087 
3088 		zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
3089 		    has_data ? abd_get_offset(pio->io_abd, pio->io_size -
3090 		    resid) : NULL, lsize, lsize, &zp,
3091 		    zio_write_gang_member_ready, NULL,
3092 		    zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
3093 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
3094 
3095 		zio_gang_inherit_allocator(zio, cio);
3096 
3097 		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3098 			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3099 			ASSERT(has_data);
3100 
3101 			/*
3102 			 * Gang children won't throttle but we should
3103 			 * account for their work, so reserve an allocation
3104 			 * slot for them here.
3105 			 */
3106 			VERIFY(metaslab_class_throttle_reserve(mc,
3107 			    zp.zp_copies, cio->io_allocator, cio, flags));
3108 		}
3109 		zio_nowait(cio);
3110 	}
3111 
3112 	/*
3113 	 * Set pio's pipeline to just wait for zio to finish.
3114 	 */
3115 	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3116 
3117 	zio_nowait(zio);
3118 
3119 	return (pio);
3120 }
3121 
3122 /*
3123  * The zio_nop_write stage in the pipeline determines if allocating a
3124  * new bp is necessary.  The nopwrite feature can handle writes in
3125  * either syncing or open context (i.e. zil writes) and as a result is
3126  * mutually exclusive with dedup.
3127  *
3128  * By leveraging a cryptographically secure checksum, such as SHA256, we
3129  * can compare the checksums of the new data and the old to determine if
3130  * allocating a new block is required.  Note that our requirements for
3131  * cryptographic strength are fairly weak: there can't be any accidental
3132  * hash collisions, but we don't need to be secure against intentional
3133  * (malicious) collisions.  To trigger a nopwrite, you have to be able
3134  * to write the file to begin with, and triggering an incorrect (hash
3135  * collision) nopwrite is no worse than simply writing to the file.
3136  * That said, there are no known attacks against the checksum algorithms
3137  * used for nopwrite, assuming that the salt and the checksums
3138  * themselves remain secret.
3139  */
3140 static zio_t *
3141 zio_nop_write(zio_t *zio)
3142 {
3143 	blkptr_t *bp = zio->io_bp;
3144 	blkptr_t *bp_orig = &zio->io_bp_orig;
3145 	zio_prop_t *zp = &zio->io_prop;
3146 
3147 	ASSERT(BP_IS_HOLE(bp));
3148 	ASSERT(BP_GET_LEVEL(bp) == 0);
3149 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
3150 	ASSERT(zp->zp_nopwrite);
3151 	ASSERT(!zp->zp_dedup);
3152 	ASSERT(zio->io_bp_override == NULL);
3153 	ASSERT(IO_IS_ALLOCATING(zio));
3154 
3155 	/*
3156 	 * Check to see if the original bp and the new bp have matching
3157 	 * characteristics (i.e. same checksum, compression algorithms, etc).
3158 	 * If they don't then just continue with the pipeline which will
3159 	 * allocate a new bp.
3160 	 */
3161 	if (BP_IS_HOLE(bp_orig) ||
3162 	    !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
3163 	    ZCHECKSUM_FLAG_NOPWRITE) ||
3164 	    BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
3165 	    BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
3166 	    BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
3167 	    BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
3168 	    zp->zp_copies != BP_GET_NDVAS(bp_orig))
3169 		return (zio);
3170 
3171 	/*
3172 	 * If the checksums match then reset the pipeline so that we
3173 	 * avoid allocating a new bp and issuing any I/O.
3174 	 */
3175 	if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
3176 		ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
3177 		    ZCHECKSUM_FLAG_NOPWRITE);
3178 		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
3179 		ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
3180 		ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
3181 		ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
3182 
3183 		/*
3184 		 * If we're overwriting a block that is currently on an
3185 		 * indirect vdev, then ignore the nopwrite request and
3186 		 * allow a new block to be allocated on a concrete vdev.
3187 		 */
3188 		spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
3189 		for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
3190 			vdev_t *tvd = vdev_lookup_top(zio->io_spa,
3191 			    DVA_GET_VDEV(&bp_orig->blk_dva[d]));
3192 			if (tvd->vdev_ops == &vdev_indirect_ops) {
3193 				spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3194 				return (zio);
3195 			}
3196 		}
3197 		spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3198 
3199 		*bp = *bp_orig;
3200 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3201 		zio->io_flags |= ZIO_FLAG_NOPWRITE;
3202 	}
3203 
3204 	return (zio);
3205 }
3206 
3207 /*
3208  * ==========================================================================
3209  * Block Reference Table
3210  * ==========================================================================
3211  */
3212 static zio_t *
3213 zio_brt_free(zio_t *zio)
3214 {
3215 	blkptr_t *bp;
3216 
3217 	bp = zio->io_bp;
3218 
3219 	if (BP_GET_LEVEL(bp) > 0 ||
3220 	    BP_IS_METADATA(bp) ||
3221 	    !brt_maybe_exists(zio->io_spa, bp)) {
3222 		return (zio);
3223 	}
3224 
3225 	if (!brt_entry_decref(zio->io_spa, bp)) {
3226 		/*
3227 		 * This isn't the last reference, so we cannot free
3228 		 * the data yet.
3229 		 */
3230 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3231 	}
3232 
3233 	return (zio);
3234 }
3235 
3236 /*
3237  * ==========================================================================
3238  * Dedup
3239  * ==========================================================================
3240  */
3241 static void
3242 zio_ddt_child_read_done(zio_t *zio)
3243 {
3244 	blkptr_t *bp = zio->io_bp;
3245 	ddt_entry_t *dde = zio->io_private;
3246 	ddt_phys_t *ddp;
3247 	zio_t *pio = zio_unique_parent(zio);
3248 
3249 	mutex_enter(&pio->io_lock);
3250 	ddp = ddt_phys_select(dde, bp);
3251 	if (zio->io_error == 0)
3252 		ddt_phys_clear(ddp);	/* this ddp doesn't need repair */
3253 
3254 	if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
3255 		dde->dde_repair_abd = zio->io_abd;
3256 	else
3257 		abd_free(zio->io_abd);
3258 	mutex_exit(&pio->io_lock);
3259 }
3260 
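/*
 * Issue the read for a dedup'd block.  If a previous DDT child read
 * failed, read every other copy recorded in the dedup table so that
 * zio_ddt_read_done() can repair the damaged one.
 */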
3261 static zio_t *
3262 zio_ddt_read_start(zio_t *zio)
3263 {
3264 	blkptr_t *bp = zio->io_bp;
3265 
3266 	ASSERT(BP_GET_DEDUP(bp));
3267 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3268 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3269 
3270 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
3271 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
3272 		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
3273 		ddt_phys_t *ddp = dde->dde_phys;
3274 		ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
3275 		blkptr_t blk;
3276 
3277 		ASSERT(zio->io_vsd == NULL);
3278 		zio->io_vsd = dde;
3279 
3280 		if (ddp_self == NULL)
3281 			return (zio);
3282 
3283 		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
3284 			if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
3285 				continue;
3286 			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
3287 			    &blk);
3288 			zio_nowait(zio_read(zio, zio->io_spa, &blk,
3289 			    abd_alloc_for_io(zio->io_size, B_TRUE),
3290 			    zio->io_size, zio_ddt_child_read_done, dde,
3291 			    zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
3292 			    ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
3293 		}
3294 		return (zio);
3295 	}
3296 
3297 	zio_nowait(zio_read(zio, zio->io_spa, bp,
3298 	    zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
3299 	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
3300 
3301 	return (zio);
3302 }
3303 
3304 static zio_t *
3305 zio_ddt_read_done(zio_t *zio)
3306 {
3307 	blkptr_t *bp = zio->io_bp;
3308 
3309 	if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
3310 		return (NULL);
3311 	}
3312 
3313 	ASSERT(BP_GET_DEDUP(bp));
3314 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3315 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3316 
3317 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
3318 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
3319 		ddt_entry_t *dde = zio->io_vsd;
3320 		if (ddt == NULL) {
3321 			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
3322 			return (zio);
3323 		}
3324 		if (dde == NULL) {
3325 			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
3326 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
3327 			return (NULL);
3328 		}
3329 		if (dde->dde_repair_abd != NULL) {
3330 			abd_copy(zio->io_abd, dde->dde_repair_abd,
3331 			    zio->io_size);
3332 			zio->io_child_error[ZIO_CHILD_DDT] = 0;
3333 		}
3334 		ddt_repair_done(ddt, dde);
3335 		zio->io_vsd = NULL;
3336 	}
3337 
3338 	ASSERT(zio->io_vsd == NULL);
3339 
3340 	return (zio);
3341 }
3342 
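/*
 * Determine whether this zio's data matches the data already referenced by
 * the DDT entry (used for dedup=verify).  We compare against any in-flight
 * lead zios first, and otherwise read the existing on-disk copies back and
 * compare them.  Returns B_TRUE if the data differs or cannot be verified.
 */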
3343 static boolean_t
3344 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
3345 {
3346 	spa_t *spa = zio->io_spa;
3347 	boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
3348 
3349 	ASSERT(!(zio->io_bp_override && do_raw));
3350 
3351 	/*
3352 	 * Note: we compare the original data, not the transformed data,
3353 	 * because when zio->io_bp is an override bp, we will not have
3354 	 * pushed the I/O transforms.  That's an important optimization
3355 	 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
3356 	 * However, we should never get a raw, override zio so in these
3357 	 * cases we can compare the io_abd directly. This is useful because
3358 	 * it allows us to do dedup verification even if we don't have access
3359 	 * to the original data (for instance, if the encryption keys aren't
3360 	 * loaded).
3361 	 */
3362 
3363 	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
3364 		zio_t *lio = dde->dde_lead_zio[p];
3365 
3366 		if (lio != NULL && do_raw) {
3367 			return (lio->io_size != zio->io_size ||
3368 			    abd_cmp(zio->io_abd, lio->io_abd) != 0);
3369 		} else if (lio != NULL) {
3370 			return (lio->io_orig_size != zio->io_orig_size ||
3371 			    abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
3372 		}
3373 	}
3374 
3375 	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
3376 		ddt_phys_t *ddp = &dde->dde_phys[p];
3377 
3378 		if (ddp->ddp_phys_birth != 0 && do_raw) {
3379 			blkptr_t blk = *zio->io_bp;
3380 			uint64_t psize;
3381 			abd_t *tmpabd;
3382 			int error;
3383 
3384 			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
3385 			psize = BP_GET_PSIZE(&blk);
3386 
3387 			if (psize != zio->io_size)
3388 				return (B_TRUE);
3389 
3390 			ddt_exit(ddt);
3391 
3392 			tmpabd = abd_alloc_for_io(psize, B_TRUE);
3393 
3394 			error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
3395 			    psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
3396 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3397 			    ZIO_FLAG_RAW, &zio->io_bookmark));
3398 
3399 			if (error == 0) {
3400 				if (abd_cmp(tmpabd, zio->io_abd) != 0)
3401 					error = SET_ERROR(ENOENT);
3402 			}
3403 
3404 			abd_free(tmpabd);
3405 			ddt_enter(ddt);
3406 			return (error != 0);
3407 		} else if (ddp->ddp_phys_birth != 0) {
3408 			arc_buf_t *abuf = NULL;
3409 			arc_flags_t aflags = ARC_FLAG_WAIT;
3410 			blkptr_t blk = *zio->io_bp;
3411 			int error;
3412 
3413 			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
3414 
3415 			if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
3416 				return (B_TRUE);
3417 
3418 			ddt_exit(ddt);
3419 
3420 			error = arc_read(NULL, spa, &blk,
3421 			    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
3422 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3423 			    &aflags, &zio->io_bookmark);
3424 
3425 			if (error == 0) {
3426 				if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
3427 				    zio->io_orig_size) != 0)
3428 					error = SET_ERROR(ENOENT);
3429 				arc_buf_destroy(abuf, &abuf);
3430 			}
3431 
3432 			ddt_enter(ddt);
3433 			return (error != 0);
3434 		}
3435 	}
3436 
3437 	return (B_FALSE);
3438 }
3439 
3440 static void
3441 zio_ddt_child_write_ready(zio_t *zio)
3442 {
3443 	int p = zio->io_prop.zp_copies;
3444 	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3445 	ddt_entry_t *dde = zio->io_private;
3446 	ddt_phys_t *ddp = &dde->dde_phys[p];
3447 	zio_t *pio;
3448 
3449 	if (zio->io_error)
3450 		return;
3451 
3452 	ddt_enter(ddt);
3453 
3454 	ASSERT(dde->dde_lead_zio[p] == zio);
3455 
3456 	ddt_phys_fill(ddp, zio->io_bp);
3457 
3458 	zio_link_t *zl = NULL;
3459 	while ((pio = zio_walk_parents(zio, &zl)) != NULL)
3460 		ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
3461 
3462 	ddt_exit(ddt);
3463 }
3464 
3465 static void
3466 zio_ddt_child_write_done(zio_t *zio)
3467 {
3468 	int p = zio->io_prop.zp_copies;
3469 	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3470 	ddt_entry_t *dde = zio->io_private;
3471 	ddt_phys_t *ddp = &dde->dde_phys[p];
3472 
3473 	ddt_enter(ddt);
3474 
3475 	ASSERT(ddp->ddp_refcnt == 0);
3476 	ASSERT(dde->dde_lead_zio[p] == zio);
3477 	dde->dde_lead_zio[p] = NULL;
3478 
3479 	if (zio->io_error == 0) {
3480 		zio_link_t *zl = NULL;
3481 		while (zio_walk_parents(zio, &zl) != NULL)
3482 			ddt_phys_addref(ddp);
3483 	} else {
3484 		ddt_phys_clear(ddp);
3485 	}
3486 
3487 	ddt_exit(ddt);
3488 }
3489 
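/*
 * Write a dedup'd block.  Look up (or create) the DDT entry for this
 * checksum.  If dedup=verify detects a collision, fall back to an ordinary
 * write (possibly after upgrading to a stronger checksum).  If a matching
 * copy already exists or is in flight, just fill in the bp and take a
 * reference; otherwise issue a child write to create the first copy.
 */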
3490 static zio_t *
3491 zio_ddt_write(zio_t *zio)
3492 {
3493 	spa_t *spa = zio->io_spa;
3494 	blkptr_t *bp = zio->io_bp;
3495 	uint64_t txg = zio->io_txg;
3496 	zio_prop_t *zp = &zio->io_prop;
3497 	int p = zp->zp_copies;
3498 	zio_t *cio = NULL;
3499 	ddt_t *ddt = ddt_select(spa, bp);
3500 	ddt_entry_t *dde;
3501 	ddt_phys_t *ddp;
3502 
3503 	ASSERT(BP_GET_DEDUP(bp));
3504 	ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
3505 	ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
3506 	ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
3507 
3508 	ddt_enter(ddt);
3509 	dde = ddt_lookup(ddt, bp, B_TRUE);
3510 	ddp = &dde->dde_phys[p];
3511 
3512 	if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
3513 		/*
3514 		 * If we're using a weak checksum, upgrade to a strong checksum
3515 		 * and try again.  If we're already using a strong checksum,
3516 		 * we can't resolve it, so just convert to an ordinary write.
3517 		 * (And automatically e-mail a paper to Nature?)
3518 		 */
3519 		if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
3520 		    ZCHECKSUM_FLAG_DEDUP)) {
3521 			zp->zp_checksum = spa_dedup_checksum(spa);
3522 			zio_pop_transforms(zio);
3523 			zio->io_stage = ZIO_STAGE_OPEN;
3524 			BP_ZERO(bp);
3525 		} else {
3526 			zp->zp_dedup = B_FALSE;
3527 			BP_SET_DEDUP(bp, B_FALSE);
3528 		}
3529 		ASSERT(!BP_GET_DEDUP(bp));
3530 		zio->io_pipeline = ZIO_WRITE_PIPELINE;
3531 		ddt_exit(ddt);
3532 		return (zio);
3533 	}
3534 
3535 	if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
3536 		if (ddp->ddp_phys_birth != 0)
3537 			ddt_bp_fill(ddp, bp, txg);
3538 		if (dde->dde_lead_zio[p] != NULL)
3539 			zio_add_child(zio, dde->dde_lead_zio[p]);
3540 		else
3541 			ddt_phys_addref(ddp);
3542 	} else if (zio->io_bp_override) {
3543 		ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg);
3544 		ASSERT(BP_EQUAL(bp, zio->io_bp_override));
3545 		ddt_phys_fill(ddp, bp);
3546 		ddt_phys_addref(ddp);
3547 	} else {
3548 		cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
3549 		    zio->io_orig_size, zio->io_orig_size, zp,
3550 		    zio_ddt_child_write_ready, NULL,
3551 		    zio_ddt_child_write_done, dde, zio->io_priority,
3552 		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
3553 
3554 		zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
3555 		dde->dde_lead_zio[p] = cio;
3556 	}
3557 
3558 	ddt_exit(ddt);
3559 
3560 	zio_nowait(cio);
3561 
3562 	return (zio);
3563 }
3564 
3565 static ddt_entry_t *freedde; /* for debugging */
3566 
3567 static zio_t *
3568 zio_ddt_free(zio_t *zio)
3569 {
3570 	spa_t *spa = zio->io_spa;
3571 	blkptr_t *bp = zio->io_bp;
3572 	ddt_t *ddt = ddt_select(spa, bp);
3573 	ddt_entry_t *dde;
3574 	ddt_phys_t *ddp;
3575 
3576 	ASSERT(BP_GET_DEDUP(bp));
3577 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3578 
3579 	ddt_enter(ddt);
3580 	freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
3581 	if (dde) {
3582 		ddp = ddt_phys_select(dde, bp);
3583 		if (ddp)
3584 			ddt_phys_decref(ddp);
3585 	}
3586 	ddt_exit(ddt);
3587 
3588 	return (zio);
3589 }
3590 
3591 /*
3592  * ==========================================================================
3593  * Allocate and free blocks
3594  * ==========================================================================
3595  */
3596 
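/*
 * With the allocator's spaa_lock held, peek at the highest-priority queued
 * zio and try to reserve allocation throttle slots for it.  If the
 * reservation fails the zio stays queued (throttled); otherwise it is
 * removed from the tree and returned for allocation.
 */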
3597 static zio_t *
3598 zio_io_to_allocate(spa_t *spa, int allocator)
3599 {
3600 	zio_t *zio;
3601 
3602 	ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
3603 
3604 	zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
3605 	if (zio == NULL)
3606 		return (NULL);
3607 
3608 	ASSERT(IO_IS_ALLOCATING(zio));
3609 	ASSERT(ZIO_HAS_ALLOCATOR(zio));
3610 
3611 	/*
3612 	 * Try to place a reservation for this zio. If we're unable to
3613 	 * reserve then we throttle.
3614 	 */
3615 	ASSERT3U(zio->io_allocator, ==, allocator);
3616 	if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
3617 	    zio->io_prop.zp_copies, allocator, zio, 0)) {
3618 		return (NULL);
3619 	}
3620 
3621 	avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
3622 	ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
3623 
3624 	return (zio);
3625 }
3626 
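/*
 * DVA throttle stage.  Writes exempt from throttling (sync writes, gang
 * children, NODATA writes, or classes with the throttle disabled) pass
 * straight through.  Everything else is queued on its allocator, and the
 * next zio permitted to allocate (possibly a different one, or none at
 * all) is returned to the pipeline.
 */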
3627 static zio_t *
3628 zio_dva_throttle(zio_t *zio)
3629 {
3630 	spa_t *spa = zio->io_spa;
3631 	zio_t *nio;
3632 	metaslab_class_t *mc;
3633 
3634 	/* locate an appropriate allocation class */
3635 	mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
3636 	    zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);
3637 
3638 	if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
3639 	    !mc->mc_alloc_throttle_enabled ||
3640 	    zio->io_child_type == ZIO_CHILD_GANG ||
3641 	    zio->io_flags & ZIO_FLAG_NODATA) {
3642 		return (zio);
3643 	}
3644 
3645 	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
3646 	ASSERT(ZIO_HAS_ALLOCATOR(zio));
3647 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3648 	ASSERT3U(zio->io_queued_timestamp, >, 0);
3649 	ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
3650 
3651 	int allocator = zio->io_allocator;
3652 	zio->io_metaslab_class = mc;
3653 	mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
3654 	avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
3655 	nio = zio_io_to_allocate(spa, allocator);
3656 	mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
3657 	return (nio);
3658 }
3659 
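/*
 * Pull the next zio (if any) that can now be allocated from the given
 * allocator's queue and dispatch it to the issue taskq to resume its
 * pipeline.
 */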
3660 static void
3661 zio_allocate_dispatch(spa_t *spa, int allocator)
3662 {
3663 	zio_t *zio;
3664 
3665 	mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
3666 	zio = zio_io_to_allocate(spa, allocator);
3667 	mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
3668 	if (zio == NULL)
3669 		return;
3670 
3671 	ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
3672 	ASSERT0(zio->io_error);
3673 	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
3674 }
3675 
3676 static zio_t *
3677 zio_dva_allocate(zio_t *zio)
3678 {
3679 	spa_t *spa = zio->io_spa;
3680 	metaslab_class_t *mc;
3681 	blkptr_t *bp = zio->io_bp;
3682 	int error;
3683 	int flags = 0;
3684 
3685 	if (zio->io_gang_leader == NULL) {
3686 		ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3687 		zio->io_gang_leader = zio;
3688 	}
3689 
3690 	ASSERT(BP_IS_HOLE(bp));
3691 	ASSERT0(BP_GET_NDVAS(bp));
3692 	ASSERT3U(zio->io_prop.zp_copies, >, 0);
3693 	ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
3694 	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
3695 
3696 	if (zio->io_flags & ZIO_FLAG_NODATA)
3697 		flags |= METASLAB_DONT_THROTTLE;
3698 	if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
3699 		flags |= METASLAB_GANG_CHILD;
3700 	if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
3701 		flags |= METASLAB_ASYNC_ALLOC;
3702 
3703 	/*
3704 	 * if not already chosen, locate an appropriate allocation class
3705 	 */
3706 	mc = zio->io_metaslab_class;
3707 	if (mc == NULL) {
3708 		mc = spa_preferred_class(spa, zio->io_size,
3709 		    zio->io_prop.zp_type, zio->io_prop.zp_level,
3710 		    zio->io_prop.zp_zpl_smallblk);
3711 		zio->io_metaslab_class = mc;
3712 	}
3713 
3714 	/*
3715 	 * Try allocating the block in the usual metaslab class.
3716 	 * If that's full, allocate it in the normal class.
3717 	 * If that's full, allocate as a gang block,
3718 	 * and if all are full, the allocation fails (which shouldn't happen).
3719 	 *
3720 	 * Note that we do not fall back on embedded slog (ZIL) space, to
3721 	 * preserve unfragmented slog space, which is critical for decent
3722 	 * sync write performance.  If a log allocation fails, we will fall
3723 	 * back to spa_sync() which is abysmal for performance.
3724 	 */
3725 	ASSERT(ZIO_HAS_ALLOCATOR(zio));
3726 	error = metaslab_alloc(spa, mc, zio->io_size, bp,
3727 	    zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
3728 	    &zio->io_alloc_list, zio, zio->io_allocator);
3729 
3730 	/*
3731 	 * Fall back to the normal class when an allocation class is full
3732 	 */
3733 	if (error == ENOSPC && mc != spa_normal_class(spa)) {
3734 		/*
3735 		 * If throttling, transfer reservation over to normal class.
3736 		 * The io_allocator slot can remain the same even though we
3737 		 * are switching classes.
3738 		 */
3739 		if (mc->mc_alloc_throttle_enabled &&
3740 		    (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
3741 			metaslab_class_throttle_unreserve(mc,
3742 			    zio->io_prop.zp_copies, zio->io_allocator, zio);
3743 			zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
3744 
3745 			VERIFY(metaslab_class_throttle_reserve(
3746 			    spa_normal_class(spa),
3747 			    zio->io_prop.zp_copies, zio->io_allocator, zio,
3748 			    flags | METASLAB_MUST_RESERVE));
3749 		}
3750 		zio->io_metaslab_class = mc = spa_normal_class(spa);
3751 		if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
3752 			zfs_dbgmsg("%s: metaslab allocation failure, "
3753 			    "trying normal class: zio %px, size %llu, error %d",
3754 			    spa_name(spa), zio, (u_longlong_t)zio->io_size,
3755 			    error);
3756 		}
3757 
3758 		error = metaslab_alloc(spa, mc, zio->io_size, bp,
3759 		    zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
3760 		    &zio->io_alloc_list, zio, zio->io_allocator);
3761 	}
3762 
3763 	if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
3764 		if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
3765 			zfs_dbgmsg("%s: metaslab allocation failure, "
3766 			    "trying ganging: zio %px, size %llu, error %d",
3767 			    spa_name(spa), zio, (u_longlong_t)zio->io_size,
3768 			    error);
3769 		}
3770 		return (zio_write_gang_block(zio, mc));
3771 	}
3772 	if (error != 0) {
3773 		if (error != ENOSPC ||
3774 		    (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
3775 			zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
3776 			    "size %llu, error %d",
3777 			    spa_name(spa), zio, (u_longlong_t)zio->io_size,
3778 			    error);
3779 		}
3780 		zio->io_error = error;
3781 	}
3782 
3783 	return (zio);
3784 }
3785 
3786 static zio_t *
3787 zio_dva_free(zio_t *zio)
3788 {
3789 	metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
3790 
3791 	return (zio);
3792 }
3793 
3794 static zio_t *
3795 zio_dva_claim(zio_t *zio)
3796 {
3797 	int error;
3798 
3799 	error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
3800 	if (error)
3801 		zio->io_error = error;
3802 
3803 	return (zio);
3804 }
3805 
3806 /*
3807  * Undo an allocation.  This is used by zio_done() when an I/O fails
3808  * and we want to give back the block we just allocated.
3809  * This handles both normal blocks and gang blocks.
3810  */
3811 static void
3812 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
3813 {
3814 	ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
3815 	ASSERT(zio->io_bp_override == NULL);
3816 
3817 	if (!BP_IS_HOLE(bp)) {
3818 		metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp),
3819 		    B_TRUE);
3820 	}
3821 
3822 	if (gn != NULL) {
3823 		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
3824 			zio_dva_unallocate(zio, gn->gn_child[g],
3825 			    &gn->gn_gbh->zg_blkptr[g]);
3826 		}
3827 	}
3828 }
3829 
3830 /*
3831  * Try to allocate an intent log block.  Return 0 on success, errno on failure.
3832  */
3833 int
3834 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
3835     uint64_t size, boolean_t *slog)
3836 {
3837 	int error = 1;
3838 	zio_alloc_list_t io_alloc_list;
3839 
3840 	ASSERT(txg > spa_syncing_txg(spa));
3841 
3842 	metaslab_trace_init(&io_alloc_list);
3843 
3844 	/*
3845 	 * Block pointer fields are useful to metaslabs for stats and debugging.
3846 	 * Fill in the obvious ones before calling into metaslab_alloc().
3847 	 */
3848 	BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
3849 	BP_SET_PSIZE(new_bp, size);
3850 	BP_SET_LEVEL(new_bp, 0);
3851 
3852 	/*
3853 	 * When allocating a zil block, we don't have information about
3854 	 * the final destination of the block except the objset it's part
3855 	 * of, so we just hash the objset ID to pick the allocator to get
3856 	 * some parallelism.
3857 	 */
3858 	int flags = METASLAB_ZIL;
3859 	int allocator = (uint_t)cityhash4(0, 0, 0,
3860 	    os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
3861 	error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
3862 	    txg, NULL, flags, &io_alloc_list, NULL, allocator);
3863 	*slog = (error == 0);
3864 	if (error != 0) {
3865 		error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
3866 		    new_bp, 1, txg, NULL, flags,
3867 		    &io_alloc_list, NULL, allocator);
3868 	}
3869 	if (error != 0) {
3870 		error = metaslab_alloc(spa, spa_normal_class(spa), size,
3871 		    new_bp, 1, txg, NULL, flags,
3872 		    &io_alloc_list, NULL, allocator);
3873 	}
3874 	metaslab_trace_fini(&io_alloc_list);
3875 
3876 	if (error == 0) {
3877 		BP_SET_LSIZE(new_bp, size);
3878 		BP_SET_PSIZE(new_bp, size);
3879 		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
3880 		BP_SET_CHECKSUM(new_bp,
3881 		    spa_version(spa) >= SPA_VERSION_SLIM_ZIL
3882 		    ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
3883 		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
3884 		BP_SET_LEVEL(new_bp, 0);
3885 		BP_SET_DEDUP(new_bp, 0);
3886 		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
3887 
3888 		/*
3889 		 * Encrypted blocks will require an IV and salt. We generate
3890 		 * these now, since zio_encrypt() will not regenerate them for
3891 		 * intent log blocks; it decodes them from the bp instead.
3892 		 */
3893 		if (os->os_encrypted) {
3894 			uint8_t iv[ZIO_DATA_IV_LEN];
3895 			uint8_t salt[ZIO_DATA_SALT_LEN];
3896 
3897 			BP_SET_CRYPT(new_bp, B_TRUE);
3898 			VERIFY0(spa_crypt_get_salt(spa,
3899 			    dmu_objset_id(os), salt));
3900 			VERIFY0(zio_crypt_generate_iv(iv));
3901 
3902 			zio_crypt_encode_params_bp(new_bp, salt, iv);
3903 		}
3904 	} else {
3905 		zfs_dbgmsg("%s: zil block allocation failure: "
3906 		    "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
3907 		    error);
3908 	}
3909 
3910 	return (error);
3911 }
3912 
3913 /*
3914  * ==========================================================================
3915  * Read and write to physical devices
3916  * ==========================================================================
3917  */
3918 
3919 /*
3920  * Issue an I/O to the underlying vdev. Typically the issue pipeline
3921  * stops after this stage and will resume upon I/O completion.
3922  * However, there are instances where the vdev layer may need to
3923  * continue the pipeline when an I/O was not issued. Since the I/O
3924  * that was sent to the vdev layer might be different than the one
3925  * currently active in the pipeline (see vdev_queue_io()), we explicitly
3926  * force the underlying vdev layers to call either zio_execute() or
3927  * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
3928  */
3929 static zio_t *
3930 zio_vdev_io_start(zio_t *zio)
3931 {
3932 	vdev_t *vd = zio->io_vd;
3933 	uint64_t align;
3934 	spa_t *spa = zio->io_spa;
3935 
3936 	zio->io_delay = 0;
3937 
3938 	ASSERT(zio->io_error == 0);
3939 	ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
3940 
3941 	if (vd == NULL) {
3942 		if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
3943 			spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
3944 
3945 		/*
3946 		 * The mirror_ops handle multiple DVAs in a single BP.
3947 		 */
3948 		vdev_mirror_ops.vdev_op_io_start(zio);
3949 		return (NULL);
3950 	}
3951 
3952 	ASSERT3P(zio->io_logical, !=, zio);
3953 	if (zio->io_type == ZIO_TYPE_WRITE) {
3954 		ASSERT(spa->spa_trust_config);
3955 
3956 		/*
3957 		 * Note: the code can handle other kinds of writes,
3958 		 * but we don't expect them.
3959 		 */
3960 		if (zio->io_vd->vdev_noalloc) {
3961 			ASSERT(zio->io_flags &
3962 			    (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
3963 			    ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
3964 		}
3965 	}
3966 
3967 	align = 1ULL << vd->vdev_top->vdev_ashift;
3968 
3969 	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
3970 	    P2PHASE(zio->io_size, align) != 0) {
3971 		/* Transform logical writes to be a full physical block size. */
3972 		uint64_t asize = P2ROUNDUP(zio->io_size, align);
3973 		abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
3974 		ASSERT(vd == vd->vdev_top);
3975 		if (zio->io_type == ZIO_TYPE_WRITE) {
3976 			abd_copy(abuf, zio->io_abd, zio->io_size);
3977 			abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
3978 		}
3979 		zio_push_transform(zio, abuf, asize, asize, zio_subblock);
3980 	}
3981 
3982 	/*
3983 	 * If this is not a physical io, make sure that it is properly aligned
3984 	 * before proceeding.
3985 	 */
3986 	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
3987 		ASSERT0(P2PHASE(zio->io_offset, align));
3988 		ASSERT0(P2PHASE(zio->io_size, align));
3989 	} else {
3990 		/*
3991 		 * For physical writes, we allow 512b aligned writes and assume
3992 		 * the device will perform a read-modify-write as necessary.
3993 		 */
3994 		ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
3995 		ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
3996 	}
3997 
3998 	VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
3999 
4000 	/*
4001 	 * If this is a repair I/O, and there's no self-healing involved --
4002 	 * that is, we're just resilvering what we expect to resilver --
4003 	 * then don't do the I/O unless zio's txg is actually in vd's DTL.
4004 	 * This prevents spurious resilvering.
4005 	 *
4006 	 * There are a few ways that we can end up creating these spurious
4007 	 * resilver i/os:
4008 	 *
4009 	 * 1. A resilver i/o will be issued if any DVA in the BP has a
4010 	 * dirty DTL.  The mirror code will issue resilver writes to
4011 	 * each DVA, including the one(s) that are not on vdevs with dirty
4012 	 * DTLs.
4013 	 *
4014 	 * 2. With nested replication, which happens when we have a
4015 	 * "replacing" or "spare" vdev that's a child of a mirror or raidz.
4016 	 * For example, given mirror(replacing(A+B), C), it's likely that
4017 	 * only A is out of date (it's the new device). In this case, we'll
4018 	 * read from C, then use the data to resilver A+B -- but we don't
4019 	 * actually want to resilver B, just A. The top-level mirror has no
4020 	 * way to know this, so instead we just discard unnecessary repairs
4021 	 * as we work our way down the vdev tree.
4022 	 *
4023 	 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
4024 	 * The same logic applies to any form of nested replication: ditto
4025 	 * + mirror, RAID-Z + replacing, etc.
4026 	 *
4027 	 * However, indirect vdevs point off to other vdevs which may have
4028 	 * DTL's, so we never bypass them.  The child i/os on concrete vdevs
4029 	 * will be properly bypassed instead.
4030 	 *
4031 	 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from
4032 	 * a dRAID spare vdev. For example, when a dRAID spare is first
4033 	 * used, its spare blocks need to be written to, but the leaf vdevs
4034 	 * holding such blocks can have an empty DTL_PARTIAL.
4035 	 *
4036 	 * There seemed no clean way to allow such writes while bypassing
4037 	 * spurious ones. At this point, just avoid all bypassing for dRAID
4038 	 * for correctness.
4039 	 */
4040 	if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
4041 	    !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
4042 	    zio->io_txg != 0 &&	/* not a delegated i/o */
4043 	    vd->vdev_ops != &vdev_indirect_ops &&
4044 	    vd->vdev_top->vdev_ops != &vdev_draid_ops &&
4045 	    !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
4046 		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4047 		zio_vdev_io_bypass(zio);
4048 		return (zio);
4049 	}
4050 
4051 	/*
4052 	 * Select the next best leaf I/O to process.  Distributed spares are
4053 	 * excluded since they dispatch the I/O directly to a leaf vdev after
4054 	 * applying the dRAID mapping.
4055 	 */
4056 	if (vd->vdev_ops->vdev_op_leaf &&
4057 	    vd->vdev_ops != &vdev_draid_spare_ops &&
4058 	    (zio->io_type == ZIO_TYPE_READ ||
4059 	    zio->io_type == ZIO_TYPE_WRITE ||
4060 	    zio->io_type == ZIO_TYPE_TRIM)) {
4061 
4062 		if ((zio = vdev_queue_io(zio)) == NULL)
4063 			return (NULL);
4064 
4065 		if (!vdev_accessible(vd, zio)) {
4066 			zio->io_error = SET_ERROR(ENXIO);
4067 			zio_interrupt(zio);
4068 			return (NULL);
4069 		}
4070 		zio->io_delay = gethrtime();
4071 	}
4072 
4073 	vd->vdev_ops->vdev_op_io_start(zio);
4074 	return (NULL);
4075 }
4076 
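/*
 * Complete a vdev I/O.  For leaf vdevs this updates the vdev queue, applies
 * any configured fault injection, converts errors on inaccessible devices
 * to ENXIO, and triggers a device probe when an unexpected error is seen.
 */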
4077 static zio_t *
4078 zio_vdev_io_done(zio_t *zio)
4079 {
4080 	vdev_t *vd = zio->io_vd;
4081 	vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
4082 	boolean_t unexpected_error = B_FALSE;
4083 
4084 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4085 		return (NULL);
4086 	}
4087 
4088 	ASSERT(zio->io_type == ZIO_TYPE_READ ||
4089 	    zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM);
4090 
4091 	if (zio->io_delay)
4092 		zio->io_delay = gethrtime() - zio->io_delay;
4093 
4094 	if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4095 	    vd->vdev_ops != &vdev_draid_spare_ops) {
4096 		vdev_queue_io_done(zio);
4097 
4098 		if (zio_injection_enabled && zio->io_error == 0)
4099 			zio->io_error = zio_handle_device_injections(vd, zio,
4100 			    EIO, EILSEQ);
4101 
4102 		if (zio_injection_enabled && zio->io_error == 0)
4103 			zio->io_error = zio_handle_label_injection(zio, EIO);
4104 
4105 		if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) {
4106 			if (!vdev_accessible(vd, zio)) {
4107 				zio->io_error = SET_ERROR(ENXIO);
4108 			} else {
4109 				unexpected_error = B_TRUE;
4110 			}
4111 		}
4112 	}
4113 
4114 	ops->vdev_op_io_done(zio);
4115 
4116 	if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
4117 		VERIFY(vdev_probe(vd, zio) == NULL);
4118 
4119 	return (zio);
4120 }
4121 
4122 /*
4123  * This function is used to change the priority of an existing zio that is
4124  * currently in-flight. This is used by the arc to upgrade priority in the
4125  * event that a demand read is made for a block that is currently queued
4126  * as a scrub or async read IO. Otherwise, the high priority read request
4127  * would end up having to wait for the lower priority IO.
4128  */
4129 void
4130 zio_change_priority(zio_t *pio, zio_priority_t priority)
4131 {
4132 	zio_t *cio, *cio_next;
4133 	zio_link_t *zl = NULL;
4134 
4135 	ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
4136 
4137 	if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
4138 		vdev_queue_change_io_priority(pio, priority);
4139 	} else {
4140 		pio->io_priority = priority;
4141 	}
4142 
4143 	mutex_enter(&pio->io_lock);
4144 	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
4145 		cio_next = zio_walk_children(pio, &zl);
4146 		zio_change_priority(cio, priority);
4147 	}
4148 	mutex_exit(&pio->io_lock);
4149 }
4150 
4151 /*
4152  * For non-raidz ZIOs, we can just copy aside the bad data read from the
4153  * disk, and use that to finish the checksum ereport later.
4154  */
4155 static void
4156 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
4157     const abd_t *good_buf)
4158 {
4159 	/* no processing needed */
4160 	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
4161 }
4162 
4163 void
4164 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
4165 {
4166 	void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
4167 
4168 	abd_copy(abd, zio->io_abd, zio->io_size);
4169 
4170 	zcr->zcr_cbinfo = zio->io_size;
4171 	zcr->zcr_cbdata = abd;
4172 	zcr->zcr_finish = zio_vsd_default_cksum_finish;
4173 	zcr->zcr_free = zio_abd_free;
4174 }
4175 
4176 static zio_t *
4177 zio_vdev_io_assess(zio_t *zio)
4178 {
4179 	vdev_t *vd = zio->io_vd;
4180 
4181 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4182 		return (NULL);
4183 	}
4184 
4185 	if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
4186 		spa_config_exit(zio->io_spa, SCL_ZIO, zio);
4187 
4188 	if (zio->io_vsd != NULL) {
4189 		zio->io_vsd_ops->vsd_free(zio);
4190 		zio->io_vsd = NULL;
4191 	}
4192 
4193 	if (zio_injection_enabled && zio->io_error == 0)
4194 		zio->io_error = zio_handle_fault_injection(zio, EIO);
4195 
4196 	/*
4197 	 * If the I/O failed, determine whether we should attempt to retry it.
4198 	 *
4199 	 * On retry, we cut in line in the issue queue, since we don't want
4200 	 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
4201 	 */
4202 	if (zio->io_error && vd == NULL &&
4203 	    !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
4204 		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE));	/* not a leaf */
4205 		ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));	/* not a leaf */
4206 		zio->io_error = 0;
4207 		zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
4208 		zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
4209 		zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
4210 		    zio_requeue_io_start_cut_in_line);
4211 		return (NULL);
4212 	}
4213 
4214 	/*
4215 	 * If we got an error on a leaf device, convert it to ENXIO
4216 	 * if the device is not accessible at all.
4217 	 */
4218 	if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4219 	    !vdev_accessible(vd, zio))
4220 		zio->io_error = SET_ERROR(ENXIO);
4221 
4222 	/*
4223 	 * If we can't write to an interior vdev (mirror or RAID-Z),
4224 	 * set vdev_cant_write so that we stop trying to allocate from it.
4225 	 */
4226 	if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
4227 	    vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
4228 		vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
4229 		    "cant_write=TRUE due to write failure with ENXIO",
4230 		    zio);
4231 		vd->vdev_cant_write = B_TRUE;
4232 	}
4233 
4234 	/*
4235 	 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future
4236 	 * attempts will ever succeed. In this case we set a persistent
4237 	 * boolean flag so that we don't bother with it in the future.
4238 	 */
4239 	if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
4240 	    zio->io_type == ZIO_TYPE_IOCTL &&
4241 	    zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
4242 		vd->vdev_nowritecache = B_TRUE;
4243 
4244 	if (zio->io_error)
4245 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4246 
4247 	return (zio);
4248 }
4249 
4250 void
4251 zio_vdev_io_reissue(zio_t *zio)
4252 {
4253 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4254 	ASSERT(zio->io_error == 0);
4255 
4256 	zio->io_stage >>= 1;
4257 }
4258 
4259 void
4260 zio_vdev_io_redone(zio_t *zio)
4261 {
4262 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
4263 
4264 	zio->io_stage >>= 1;
4265 }
4266 
4267 void
4268 zio_vdev_io_bypass(zio_t *zio)
4269 {
4270 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4271 	ASSERT(zio->io_error == 0);
4272 
4273 	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
4274 	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
4275 }
4276 
4277 /*
4278  * ==========================================================================
4279  * Encrypt and store encryption parameters
4280  * ==========================================================================
4281  */
4282 
4283 
4284 /*
4285  * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
4286  * managing the storage of encryption parameters and passing them to the
4287  * lower-level encryption functions.
4288  */
4289 static zio_t *
4290 zio_encrypt(zio_t *zio)
4291 {
4292 	zio_prop_t *zp = &zio->io_prop;
4293 	spa_t *spa = zio->io_spa;
4294 	blkptr_t *bp = zio->io_bp;
4295 	uint64_t psize = BP_GET_PSIZE(bp);
4296 	uint64_t dsobj = zio->io_bookmark.zb_objset;
4297 	dmu_object_type_t ot = BP_GET_TYPE(bp);
4298 	void *enc_buf = NULL;
4299 	abd_t *eabd = NULL;
4300 	uint8_t salt[ZIO_DATA_SALT_LEN];
4301 	uint8_t iv[ZIO_DATA_IV_LEN];
4302 	uint8_t mac[ZIO_DATA_MAC_LEN];
4303 	boolean_t no_crypt = B_FALSE;
4304 
4305 	/* the root zio already encrypted the data */
4306 	if (zio->io_child_type == ZIO_CHILD_GANG)
4307 		return (zio);
4308 
4309 	/* only ZIL blocks are re-encrypted on rewrite */
4310 	if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
4311 		return (zio);
4312 
4313 	if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
4314 		BP_SET_CRYPT(bp, B_FALSE);
4315 		return (zio);
4316 	}
4317 
4318 	/* if we are doing raw encryption set the provided encryption params */
4319 	if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
4320 		ASSERT0(BP_GET_LEVEL(bp));
4321 		BP_SET_CRYPT(bp, B_TRUE);
4322 		BP_SET_BYTEORDER(bp, zp->zp_byteorder);
4323 		if (ot != DMU_OT_OBJSET)
4324 			zio_crypt_encode_mac_bp(bp, zp->zp_mac);
4325 
4326 		/* dnode blocks must be written out in the provided byteorder */
4327 		if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
4328 		    ot == DMU_OT_DNODE) {
4329 			void *bswap_buf = zio_buf_alloc(psize);
4330 			abd_t *babd = abd_get_from_buf(bswap_buf, psize);
4331 
4332 			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4333 			abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
4334 			dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
4335 			    psize);
4336 
4337 			abd_take_ownership_of_buf(babd, B_TRUE);
4338 			zio_push_transform(zio, babd, psize, psize, NULL);
4339 		}
4340 
4341 		if (DMU_OT_IS_ENCRYPTED(ot))
4342 			zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
4343 		return (zio);
4344 	}
4345 
4346 	/* indirect blocks only maintain a cksum of the lower level MACs */
4347 	if (BP_GET_LEVEL(bp) > 0) {
4348 		BP_SET_CRYPT(bp, B_TRUE);
4349 		VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
4350 		    zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
4351 		    mac));
4352 		zio_crypt_encode_mac_bp(bp, mac);
4353 		return (zio);
4354 	}
4355 
4356 	/*
4357 	 * Objset blocks are a special case since they have 2 256-bit MACs
4358 	 * embedded within them.
4359 	 */
4360 	if (ot == DMU_OT_OBJSET) {
4361 		ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
4362 		ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4363 		BP_SET_CRYPT(bp, B_TRUE);
4364 		VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
4365 		    zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
4366 		return (zio);
4367 	}
4368 
4369 	/* unencrypted object types are only authenticated with a MAC */
4370 	if (!DMU_OT_IS_ENCRYPTED(ot)) {
4371 		BP_SET_CRYPT(bp, B_TRUE);
4372 		VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
4373 		    zio->io_abd, psize, mac));
4374 		zio_crypt_encode_mac_bp(bp, mac);
4375 		return (zio);
4376 	}
4377 
4378 	/*
4379 	 * Later passes of sync-to-convergence may decide to rewrite data
4380 	 * in place to avoid more disk reallocations. This presents a problem
4381 	 * for encryption because this constitutes rewriting the new data with
4382 	 * the same encryption key and IV. However, this only applies to blocks
4383 	 * in the MOS (particularly the spacemaps) and we do not encrypt the
4384 	 * MOS. We assert that the zio is allocating or an intent log write
4385 	 * to enforce this.
4386 	 */
4387 	ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
4388 	ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
4389 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
4390 	ASSERT3U(psize, !=, 0);
4391 
4392 	enc_buf = zio_buf_alloc(psize);
4393 	eabd = abd_get_from_buf(enc_buf, psize);
4394 	abd_take_ownership_of_buf(eabd, B_TRUE);
4395 
4396 	/*
4397 	 * For an explanation of what encryption parameters are stored
4398 	 * where, see the block comment in zio_crypt.c.
4399 	 */
4400 	if (ot == DMU_OT_INTENT_LOG) {
4401 		zio_crypt_decode_params_bp(bp, salt, iv);
4402 	} else {
4403 		BP_SET_CRYPT(bp, B_TRUE);
4404 	}
4405 
4406 	/* Perform the encryption. This should not fail */
4407 	VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
4408 	    BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
4409 	    salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
4410 
4411 	/* encode encryption metadata into the bp */
4412 	if (ot == DMU_OT_INTENT_LOG) {
4413 		/*
4414 		 * ZIL blocks store the MAC in the embedded checksum, so the
4415 		 * transform must always be applied.
4416 		 */
4417 		zio_crypt_encode_mac_zil(enc_buf, mac);
4418 		zio_push_transform(zio, eabd, psize, psize, NULL);
4419 	} else {
4420 		BP_SET_CRYPT(bp, B_TRUE);
4421 		zio_crypt_encode_params_bp(bp, salt, iv);
4422 		zio_crypt_encode_mac_bp(bp, mac);
4423 
4424 		if (no_crypt) {
4425 			ASSERT3U(ot, ==, DMU_OT_DNODE);
4426 			abd_free(eabd);
4427 		} else {
4428 			zio_push_transform(zio, eabd, psize, psize, NULL);
4429 		}
4430 	}
4431 
4432 	return (zio);
4433 }
4434 
4435 /*
4436  * ==========================================================================
4437  * Generate and verify checksums
4438  * ==========================================================================
4439  */
4440 static zio_t *
4441 zio_checksum_generate(zio_t *zio)
4442 {
4443 	blkptr_t *bp = zio->io_bp;
4444 	enum zio_checksum checksum;
4445 
4446 	if (bp == NULL) {
4447 		/*
4448 		 * This is zio_write_phys().
4449 		 * We're either generating a label checksum, or none at all.
4450 		 */
4451 		checksum = zio->io_prop.zp_checksum;
4452 
4453 		if (checksum == ZIO_CHECKSUM_OFF)
4454 			return (zio);
4455 
4456 		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
4457 	} else {
4458 		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
4459 			ASSERT(!IO_IS_ALLOCATING(zio));
4460 			checksum = ZIO_CHECKSUM_GANG_HEADER;
4461 		} else {
4462 			checksum = BP_GET_CHECKSUM(bp);
4463 		}
4464 	}
4465 
4466 	zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
4467 
4468 	return (zio);
4469 }
4470 
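/*
 * Verify the checksum of the data we just read.  On failure (other than a
 * speculative read), bump the vdev's checksum error count and start a
 * checksum ereport.
 */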
4471 static zio_t *
4472 zio_checksum_verify(zio_t *zio)
4473 {
4474 	zio_bad_cksum_t info;
4475 	blkptr_t *bp = zio->io_bp;
4476 	int error;
4477 
4478 	ASSERT(zio->io_vd != NULL);
4479 
4480 	if (bp == NULL) {
4481 		/*
4482 		 * This is zio_read_phys().
4483 		 * We're either verifying a label checksum, or nothing at all.
4484 		 */
4485 		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
4486 			return (zio);
4487 
4488 		ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
4489 	}
4490 
4491 	if ((error = zio_checksum_error(zio, &info)) != 0) {
4492 		zio->io_error = error;
4493 		if (error == ECKSUM &&
4494 		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
4495 			mutex_enter(&zio->io_vd->vdev_stat_lock);
4496 			zio->io_vd->vdev_stat.vs_checksum_errors++;
4497 			mutex_exit(&zio->io_vd->vdev_stat_lock);
4498 			(void) zfs_ereport_start_checksum(zio->io_spa,
4499 			    zio->io_vd, &zio->io_bookmark, zio,
4500 			    zio->io_offset, zio->io_size, &info);
4501 		}
4502 	}
4503 
4504 	return (zio);
4505 }
4506 
4507 /*
4508  * Called by RAID-Z to ensure we don't compute the checksum twice.
4509  */
4510 void
4511 zio_checksum_verified(zio_t *zio)
4512 {
4513 	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
4514 }
4515 
4516 /*
4517  * ==========================================================================
4518  * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
4519  * An error of 0 indicates success.  ENXIO indicates whole-device failure,
4520  * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
4521  * indicate errors that are specific to one I/O, and most likely permanent.
4522  * Any other error is presumed to be worse because we weren't expecting it.
4523  * ==========================================================================
4524  */
4525 int
4526 zio_worst_error(int e1, int e2)
4527 {
4528 	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
4529 	int r1, r2;
4530 
4531 	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
4532 		if (e1 == zio_error_rank[r1])
4533 			break;
4534 
4535 	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
4536 		if (e2 == zio_error_rank[r2])
4537 			break;
4538 
4539 	return (r1 > r2 ? e1 : e2);
4540 }
4541 
4542 /*
4543  * ==========================================================================
4544  * I/O completion
4545  * ==========================================================================
4546  */
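/*
 * The ready stage runs once all children this zio must wait on have reached
 * their ready state.  It invokes the io_ready callback, releases any
 * allocation throttle reservation if the zio has already failed, and then
 * notifies the zio's waiting parents that it is ready.
 */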
4547 static zio_t *
4548 zio_ready(zio_t *zio)
4549 {
4550 	blkptr_t *bp = zio->io_bp;
4551 	zio_t *pio, *pio_next;
4552 	zio_link_t *zl = NULL;
4553 
4554 	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
4555 	    ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
4556 		return (NULL);
4557 	}
4558 
4559 	if (zio->io_ready) {
4560 		ASSERT(IO_IS_ALLOCATING(zio));
4561 		ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg ||
4562 		    BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
4563 		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
4564 
4565 		zio->io_ready(zio);
4566 	}
4567 
4568 #ifdef ZFS_DEBUG
4569 	if (bp != NULL && bp != &zio->io_bp_copy)
4570 		zio->io_bp_copy = *bp;
4571 #endif
4572 
4573 	if (zio->io_error != 0) {
4574 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4575 
4576 		if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4577 			ASSERT(IO_IS_ALLOCATING(zio));
4578 			ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
4579 			ASSERT(zio->io_metaslab_class != NULL);
4580 			ASSERT(ZIO_HAS_ALLOCATOR(zio));
4581 
4582 			/*
4583 			 * We were unable to allocate anything, so unreserve
4584 			 * and issue the next I/O waiting to allocate.
4585 			 */
4586 			metaslab_class_throttle_unreserve(
4587 			    zio->io_metaslab_class, zio->io_prop.zp_copies,
4588 			    zio->io_allocator, zio);
4589 			zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
4590 		}
4591 	}
4592 
4593 	mutex_enter(&zio->io_lock);
4594 	zio->io_state[ZIO_WAIT_READY] = 1;
4595 	pio = zio_walk_parents(zio, &zl);
4596 	mutex_exit(&zio->io_lock);
4597 
4598 	/*
4599 	 * As we notify zio's parents, new parents could be added.
4600 	 * New parents go to the head of zio's io_parent_list, however,
4601 	 * so we will (correctly) not notify them.  The remainder of zio's
4602 	 * io_parent_list, from 'pio_next' onward, cannot change because
4603 	 * all parents must wait for us to be done before they can be done.
4604 	 */
4605 	for (; pio != NULL; pio = pio_next) {
4606 		pio_next = zio_walk_parents(zio, &zl);
4607 		zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
4608 	}
4609 
4610 	if (zio->io_flags & ZIO_FLAG_NODATA) {
4611 		if (bp != NULL && BP_IS_GANG(bp)) {
4612 			zio->io_flags &= ~ZIO_FLAG_NODATA;
4613 		} else {
4614 			ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
4615 			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
4616 		}
4617 	}
4618 
4619 	if (zio_injection_enabled &&
4620 	    zio->io_spa->spa_syncing_txg == zio->io_txg)
4621 		zio_handle_ignored_writes(zio);
4622 
4623 	return (zio);
4624 }
4625 
4626 /*
4627  * Update the allocation throttle accounting.
4628  */
4629 static void
4630 zio_dva_throttle_done(zio_t *zio)
4631 {
4632 	zio_t *lio __maybe_unused = zio->io_logical;
4633 	zio_t *pio = zio_unique_parent(zio);
4634 	vdev_t *vd = zio->io_vd;
4635 	int flags = METASLAB_ASYNC_ALLOC;
4636 
4637 	ASSERT3P(zio->io_bp, !=, NULL);
4638 	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
4639 	ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
4640 	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
4641 	ASSERT(vd != NULL);
4642 	ASSERT3P(vd, ==, vd->vdev_top);
4643 	ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
4644 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
4645 	ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
4646 	ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
4647 	ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
4648 
4649 	/*
4650 	 * Parents of gang children can have two flavors -- ones that
4651 	 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
4652 	 * and ones that allocated the constituent blocks. The allocation
4653 	 * throttle needs to know the allocating parent zio so we must find
4654 	 * it here.
4655 	 */
4656 	if (pio->io_child_type == ZIO_CHILD_GANG) {
4657 		/*
4658 		 * If our parent is a rewrite gang child then our grandparent
4659 		 * would have been the one that performed the allocation.
4660 		 */
4661 		if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
4662 			pio = zio_unique_parent(pio);
4663 		flags |= METASLAB_GANG_CHILD;
4664 	}
4665 
4666 	ASSERT(IO_IS_ALLOCATING(pio));
4667 	ASSERT(ZIO_HAS_ALLOCATOR(pio));
4668 	ASSERT3P(zio, !=, zio->io_logical);
4669 	ASSERT(zio->io_logical != NULL);
4670 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
4671 	ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
4672 	ASSERT(zio->io_metaslab_class != NULL);
4673 
4674 	mutex_enter(&pio->io_lock);
4675 	metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
4676 	    pio->io_allocator, B_TRUE);
4677 	mutex_exit(&pio->io_lock);
4678 
4679 	metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
4680 	    pio->io_allocator, pio);
4681 
4682 	/*
4683 	 * Call into the pipeline to see if there is more work that
4684 	 * needs to be done. If there is work to be done it will be
4685 	 * dispatched to another taskq thread.
4686 	 */
4687 	zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
4688 }
4689 
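/*
 * Final stage of the pipeline: collect child errors, update allocation
 * throttle accounting and vdev stats, post ereports, decide whether the
 * zio must be reexecuted or the pool suspended, and finally notify parents
 * and either wake a waiter or destroy the zio.
 */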
4690 static zio_t *
4691 zio_done(zio_t *zio)
4692 {
4693 	/*
4694 	 * Always attempt to keep stack usage minimal here since
4695 	 * we can be called recursively up to 19 levels deep.
4696 	 */
4697 	const uint64_t psize = zio->io_size;
4698 	zio_t *pio, *pio_next;
4699 	zio_link_t *zl = NULL;
4700 
4701 	/*
4702 	 * If our children haven't all completed,
4703 	 * wait for them and then repeat this pipeline stage.
4704 	 */
4705 	if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
4706 		return (NULL);
4707 	}
4708 
4709 	/*
4710 	 * If the allocation throttle is enabled, then update the accounting.
4711 	 * We only track child I/Os that are part of an allocating async
4712 	 * write. We must do this since the allocation is performed
4713 	 * by the logical I/O but the actual write is done by child I/Os.
4714 	 */
4715 	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
4716 	    zio->io_child_type == ZIO_CHILD_VDEV) {
4717 		ASSERT(zio->io_metaslab_class != NULL);
4718 		ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
4719 		zio_dva_throttle_done(zio);
4720 	}
4721 
4722 	/*
4723 	 * If the allocation throttle is enabled, verify that
4724 	 * we have decremented the refcounts for every I/O that was throttled.
4725 	 */
4726 	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4727 		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4728 		ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
4729 		ASSERT(zio->io_bp != NULL);
4730 		ASSERT(ZIO_HAS_ALLOCATOR(zio));
4731 
4732 		metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
4733 		    zio->io_allocator);
4734 		VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
4735 		    mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
4736 	}
4737 
4738 
4739 	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
4740 		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
4741 			ASSERT(zio->io_children[c][w] == 0);
4742 
4743 	if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
4744 		ASSERT(zio->io_bp->blk_pad[0] == 0);
4745 		ASSERT(zio->io_bp->blk_pad[1] == 0);
4746 		ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
4747 		    sizeof (blkptr_t)) == 0 ||
4748 		    (zio->io_bp == zio_unique_parent(zio)->io_bp));
4749 		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
4750 		    zio->io_bp_override == NULL &&
4751 		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
4752 			ASSERT3U(zio->io_prop.zp_copies, <=,
4753 			    BP_GET_NDVAS(zio->io_bp));
4754 			ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
4755 			    (BP_COUNT_GANG(zio->io_bp) ==
4756 			    BP_GET_NDVAS(zio->io_bp)));
4757 		}
4758 		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
4759 			VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4760 	}
4761 
4762 	/*
4763 	 * If there were child vdev/gang/ddt errors, they apply to us now.
4764 	 */
4765 	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
4766 	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
4767 	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
4768 
4769 	/*
4770 	 * If the I/O on the transformed data was successful, generate any
4771 	 * checksum reports now while we still have the transformed data.
4772 	 */
4773 	if (zio->io_error == 0) {
4774 		while (zio->io_cksum_report != NULL) {
4775 			zio_cksum_report_t *zcr = zio->io_cksum_report;
4776 			uint64_t align = zcr->zcr_align;
4777 			uint64_t asize = P2ROUNDUP(psize, align);
4778 			abd_t *adata = zio->io_abd;
4779 
4780 			if (adata != NULL && asize != psize) {
4781 				adata = abd_alloc(asize, B_TRUE);
4782 				abd_copy(adata, zio->io_abd, psize);
4783 				abd_zero_off(adata, psize, asize - psize);
4784 			}
4785 
4786 			zio->io_cksum_report = zcr->zcr_next;
4787 			zcr->zcr_next = NULL;
4788 			zcr->zcr_finish(zcr, adata);
4789 			zfs_ereport_free_checksum(zcr);
4790 
4791 			if (adata != NULL && asize != psize)
4792 				abd_free(adata);
4793 		}
4794 	}
4795 
4796 	zio_pop_transforms(zio);	/* note: may set zio->io_error */
4797 
4798 	vdev_stat_update(zio, psize);
4799 
4800 	/*
4801 	 * If this I/O is attached to a particular vdev and took longer than
4802 	 * zio_slow_io_ms to complete, post an error describing the I/O delay.
4803 	 * We ignore these errors if the device is currently unavailable.
4804 	 */
4805 	if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
4806 		if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
4807 			/*
4808 			 * We want to only increment our slow IO counters if
4809 			 * the IO is valid (i.e. not if the drive is removed).
4810 			 *
4811 			 * zfs_ereport_post() will also do these checks, but
4812 			 * it can also ratelimit and have other failures, so we
4813 			 * need to increment the slow_io counters independent
4814 			 * of it.
4815 			 */
4816 			if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
4817 			    zio->io_spa, zio->io_vd, zio)) {
4818 				mutex_enter(&zio->io_vd->vdev_stat_lock);
4819 				zio->io_vd->vdev_stat.vs_slow_ios++;
4820 				mutex_exit(&zio->io_vd->vdev_stat_lock);
4821 
4822 				(void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
4823 				    zio->io_spa, zio->io_vd, &zio->io_bookmark,
4824 				    zio, 0);
4825 			}
4826 		}
4827 	}
4828 
4829 	if (zio->io_error) {
4830 		/*
4831 		 * If this I/O is attached to a particular vdev,
4832 		 * generate an error message describing the I/O failure
4833 		 * at the block level.  We ignore these errors if the
4834 		 * device is currently unavailable.
4835 		 */
4836 		if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
4837 		    !vdev_is_dead(zio->io_vd)) {
4838 			int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
4839 			    zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
4840 			if (ret != EALREADY) {
4841 				mutex_enter(&zio->io_vd->vdev_stat_lock);
4842 				if (zio->io_type == ZIO_TYPE_READ)
4843 					zio->io_vd->vdev_stat.vs_read_errors++;
4844 				else if (zio->io_type == ZIO_TYPE_WRITE)
4845 					zio->io_vd->vdev_stat.vs_write_errors++;
4846 				mutex_exit(&zio->io_vd->vdev_stat_lock);
4847 			}
4848 		}
4849 
4850 		if ((zio->io_error == EIO || !(zio->io_flags &
4851 		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
4852 		    zio == zio->io_logical) {
4853 			/*
4854 			 * For logical I/O requests, tell the SPA to log the
4855 			 * error and generate a logical data ereport.
4856 			 */
4857 			spa_log_error(zio->io_spa, &zio->io_bookmark,
4858 			    BP_GET_LOGICAL_BIRTH(zio->io_bp));
4859 			(void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
4860 			    zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
4861 		}
4862 	}
4863 
4864 	if (zio->io_error && zio == zio->io_logical) {
4865 		/*
4866 		 * Determine whether zio should be reexecuted.  This will
4867 		 * propagate all the way to the root via zio_notify_parent().
4868 		 */
4869 		ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
4870 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
4871 
4872 		if (IO_IS_ALLOCATING(zio) &&
4873 		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
4874 			if (zio->io_error != ENOSPC)
4875 				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
4876 			else
4877 				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
4878 		}
4879 
4880 		if ((zio->io_type == ZIO_TYPE_READ ||
4881 		    zio->io_type == ZIO_TYPE_FREE) &&
4882 		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
4883 		    zio->io_error == ENXIO &&
4884 		    spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
4885 		    spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
4886 			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
4887 
4888 		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
4889 			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
4890 
4891 		/*
4892 		 * Here is a possibly good place to attempt to do
4893 		 * either combinatorial reconstruction or error correction
4894 		 * based on checksums.  It also might be a good place
4895 		 * to send out preliminary ereports before we suspend
4896 		 * processing.
4897 		 */
4898 	}
4899 
4900 	/*
4901 	 * If there were logical child errors, they apply to us now.
4902 	 * We defer this until now to avoid conflating logical child
4903 	 * errors with errors that happened to the zio itself when
4904 	 * updating vdev stats and reporting FMA events above.
4905 	 */
4906 	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
4907 
4908 	if ((zio->io_error || zio->io_reexecute) &&
4909 	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
4910 	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
4911 		zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
4912 
4913 	zio_gang_tree_free(&zio->io_gang_tree);
4914 
4915 	/*
4916 	 * Godfather I/Os should never suspend.
4917 	 */
4918 	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
4919 	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
4920 		zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
4921 
4922 	if (zio->io_reexecute) {
4923 		/*
4924 		 * This is a logical I/O that wants to reexecute.
4925 		 *
4926 		 * Reexecute is top-down.  When an i/o fails, if it's not
4927 		 * the root, it simply notifies its parent and sticks around.
4928 		 * The parent, seeing that it still has children in zio_done(),
4929 		 * does the same.  This percolates all the way up to the root.
4930 		 * The root i/o will reexecute or suspend the entire tree.
4931 		 *
4932 		 * This approach ensures that zio_reexecute() honors
4933 		 * all the original i/o dependency relationships, e.g.
4934 		 * parents not executing until children are ready.
4935 		 */
4936 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
4937 
4938 		zio->io_gang_leader = NULL;
4939 
4940 		mutex_enter(&zio->io_lock);
4941 		zio->io_state[ZIO_WAIT_DONE] = 1;
4942 		mutex_exit(&zio->io_lock);
4943 
4944 		/*
4945 		 * "The Godfather" I/O monitors its children but is
4946 		 * not a true parent to them. It will track them through
4947 		 * the pipeline but severs its ties whenever they get into
4948 		 * trouble (e.g. suspended). This allows "The Godfather"
4949 		 * I/O to return status without blocking.
4950 		 */
4951 		zl = NULL;
4952 		for (pio = zio_walk_parents(zio, &zl); pio != NULL;
4953 		    pio = pio_next) {
4954 			zio_link_t *remove_zl = zl;
4955 			pio_next = zio_walk_parents(zio, &zl);
4956 
4957 			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
4958 			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
4959 				zio_remove_child(pio, zio, remove_zl);
4960 				/*
4961 				 * This is a rare code path, so we don't
4962 				 * bother with "next_to_execute".
4963 				 */
4964 				zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
4965 				    NULL);
4966 			}
4967 		}
4968 
4969 		if ((pio = zio_unique_parent(zio)) != NULL) {
4970 			/*
4971 			 * We're not a root i/o, so there's nothing to do
4972 			 * but notify our parent.  Don't propagate errors
4973 			 * upward since we haven't permanently failed yet.
4974 			 */
4975 			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
4976 			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
4977 			/*
4978 			 * This is a rare code path, so we don't bother with
4979 			 * "next_to_execute".
4980 			 */
4981 			zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
4982 		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
4983 			/*
4984 			 * We'd fail again if we reexecuted now, so suspend
4985 			 * until conditions improve (e.g. device comes online).
4986 			 */
4987 			zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
4988 		} else {
4989 			/*
4990 			 * Reexecution is potentially a huge amount of work.
4991 			 * Hand it off to the otherwise-unused claim taskq.
4992 			 */
4993 			ASSERT(taskq_empty_ent(&zio->io_tqent));
4994 			spa_taskq_dispatch_ent(zio->io_spa,
4995 			    ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
4996 			    zio_reexecute, zio, 0, &zio->io_tqent, NULL);
4997 		}
4998 		return (NULL);
4999 	}
5000 
5001 	ASSERT(list_is_empty(&zio->io_child_list));
5002 	ASSERT(zio->io_reexecute == 0);
5003 	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
5004 
5005 	/*
5006 	 * Report any checksum errors, since the I/O is complete.
5007 	 */
5008 	while (zio->io_cksum_report != NULL) {
5009 		zio_cksum_report_t *zcr = zio->io_cksum_report;
5010 		zio->io_cksum_report = zcr->zcr_next;
5011 		zcr->zcr_next = NULL;
5012 		zcr->zcr_finish(zcr, NULL);
5013 		zfs_ereport_free_checksum(zcr);
5014 	}
5015 
5016 	/*
5017 	 * It is the responsibility of the done callback to ensure that this
5018 	 * particular zio is no longer discoverable for adoption, and as
5019 	 * such, cannot acquire any new parents.
5020 	 */
5021 	if (zio->io_done)
5022 		zio->io_done(zio);
5023 
5024 	mutex_enter(&zio->io_lock);
5025 	zio->io_state[ZIO_WAIT_DONE] = 1;
5026 	mutex_exit(&zio->io_lock);
5027 
5028 	/*
5029 	 * We are done executing this zio.  We may want to execute a parent
5030 	 * next.  See the comment in zio_notify_parent().
5031 	 */
5032 	zio_t *next_to_execute = NULL;
5033 	zl = NULL;
5034 	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
5035 		zio_link_t *remove_zl = zl;
5036 		pio_next = zio_walk_parents(zio, &zl);
5037 		zio_remove_child(pio, zio, remove_zl);
5038 		zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
5039 	}
5040 
5041 	if (zio->io_waiter != NULL) {
5042 		mutex_enter(&zio->io_lock);
5043 		zio->io_executor = NULL;
5044 		cv_broadcast(&zio->io_cv);
5045 		mutex_exit(&zio->io_lock);
5046 	} else {
5047 		zio_destroy(zio);
5048 	}
5049 
5050 	return (next_to_execute);
5051 }
5052 
5053 /*
5054  * ==========================================================================
5055  * I/O pipeline definition
5056  * ==========================================================================
5057  */
5058 static zio_pipe_stage_t *zio_pipeline[] = {
5059 	NULL,
5060 	zio_read_bp_init,
5061 	zio_write_bp_init,
5062 	zio_free_bp_init,
5063 	zio_issue_async,
5064 	zio_write_compress,
5065 	zio_encrypt,
5066 	zio_checksum_generate,
5067 	zio_nop_write,
5068 	zio_brt_free,
5069 	zio_ddt_read_start,
5070 	zio_ddt_read_done,
5071 	zio_ddt_write,
5072 	zio_ddt_free,
5073 	zio_gang_assemble,
5074 	zio_gang_issue,
5075 	zio_dva_throttle,
5076 	zio_dva_allocate,
5077 	zio_dva_free,
5078 	zio_dva_claim,
5079 	zio_ready,
5080 	zio_vdev_io_start,
5081 	zio_vdev_io_done,
5082 	zio_vdev_io_assess,
5083 	zio_checksum_verify,
5084 	zio_done
5085 };
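
/*
 * Roughly speaking, each zio selects a subset of these stages through its
 * io_pipeline bitmask (see the pipeline definitions in zio_impl.h);
 * zio_execute() then walks the enabled stages in order, using the position
 * of the current stage bit to index this table.
 */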
5086 
5087 
5088 
5089 
5090 /*
5091  * Compare two zbookmark_phys_t's to see which we would reach first in a
5092  * pre-order traversal of the object tree.
5093  *
5094  * This is simple in every case aside from the meta-dnode object. For all other
5095  * objects, we traverse them in order (object 1 before object 2, and so on).
5096  * However, all of these objects are traversed while traversing object 0, since
5097  * the data it points to is the list of objects.  Thus, we need to convert to a
5098  * canonical representation so we can compare meta-dnode bookmarks to
5099  * non-meta-dnode bookmarks.
5100  *
5101  * We do this by calculating "equivalents" for each field of the zbookmark.
5102  * zbookmarks outside of the meta-dnode use their own object and level, and
5103  * calculate the level 0 equivalent (the first L0 blkid that is contained in the
5104  * blocks this bookmark refers to) by multiplying their blkid by their span
5105  * (the number of L0 blocks contained within one block at their level).
5106  * zbookmarks inside the meta-dnode calculate their object equivalent
5107  * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
5108  * level + 1<<31 (a value larger than any level could ever be) for their level.
5109  * This causes them to always compare before a bookmark in their object
5110  * equivalent, to compare appropriately to bookmarks in other objects, and to
5111  * compare appropriately to other bookmarks in the meta-dnode.
5112  */
5113 int
5114 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
5115     const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
5116 {
5117 	/*
5118 	 * These variables represent the "equivalent" values for the zbookmark,
5119 	 * after converting zbookmarks inside the meta dnode to their
5120 	 * normal-object equivalents.
5121 	 */
5122 	uint64_t zb1obj, zb2obj;
5123 	uint64_t zb1L0, zb2L0;
5124 	uint64_t zb1level, zb2level;
5125 
5126 	if (zb1->zb_object == zb2->zb_object &&
5127 	    zb1->zb_level == zb2->zb_level &&
5128 	    zb1->zb_blkid == zb2->zb_blkid)
5129 		return (0);
5130 
5131 	IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
5132 	IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
5133 
5134 	/*
5135 	 * BP_SPANB calculates the span in blocks.
5136 	 */
5137 	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
5138 	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
5139 
5140 	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
5141 		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
5142 		zb1L0 = 0;
5143 		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
5144 	} else {
5145 		zb1obj = zb1->zb_object;
5146 		zb1level = zb1->zb_level;
5147 	}
5148 
5149 	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
5150 		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
5151 		zb2L0 = 0;
5152 		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
5153 	} else {
5154 		zb2obj = zb2->zb_object;
5155 		zb2level = zb2->zb_level;
5156 	}
5157 
5158 	/* Now that we have a canonical representation, do the comparison. */
5159 	if (zb1obj != zb2obj)
5160 		return (zb1obj < zb2obj ? -1 : 1);
5161 	else if (zb1L0 != zb2L0)
5162 		return (zb1L0 < zb2L0 ? -1 : 1);
5163 	else if (zb1level != zb2level)
5164 		return (zb1level > zb2level ? -1 : 1);
5165 	/*
5166 	 * This can (theoretically) happen if the bookmarks have the same object
5167 	 * and level but different blkids, which requires that the block sizes
5168 	 * differ.  There is presently no way to change the indirect block sizes.
5169 	 */
5170 	return (0);
5171 }
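
/*
 * Worked example (illustrative values only): consider a meta-dnode bookmark
 * at level 1, blkid 3, in an objset with 16K dnode data blocks (dbss = 32
 * sectors) and indblkshift = 17.  Then:
 *
 *	span        = BP_SPANB(17, 1) = 1 << (17 - SPA_BLKPTRSHIFT) = 1024
 *	L0 equiv    = 3 * 1024 = 3072
 *	obj equiv   = 3072 * (32 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)) = 98304
 *	level equiv = 1 + COMPARE_META_LEVEL
 *
 * so this bookmark compares before every ordinary bookmark in object 98304
 * (its object equivalent), after bookmarks in lower-numbered objects, and
 * before bookmarks in higher-numbered objects, as described above.
 */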
5172 
5173 /*
5174  * This function checks the following: given that last_block is the place
5175  * that our traversal stopped last time, does that guarantee that we've
5176  * visited every node under subtree_root?  Comparing the two bookmarks as-is
5177  * doesn't answer that, so we can't just use the raw output of
5178  * zbookmark_compare.  We have to pass in a modified version of subtree_root:
5179  * by incrementing its block id and then checking whether last_block is at or
5180  * beyond that point, we can tell whether or not having visited last_block
5181  * implies that all of subtree_root's children have been visited.
5182  */
5183 boolean_t
5184 zbookmark_subtree_completed(const dnode_phys_t *dnp,
5185     const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
5186 {
5187 	zbookmark_phys_t mod_zb = *subtree_root;
5188 	mod_zb.zb_blkid++;
5189 	ASSERT0(last_block->zb_level);
5190 
5191 	/* The objset_phys_t isn't before anything. */
5192 	if (dnp == NULL)
5193 		return (B_FALSE);
5194 
5195 	/*
5196 	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
5197 	 * data block size in sectors, because that variable is only used if
5198 	 * the bookmark refers to a block in the meta-dnode.  Since we don't
5199 	 * know without examining it what object it refers to, and there's no
5200 	 * harm in passing in this value in other cases, we always pass it in.
5201 	 *
5202 	 * We pass in 0 for the indirect block size shift because zb2 must be
5203 	 * level 0.  The indirect block size is only used to calculate the span
5204 	 * of the bookmark, but since the bookmark must be level 0, the span is
5205 	 * always 1, so the math works out.
5206 	 *
5207 	 * If you make changes to how the zbookmark_compare code works, be sure
5208 	 * to verify that this code still works afterwards.
5209 	 */
5210 	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
5211 	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
5212 	    last_block) <= 0);
5213 }
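
/*
 * Usage sketch (hypothetical caller, for illustration only): a resumable
 * traversal that saved the level-0 bookmark where it previously stopped can
 * use this predicate to skip subtrees it has already fully visited, e.g.
 *
 *	if (zbookmark_subtree_completed(dnp, &zb, resume_zb))
 *		return (0);
 *
 * where zb is the bookmark of the block about to be descended into and
 * resume_zb is the saved level-0 resume point.
 */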
5214 
5215 /*
5216  * This function is similar to zbookmark_subtree_completed(), but returns true
5217  * if subtree_root is equal to or ahead of last_block, i.e. still to be done.
5218  */
5219 boolean_t
5220 zbookmark_subtree_tbd(const dnode_phys_t *dnp,
5221     const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
5222 {
5223 	ASSERT0(last_block->zb_level);
5224 	if (dnp == NULL)
5225 		return (B_FALSE);
5226 	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
5227 	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
5228 	    last_block) >= 0);
5229 }
5230 
5231 EXPORT_SYMBOL(zio_type_name);
5232 EXPORT_SYMBOL(zio_buf_alloc);
5233 EXPORT_SYMBOL(zio_data_buf_alloc);
5234 EXPORT_SYMBOL(zio_buf_free);
5235 EXPORT_SYMBOL(zio_data_buf_free);
5236 
5237 ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, UINT, ZMOD_RW,
5238 	"Max I/O completion time (milliseconds) before marking it as slow");
5239 
5240 ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
5241 	"Prioritize requeued I/O");
5242 
5243 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
5244 	"Defer frees starting in this pass");
5245 
5246 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
5247 	"Don't compress starting in this pass");
5248 
5249 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
5250 	"Rewrite new bps starting in this pass");
5251 
5252 ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
5253 	"Throttle block allocations in the ZIO pipeline");
5254 
5255 ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
5256 	"Log all slow ZIOs, not just those with vdevs");
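
/*
 * Note: on Linux these tunables are typically exposed under
 * /sys/module/zfs/parameters/ using the concatenated prefix and name shown
 * above (e.g. zio_slow_io_ms, zfs_sync_pass_deferred_free); on FreeBSD they
 * typically surface as sysctls under the vfs.zfs tree.  They can also be set
 * at module load time, e.g. via /etc/modprobe.d/zfs.conf on Linux.
 */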
5257