xref: /freebsd/sys/contrib/openzfs/module/zfs/zio.c (revision 044f94adff55f13130f03c0c170fa879c8febb5b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
24  * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
25  * Copyright (c) 2017, Intel Corporation.
26  * Copyright (c) 2019, 2023, 2024, Klara Inc.
27  * Copyright (c) 2019, Allan Jude
28  * Copyright (c) 2021, Datto, Inc.
29  */
30 
31 #include <sys/sysmacros.h>
32 #include <sys/zfs_context.h>
33 #include <sys/fm/fs/zfs.h>
34 #include <sys/spa.h>
35 #include <sys/txg.h>
36 #include <sys/spa_impl.h>
37 #include <sys/vdev_impl.h>
38 #include <sys/vdev_trim.h>
39 #include <sys/zio_impl.h>
40 #include <sys/zio_compress.h>
41 #include <sys/zio_checksum.h>
42 #include <sys/dmu_objset.h>
43 #include <sys/arc.h>
44 #include <sys/brt.h>
45 #include <sys/ddt.h>
46 #include <sys/blkptr.h>
47 #include <sys/zfeature.h>
48 #include <sys/dsl_scan.h>
49 #include <sys/metaslab_impl.h>
50 #include <sys/time.h>
51 #include <sys/trace_zfs.h>
52 #include <sys/abd.h>
53 #include <sys/dsl_crypt.h>
54 #include <cityhash.h>
55 
56 /*
57  * ==========================================================================
58  * I/O type descriptions
59  * ==========================================================================
60  */
61 const char *const zio_type_name[ZIO_TYPES] = {
62 	/*
63 	 * Note: Linux kernel thread name length is limited
64 	 * so these names will differ from upstream OpenZFS.
65 	 */
66 	"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_flush", "z_trim"
67 };
68 
69 int zio_dva_throttle_enabled = B_TRUE;
70 static int zio_deadman_log_all = B_FALSE;
71 
72 /*
73  * ==========================================================================
74  * I/O kmem caches
75  * ==========================================================================
76  */
77 static kmem_cache_t *zio_cache;
78 static kmem_cache_t *zio_link_cache;
79 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
80 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
81 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
82 static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
83 static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
84 #endif
85 
86 /* Mark IOs as "slow" if they take longer than 30 seconds */
87 static uint_t zio_slow_io_ms = (30 * MILLISEC);
88 
89 #define	BP_SPANB(indblkshift, level) \
90 	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
91 #define	COMPARE_META_LEVEL	0x80000000ul
92 /*
93  * The following actions directly affect the spa's sync-to-convergence logic.
94  * The values below define the sync pass when we start performing the action.
95  * Care should be taken when changing these values as they directly impact
96  * spa_sync() performance. Tuning these values may introduce subtle performance
97  * pathologies and should only be done in the context of performance analysis.
98  * These tunables will eventually be removed and replaced with #defines once
99  * enough analysis has been done to determine optimal values.
100  *
101  * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
102  * regular blocks are not deferred.
103  *
104  * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
105  * compression (including of metadata).  In practice, we don't have this
106  * many sync passes, so this has no effect.
107  *
108  * The original intent was that disabling compression would help the sync
109  * passes to converge. However, in practice disabling compression increases
110  * the average number of sync passes, because when we turn compression off,
111  * many blocks change size and thus have to be re-allocated (not
112  * overwritten). It also increases the number of 128KB allocations (e.g.
113  * for indirect blocks and spacemaps) because these will not be compressed.
114  * The 128K allocations are especially detrimental to performance on highly
115  * fragmented systems, which may have very few free segments of this size,
116  * and may need to load new metaslabs to satisfy 128K allocations.
117  */
118 
119 /* defer frees starting in this pass */
120 uint_t zfs_sync_pass_deferred_free = 2;
121 
122 /* don't compress starting in this pass */
123 static uint_t zfs_sync_pass_dont_compress = 8;
124 
125 /* rewrite new bps starting in this pass */
126 static uint_t zfs_sync_pass_rewrite = 2;
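
/*
 * For illustration, these sync-pass tunables are compared against the
 * current spa_sync_pass().  For example, zio_free() below defers a free
 * once the pool is past the deferred-free pass and the log space map
 * feature is not active, roughly:
 *
 *	if (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
 *	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
 *		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
 */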
127 
128 /*
129  * An allocating zio is one that either currently has the DVA allocate
130  * stage set or will have it later in its lifetime.
131  */
132 #define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
133 
134 /*
135  * Enable smaller crash-dump cores by excluding metadata
136  * allocations as well.
137  */
138 int zio_exclude_metadata = 0;
139 static int zio_requeue_io_start_cut_in_line = 1;
140 
141 #ifdef ZFS_DEBUG
142 static const int zio_buf_debug_limit = 16384;
143 #else
144 static const int zio_buf_debug_limit = 0;
145 #endif
146 
147 static inline void __zio_execute(zio_t *zio);
148 
149 static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
150 
151 void
152 zio_init(void)
153 {
154 	size_t c;
155 
156 	zio_cache = kmem_cache_create("zio_cache",
157 	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
158 	zio_link_cache = kmem_cache_create("zio_link_cache",
159 	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
160 
161 	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
162 		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
163 		size_t align, cflags, data_cflags;
164 		char name[32];
165 
166 		/*
167 		 * Create a cache for each half-power-of-2 size, starting from
168 		 * SPA_MINBLOCKSIZE.  This should give us memory space efficiency
169 		 * of ~7/8, which is sufficient for the transient allocations that
170 		 * mostly use these caches.
171 		 */
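		/*
		 * For example, assuming the usual 512-byte SPA_MINBLOCKSIZE,
		 * caches end up at 512, 768, 1024, 1536, 2048, 3072, 4096,
		 * 6144, 8192, ... bytes: each power of two and the midpoint
		 * between successive powers of two.
		 */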
172 		size_t p2 = size;
173 		while (!ISP2(p2))
174 			p2 &= p2 - 1;
175 		if (!IS_P2ALIGNED(size, p2 / 2))
176 			continue;
177 
178 #ifndef _KERNEL
179 		/*
180 		 * If we are using watchpoints, put each buffer on its own page,
181 		 * to eliminate the performance overhead of trapping to the
182 		 * kernel when modifying a non-watched buffer that shares the
183 		 * page with a watched buffer.
184 		 */
185 		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
186 			continue;
187 #endif
188 
189 		if (IS_P2ALIGNED(size, PAGESIZE))
190 			align = PAGESIZE;
191 		else
192 			align = 1 << (highbit64(size ^ (size - 1)) - 1);
193 
194 		cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
195 		    KMC_NODEBUG : 0;
196 		data_cflags = KMC_NODEBUG;
197 		if (abd_size_alloc_linear(size)) {
198 			cflags |= KMC_RECLAIMABLE;
199 			data_cflags |= KMC_RECLAIMABLE;
200 		}
201 		if (cflags == data_cflags) {
202 			/*
203 			 * Resulting kmem caches would be identical.
204 			 * Save memory by creating only one.
205 			 */
206 			(void) snprintf(name, sizeof (name),
207 			    "zio_buf_comb_%lu", (ulong_t)size);
208 			zio_buf_cache[c] = kmem_cache_create(name, size, align,
209 			    NULL, NULL, NULL, NULL, NULL, cflags);
210 			zio_data_buf_cache[c] = zio_buf_cache[c];
211 			continue;
212 		}
213 		(void) snprintf(name, sizeof (name), "zio_buf_%lu",
214 		    (ulong_t)size);
215 		zio_buf_cache[c] = kmem_cache_create(name, size, align,
216 		    NULL, NULL, NULL, NULL, NULL, cflags);
217 
218 		(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
219 		    (ulong_t)size);
220 		zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
221 		    NULL, NULL, NULL, NULL, NULL, data_cflags);
222 	}
223 
224 	while (--c != 0) {
225 		ASSERT(zio_buf_cache[c] != NULL);
226 		if (zio_buf_cache[c - 1] == NULL)
227 			zio_buf_cache[c - 1] = zio_buf_cache[c];
228 
229 		ASSERT(zio_data_buf_cache[c] != NULL);
230 		if (zio_data_buf_cache[c - 1] == NULL)
231 			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
232 	}
233 
234 	zio_inject_init();
235 
236 	lz4_init();
237 }
238 
239 void
240 zio_fini(void)
241 {
242 	size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
243 
244 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
245 	for (size_t i = 0; i < n; i++) {
246 		if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
247 			(void) printf("zio_fini: [%d] %llu != %llu\n",
248 			    (int)((i + 1) << SPA_MINBLOCKSHIFT),
249 			    (long long unsigned)zio_buf_cache_allocs[i],
250 			    (long long unsigned)zio_buf_cache_frees[i]);
251 	}
252 #endif
253 
254 	/*
255 	 * The same kmem cache can show up multiple times in both zio_buf_cache
256 	 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
257 	 * sort it out.
258 	 */
259 	for (size_t i = 0; i < n; i++) {
260 		kmem_cache_t *cache = zio_buf_cache[i];
261 		if (cache == NULL)
262 			continue;
263 		for (size_t j = i; j < n; j++) {
264 			if (cache == zio_buf_cache[j])
265 				zio_buf_cache[j] = NULL;
266 			if (cache == zio_data_buf_cache[j])
267 				zio_data_buf_cache[j] = NULL;
268 		}
269 		kmem_cache_destroy(cache);
270 	}
271 
272 	for (size_t i = 0; i < n; i++) {
273 		kmem_cache_t *cache = zio_data_buf_cache[i];
274 		if (cache == NULL)
275 			continue;
276 		for (size_t j = i; j < n; j++) {
277 			if (cache == zio_data_buf_cache[j])
278 				zio_data_buf_cache[j] = NULL;
279 		}
280 		kmem_cache_destroy(cache);
281 	}
282 
283 	for (size_t i = 0; i < n; i++) {
284 		VERIFY3P(zio_buf_cache[i], ==, NULL);
285 		VERIFY3P(zio_data_buf_cache[i], ==, NULL);
286 	}
287 
288 	kmem_cache_destroy(zio_link_cache);
289 	kmem_cache_destroy(zio_cache);
290 
291 	zio_inject_fini();
292 
293 	lz4_fini();
294 }
295 
296 /*
297  * ==========================================================================
298  * Allocate and free I/O buffers
299  * ==========================================================================
300  */
301 
302 #if defined(ZFS_DEBUG) && defined(_KERNEL)
303 #define	ZFS_ZIO_BUF_CANARY	1
304 #endif
305 
306 #ifdef ZFS_ZIO_BUF_CANARY
307 static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
308 
309 /*
310  * Use empty space after the buffer to detect overflows.
311  *
312  * Since zio_init() creates kmem caches only for certain set of buffer sizes,
313  * allocations of different sizes may have some unused space after the data.
314  * Filling part of that space with a known pattern on allocation and checking
315  * it on free should allow us to detect some buffer overflows.
316  */
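
/*
 * Worked example (assuming the usual 512-byte SPA_MINBLOCKSIZE): a
 * 3584-byte zio_buf_alloc() is served from the 4096-byte cache, so the
 * trailing 512 bytes are filled with the canary pattern on allocation and
 * verified again when the buffer is passed to zio_buf_free().
 */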
317 static void
318 zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
319 {
320 	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
321 	ulong_t *canary = p + off / sizeof (ulong_t);
322 	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
323 	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
324 	    cache[c] == cache[c + 1])
325 		asize = (c + 2) << SPA_MINBLOCKSHIFT;
326 	for (; off < asize; canary++, off += sizeof (ulong_t))
327 		*canary = zio_buf_canary;
328 }
329 
330 static void
331 zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
332 {
333 	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
334 	ulong_t *canary = p + off / sizeof (ulong_t);
335 	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
336 	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
337 	    cache[c] == cache[c + 1])
338 		asize = (c + 2) << SPA_MINBLOCKSHIFT;
339 	for (; off < asize; canary++, off += sizeof (ulong_t)) {
340 		if (unlikely(*canary != zio_buf_canary)) {
341 			PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
342 			    p, size, (canary - p) * sizeof (ulong_t),
343 			    *canary, zio_buf_canary);
344 		}
345 	}
346 }
347 #endif
348 
349 /*
350  * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
351  * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
352  * useful to inspect ZFS metadata, but if possible, we should avoid keeping
353  * excess / transient data in-core during a crashdump.
354  */
355 void *
356 zio_buf_alloc(size_t size)
357 {
358 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
359 
360 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
361 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
362 	atomic_add_64(&zio_buf_cache_allocs[c], 1);
363 #endif
364 
365 	void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
366 #ifdef ZFS_ZIO_BUF_CANARY
367 	zio_buf_put_canary(p, size, zio_buf_cache, c);
368 #endif
369 	return (p);
370 }
371 
372 /*
373  * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
374  * crashdump if the kernel panics.  This exists to limit the amount of ZFS
375  * data that shows up in a kernel crashdump, thus reducing the amount of
376  * kernel heap dumped to disk when the kernel panics.
377  */
378 void *
379 zio_data_buf_alloc(size_t size)
380 {
381 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
382 
383 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
384 
385 	void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
386 #ifdef ZFS_ZIO_BUF_CANARY
387 	zio_buf_put_canary(p, size, zio_data_buf_cache, c);
388 #endif
389 	return (p);
390 }
391 
392 void
393 zio_buf_free(void *buf, size_t size)
394 {
395 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
396 
397 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
398 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
399 	atomic_add_64(&zio_buf_cache_frees[c], 1);
400 #endif
401 
402 #ifdef ZFS_ZIO_BUF_CANARY
403 	zio_buf_check_canary(buf, size, zio_buf_cache, c);
404 #endif
405 	kmem_cache_free(zio_buf_cache[c], buf);
406 }
407 
408 void
409 zio_data_buf_free(void *buf, size_t size)
410 {
411 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
412 
413 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
414 
415 #ifdef ZFS_ZIO_BUF_CANARY
416 	zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
417 #endif
418 	kmem_cache_free(zio_data_buf_cache[c], buf);
419 }
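
/*
 * Minimal usage sketch (hypothetical caller): buffers must be returned to
 * the allocator they came from, with the same size, e.g.:
 *
 *	void *buf = zio_buf_alloc(lsize);
 *	...
 *	zio_buf_free(buf, lsize);
 *
 * Freeing with a size that maps to a different cache, or through
 * zio_data_buf_free(), would return the buffer to the wrong kmem cache.
 */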
420 
421 static void
422 zio_abd_free(void *abd, size_t size)
423 {
424 	(void) size;
425 	abd_free((abd_t *)abd);
426 }
427 
428 /*
429  * ==========================================================================
430  * Push and pop I/O transform buffers
431  * ==========================================================================
432  */
433 void
434 zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
435     zio_transform_func_t *transform)
436 {
437 	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
438 
439 	zt->zt_orig_abd = zio->io_abd;
440 	zt->zt_orig_size = zio->io_size;
441 	zt->zt_bufsize = bufsize;
442 	zt->zt_transform = transform;
443 
444 	zt->zt_next = zio->io_transform_stack;
445 	zio->io_transform_stack = zt;
446 
447 	zio->io_abd = data;
448 	zio->io_size = size;
449 }
450 
451 void
452 zio_pop_transforms(zio_t *zio)
453 {
454 	zio_transform_t *zt;
455 
456 	while ((zt = zio->io_transform_stack) != NULL) {
457 		if (zt->zt_transform != NULL)
458 			zt->zt_transform(zio,
459 			    zt->zt_orig_abd, zt->zt_orig_size);
460 
461 		if (zt->zt_bufsize != 0)
462 			abd_free(zio->io_abd);
463 
464 		zio->io_abd = zt->zt_orig_abd;
465 		zio->io_size = zt->zt_orig_size;
466 		zio->io_transform_stack = zt->zt_next;
467 
468 		kmem_free(zt, sizeof (zio_transform_t));
469 	}
470 }
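
/*
 * Illustrative note: zio_read_bp_init() below uses this mechanism for
 * transparent decompression.  It pushes a freshly allocated abd of the
 * physical size along with the zio_decompress() callback; when the
 * transforms are popped after the read completes, the callback decompresses
 * into the original (logical-size) abd and the temporary buffer is freed.
 */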
471 
472 /*
473  * ==========================================================================
474  * I/O transform callbacks for subblocks, decompression, and decryption
475  * ==========================================================================
476  */
477 static void
478 zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
479 {
480 	ASSERT(zio->io_size > size);
481 
482 	if (zio->io_type == ZIO_TYPE_READ)
483 		abd_copy(data, zio->io_abd, size);
484 }
485 
486 static void
487 zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
488 {
489 	if (zio->io_error == 0) {
490 		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
491 		    zio->io_abd, data, zio->io_size, size,
492 		    &zio->io_prop.zp_complevel);
493 
494 		if (zio_injection_enabled && ret == 0)
495 			ret = zio_handle_fault_injection(zio, EINVAL);
496 
497 		if (ret != 0)
498 			zio->io_error = SET_ERROR(EIO);
499 	}
500 }
501 
502 static void
503 zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
504 {
505 	int ret;
506 	void *tmp;
507 	blkptr_t *bp = zio->io_bp;
508 	spa_t *spa = zio->io_spa;
509 	uint64_t dsobj = zio->io_bookmark.zb_objset;
510 	uint64_t lsize = BP_GET_LSIZE(bp);
511 	dmu_object_type_t ot = BP_GET_TYPE(bp);
512 	uint8_t salt[ZIO_DATA_SALT_LEN];
513 	uint8_t iv[ZIO_DATA_IV_LEN];
514 	uint8_t mac[ZIO_DATA_MAC_LEN];
515 	boolean_t no_crypt = B_FALSE;
516 
517 	ASSERT(BP_USES_CRYPT(bp));
518 	ASSERT3U(size, !=, 0);
519 
520 	if (zio->io_error != 0)
521 		return;
522 
523 	/*
524 	 * Verify the cksum of MACs stored in an indirect bp. It will always
525 	 * be possible to verify this since it does not require an encryption
526 	 * key.
527 	 */
528 	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
529 		zio_crypt_decode_mac_bp(bp, mac);
530 
531 		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
532 			/*
533 			 * We haven't decompressed the data yet, but
534 			 * zio_crypt_do_indirect_mac_checksum() requires
535 			 * decompressed data to be able to parse out the MACs
536 			 * from the indirect block. We decompress it now and
537 			 * throw away the result after we are finished.
538 			 */
539 			abd_t *abd = abd_alloc_linear(lsize, B_TRUE);
540 			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
541 			    zio->io_abd, abd, zio->io_size, lsize,
542 			    &zio->io_prop.zp_complevel);
543 			if (ret != 0) {
544 				abd_free(abd);
545 				ret = SET_ERROR(EIO);
546 				goto error;
547 			}
548 			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
549 			    abd, lsize, BP_SHOULD_BYTESWAP(bp), mac);
550 			abd_free(abd);
551 		} else {
552 			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
553 			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
554 		}
555 		abd_copy(data, zio->io_abd, size);
556 
557 		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
558 			ret = zio_handle_decrypt_injection(spa,
559 			    &zio->io_bookmark, ot, ECKSUM);
560 		}
561 		if (ret != 0)
562 			goto error;
563 
564 		return;
565 	}
566 
567 	/*
568 	 * If this is an authenticated block, just check the MAC. It would be
569 	 * nice to separate this out into its own flag, but when this was done,
570 	 * we had run out of bits in what is now zio_flag_t. Future cleanup
571 	 * could make this a flag bit.
572 	 */
573 	if (BP_IS_AUTHENTICATED(bp)) {
574 		if (ot == DMU_OT_OBJSET) {
575 			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
576 			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
577 		} else {
578 			zio_crypt_decode_mac_bp(bp, mac);
579 			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
580 			    zio->io_abd, size, mac);
581 			if (zio_injection_enabled && ret == 0) {
582 				ret = zio_handle_decrypt_injection(spa,
583 				    &zio->io_bookmark, ot, ECKSUM);
584 			}
585 		}
586 		abd_copy(data, zio->io_abd, size);
587 
588 		if (ret != 0)
589 			goto error;
590 
591 		return;
592 	}
593 
594 	zio_crypt_decode_params_bp(bp, salt, iv);
595 
596 	if (ot == DMU_OT_INTENT_LOG) {
597 		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
598 		zio_crypt_decode_mac_zil(tmp, mac);
599 		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
600 	} else {
601 		zio_crypt_decode_mac_bp(bp, mac);
602 	}
603 
604 	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
605 	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
606 	    zio->io_abd, &no_crypt);
607 	if (no_crypt)
608 		abd_copy(data, zio->io_abd, size);
609 
610 	if (ret != 0)
611 		goto error;
612 
613 	return;
614 
615 error:
616 	/* assert that the key was found unless this was speculative */
617 	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
618 
619 	/*
620 	 * If there was a decryption / authentication error return EIO as
621 	 * the io_error. If this was not a speculative zio, create an ereport.
622 	 */
623 	if (ret == ECKSUM) {
624 		zio->io_error = SET_ERROR(EIO);
625 		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
626 			spa_log_error(spa, &zio->io_bookmark,
627 			    BP_GET_LOGICAL_BIRTH(zio->io_bp));
628 			(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
629 			    spa, NULL, &zio->io_bookmark, zio, 0);
630 		}
631 	} else {
632 		zio->io_error = ret;
633 	}
634 }
635 
636 /*
637  * ==========================================================================
638  * I/O parent/child relationships and pipeline interlocks
639  * ==========================================================================
640  */
641 zio_t *
642 zio_walk_parents(zio_t *cio, zio_link_t **zl)
643 {
644 	list_t *pl = &cio->io_parent_list;
645 
646 	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
647 	if (*zl == NULL)
648 		return (NULL);
649 
650 	ASSERT((*zl)->zl_child == cio);
651 	return ((*zl)->zl_parent);
652 }
653 
654 zio_t *
655 zio_walk_children(zio_t *pio, zio_link_t **zl)
656 {
657 	list_t *cl = &pio->io_child_list;
658 
659 	ASSERT(MUTEX_HELD(&pio->io_lock));
660 
661 	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
662 	if (*zl == NULL)
663 		return (NULL);
664 
665 	ASSERT((*zl)->zl_parent == pio);
666 	return ((*zl)->zl_child);
667 }
668 
669 zio_t *
670 zio_unique_parent(zio_t *cio)
671 {
672 	zio_link_t *zl = NULL;
673 	zio_t *pio = zio_walk_parents(cio, &zl);
674 
675 	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
676 	return (pio);
677 }
678 
679 void
680 zio_add_child(zio_t *pio, zio_t *cio)
681 {
682 	/*
683 	 * Logical I/Os can have logical, gang, or vdev children.
684 	 * Gang I/Os can have gang or vdev children.
685 	 * Vdev I/Os can only have vdev children.
686 	 * The following ASSERT captures all of these constraints.
687 	 */
688 	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
689 
690 	/* Parent should not have READY stage if child doesn't have it. */
691 	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
692 	    (cio->io_child_type != ZIO_CHILD_VDEV),
693 	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);
694 
695 	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
696 	zl->zl_parent = pio;
697 	zl->zl_child = cio;
698 
699 	mutex_enter(&pio->io_lock);
700 	mutex_enter(&cio->io_lock);
701 
702 	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
703 
704 	uint64_t *countp = pio->io_children[cio->io_child_type];
705 	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
706 		countp[w] += !cio->io_state[w];
707 
708 	list_insert_head(&pio->io_child_list, zl);
709 	list_insert_head(&cio->io_parent_list, zl);
710 
711 	mutex_exit(&cio->io_lock);
712 	mutex_exit(&pio->io_lock);
713 }
714 
715 void
716 zio_add_child_first(zio_t *pio, zio_t *cio)
717 {
718 	/*
719 	 * Logical I/Os can have logical, gang, or vdev children.
720 	 * Gang I/Os can have gang or vdev children.
721 	 * Vdev I/Os can only have vdev children.
722 	 * The following ASSERT captures all of these constraints.
723 	 */
724 	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
725 
726 	/* Parent should not have READY stage if child doesn't have it. */
727 	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
728 	    (cio->io_child_type != ZIO_CHILD_VDEV),
729 	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);
730 
731 	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
732 	zl->zl_parent = pio;
733 	zl->zl_child = cio;
734 
735 	ASSERT(list_is_empty(&cio->io_parent_list));
736 	list_insert_head(&cio->io_parent_list, zl);
737 
738 	mutex_enter(&pio->io_lock);
739 
740 	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
741 
742 	uint64_t *countp = pio->io_children[cio->io_child_type];
743 	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
744 		countp[w] += !cio->io_state[w];
745 
746 	list_insert_head(&pio->io_child_list, zl);
747 
748 	mutex_exit(&pio->io_lock);
749 }
750 
751 static void
752 zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
753 {
754 	ASSERT(zl->zl_parent == pio);
755 	ASSERT(zl->zl_child == cio);
756 
757 	mutex_enter(&pio->io_lock);
758 	mutex_enter(&cio->io_lock);
759 
760 	list_remove(&pio->io_child_list, zl);
761 	list_remove(&cio->io_parent_list, zl);
762 
763 	mutex_exit(&cio->io_lock);
764 	mutex_exit(&pio->io_lock);
765 	kmem_cache_free(zio_link_cache, zl);
766 }
767 
768 static boolean_t
769 zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
770 {
771 	boolean_t waiting = B_FALSE;
772 
773 	mutex_enter(&zio->io_lock);
774 	ASSERT(zio->io_stall == NULL);
775 	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
776 		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
777 			continue;
778 
779 		uint64_t *countp = &zio->io_children[c][wait];
780 		if (*countp != 0) {
781 			zio->io_stage >>= 1;
782 			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
783 			zio->io_stall = countp;
784 			waiting = B_TRUE;
785 			break;
786 		}
787 	}
788 	mutex_exit(&zio->io_lock);
789 	return (waiting);
790 }
791 
792 __attribute__((always_inline))
793 static inline void
794 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
795     zio_t **next_to_executep)
796 {
797 	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
798 	int *errorp = &pio->io_child_error[zio->io_child_type];
799 
800 	mutex_enter(&pio->io_lock);
801 	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
802 		*errorp = zio_worst_error(*errorp, zio->io_error);
803 	pio->io_reexecute |= zio->io_reexecute;
804 	ASSERT3U(*countp, >, 0);
805 
806 	(*countp)--;
807 
808 	if (*countp == 0 && pio->io_stall == countp) {
809 		zio_taskq_type_t type =
810 		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
811 		    ZIO_TASKQ_INTERRUPT;
812 		pio->io_stall = NULL;
813 		mutex_exit(&pio->io_lock);
814 
815 		/*
816 		 * If we can tell the caller to execute this parent next, do
817 		 * so. We do this if the parent's zio type matches the child's
818 		 * type, or if it's a zio_null() with no done callback, and so
819 		 * has no actual work to do. Otherwise dispatch the parent zio
820 		 * in its own taskq.
821 		 *
822 		 * Having the caller execute the parent when possible reduces
823 		 * locking on the zio taskqs, reduces context switch
824 		 * overhead, and has no recursion penalty.  Note that one
825 		 * read from disk typically causes at least 3 zio's: a
826 		 * zio_null(), the logical zio_read(), and then a physical
827 		 * zio.  When the physical ZIO completes, we are able to call
828 		 * zio_done() on all 3 of these zio's from one invocation of
829 		 * zio_execute() by returning the parent back to
830 		 * zio_execute().  Since the parent isn't executed until this
831 		 * thread returns back to zio_execute(), the caller should do
832 		 * so promptly.
833 		 *
834 		 * In other cases, dispatching the parent prevents
835 		 * overflowing the stack when we have deeply nested
836 		 * parent-child relationships, as we do with the "mega zio"
837 		 * of writes for spa_sync(), and the chain of ZIL blocks.
838 		 */
839 		if (next_to_executep != NULL && *next_to_executep == NULL &&
840 		    (pio->io_type == zio->io_type ||
841 		    (pio->io_type == ZIO_TYPE_NULL && !pio->io_done))) {
842 			*next_to_executep = pio;
843 		} else {
844 			zio_taskq_dispatch(pio, type, B_FALSE);
845 		}
846 	} else {
847 		mutex_exit(&pio->io_lock);
848 	}
849 }
850 
851 static void
852 zio_inherit_child_errors(zio_t *zio, enum zio_child c)
853 {
854 	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
855 		zio->io_error = zio->io_child_error[c];
856 }
857 
858 int
859 zio_bookmark_compare(const void *x1, const void *x2)
860 {
861 	const zio_t *z1 = x1;
862 	const zio_t *z2 = x2;
863 
864 	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
865 		return (-1);
866 	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
867 		return (1);
868 
869 	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
870 		return (-1);
871 	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
872 		return (1);
873 
874 	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
875 		return (-1);
876 	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
877 		return (1);
878 
879 	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
880 		return (-1);
881 	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
882 		return (1);
883 
884 	if (z1 < z2)
885 		return (-1);
886 	if (z1 > z2)
887 		return (1);
888 
889 	return (0);
890 }
891 
892 /*
893  * ==========================================================================
894  * Create the various types of I/O (read, write, free, etc)
895  * ==========================================================================
896  */
897 static zio_t *
898 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
899     abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
900     void *private, zio_type_t type, zio_priority_t priority,
901     zio_flag_t flags, vdev_t *vd, uint64_t offset,
902     const zbookmark_phys_t *zb, enum zio_stage stage,
903     enum zio_stage pipeline)
904 {
905 	zio_t *zio;
906 
907 	IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
908 	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
909 	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
910 
911 	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
912 	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
913 	ASSERT(vd || stage == ZIO_STAGE_OPEN);
914 
915 	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
916 
917 	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
918 	memset(zio, 0, sizeof (zio_t));
919 
920 	mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
921 	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
922 
923 	list_create(&zio->io_parent_list, sizeof (zio_link_t),
924 	    offsetof(zio_link_t, zl_parent_node));
925 	list_create(&zio->io_child_list, sizeof (zio_link_t),
926 	    offsetof(zio_link_t, zl_child_node));
927 	metaslab_trace_init(&zio->io_alloc_list);
928 
929 	if (vd != NULL)
930 		zio->io_child_type = ZIO_CHILD_VDEV;
931 	else if (flags & ZIO_FLAG_GANG_CHILD)
932 		zio->io_child_type = ZIO_CHILD_GANG;
933 	else if (flags & ZIO_FLAG_DDT_CHILD)
934 		zio->io_child_type = ZIO_CHILD_DDT;
935 	else
936 		zio->io_child_type = ZIO_CHILD_LOGICAL;
937 
938 	if (bp != NULL) {
939 		if (type != ZIO_TYPE_WRITE ||
940 		    zio->io_child_type == ZIO_CHILD_DDT) {
941 			zio->io_bp_copy = *bp;
942 			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
943 		} else {
944 			zio->io_bp = (blkptr_t *)bp;
945 		}
946 		zio->io_bp_orig = *bp;
947 		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
948 			zio->io_logical = zio;
949 		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
950 			pipeline |= ZIO_GANG_STAGES;
951 	}
952 
953 	zio->io_spa = spa;
954 	zio->io_txg = txg;
955 	zio->io_done = done;
956 	zio->io_private = private;
957 	zio->io_type = type;
958 	zio->io_priority = priority;
959 	zio->io_vd = vd;
960 	zio->io_offset = offset;
961 	zio->io_orig_abd = zio->io_abd = data;
962 	zio->io_orig_size = zio->io_size = psize;
963 	zio->io_lsize = lsize;
964 	zio->io_orig_flags = zio->io_flags = flags;
965 	zio->io_orig_stage = zio->io_stage = stage;
966 	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
967 	zio->io_pipeline_trace = ZIO_STAGE_OPEN;
968 	zio->io_allocator = ZIO_ALLOCATOR_NONE;
969 
970 	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
971 	    (pipeline & ZIO_STAGE_READY) == 0;
972 	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
973 
974 	if (zb != NULL)
975 		zio->io_bookmark = *zb;
976 
977 	if (pio != NULL) {
978 		zio->io_metaslab_class = pio->io_metaslab_class;
979 		if (zio->io_logical == NULL)
980 			zio->io_logical = pio->io_logical;
981 		if (zio->io_child_type == ZIO_CHILD_GANG)
982 			zio->io_gang_leader = pio->io_gang_leader;
983 		zio_add_child_first(pio, zio);
984 	}
985 
986 	taskq_init_ent(&zio->io_tqent);
987 
988 	return (zio);
989 }
990 
991 void
992 zio_destroy(zio_t *zio)
993 {
994 	metaslab_trace_fini(&zio->io_alloc_list);
995 	list_destroy(&zio->io_parent_list);
996 	list_destroy(&zio->io_child_list);
997 	mutex_destroy(&zio->io_lock);
998 	cv_destroy(&zio->io_cv);
999 	kmem_cache_free(zio_cache, zio);
1000 }
1001 
1002 /*
1003  * ZIO intended to be between others.  Provides synchronization at READY
1004  * and DONE pipeline stages and calls the respective callbacks.
1005  */
1006 zio_t *
1007 zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
1008     void *private, zio_flag_t flags)
1009 {
1010 	zio_t *zio;
1011 
1012 	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
1013 	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
1014 	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
1015 
1016 	return (zio);
1017 }
1018 
1019 /*
1020  * ZIO intended to be the root of a tree.  Unlike a null ZIO, it does not
1021  * have a READY pipeline stage (it is ready on creation), so it should not
1022  * be used as a child of any ZIO that may need to wait for its
1023  * grandchildren's READY stage (i.e. any other ZIO type).
1024  */
1025 zio_t *
1026 zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
1027 {
1028 	zio_t *zio;
1029 
1030 	zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
1031 	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
1032 	    ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);
1033 
1034 	return (zio);
1035 }
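
/*
 * Typical usage sketch (hypothetical caller): a root zio collects the
 * completion of a set of independent children, e.g.:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, abd, size, NULL, NULL,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *	error = zio_wait(rio);
 */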
1036 
1037 static int
1038 zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
1039     enum blk_verify_flag blk_verify, const char *fmt, ...)
1040 {
1041 	va_list adx;
1042 	char buf[256];
1043 
1044 	va_start(adx, fmt);
1045 	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
1046 	va_end(adx);
1047 
1048 	zfs_dbgmsg("bad blkptr at %px: "
1049 	    "DVA[0]=%#llx/%#llx "
1050 	    "DVA[1]=%#llx/%#llx "
1051 	    "DVA[2]=%#llx/%#llx "
1052 	    "prop=%#llx "
1053 	    "pad=%#llx,%#llx "
1054 	    "phys_birth=%#llx "
1055 	    "birth=%#llx "
1056 	    "fill=%#llx "
1057 	    "cksum=%#llx/%#llx/%#llx/%#llx",
1058 	    bp,
1059 	    (long long)bp->blk_dva[0].dva_word[0],
1060 	    (long long)bp->blk_dva[0].dva_word[1],
1061 	    (long long)bp->blk_dva[1].dva_word[0],
1062 	    (long long)bp->blk_dva[1].dva_word[1],
1063 	    (long long)bp->blk_dva[2].dva_word[0],
1064 	    (long long)bp->blk_dva[2].dva_word[1],
1065 	    (long long)bp->blk_prop,
1066 	    (long long)bp->blk_pad[0],
1067 	    (long long)bp->blk_pad[1],
1068 	    (long long)BP_GET_PHYSICAL_BIRTH(bp),
1069 	    (long long)BP_GET_LOGICAL_BIRTH(bp),
1070 	    (long long)bp->blk_fill,
1071 	    (long long)bp->blk_cksum.zc_word[0],
1072 	    (long long)bp->blk_cksum.zc_word[1],
1073 	    (long long)bp->blk_cksum.zc_word[2],
1074 	    (long long)bp->blk_cksum.zc_word[3]);
1075 	switch (blk_verify) {
1076 	case BLK_VERIFY_HALT:
1077 		zfs_panic_recover("%s: %s", spa_name(spa), buf);
1078 		break;
1079 	case BLK_VERIFY_LOG:
1080 		zfs_dbgmsg("%s: %s", spa_name(spa), buf);
1081 		break;
1082 	case BLK_VERIFY_ONLY:
1083 		break;
1084 	}
1085 
1086 	return (1);
1087 }
1088 
1089 /*
1090  * Verify the block pointer fields contain reasonable values.  This means
1091  * it only contains known object types, checksum/compression identifiers,
1092  * block sizes within the maximum allowed limits, valid DVAs, etc.
1093  *
1094  * If everything checks out, B_TRUE is returned.  The blk_verify
1095  * argument controls the behavior when an invalid field is detected.
1096  *
1097  * Values for blk_verify_flag:
1098  *   BLK_VERIFY_ONLY: evaluate the block
1099  *   BLK_VERIFY_LOG: evaluate the block and log problems
1100  *   BLK_VERIFY_HALT: call zfs_panic_recover on error
1101  *
1102  * Values for blk_config_flag:
1103  *   BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
1104  *   BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
1105  *   obtained for reader
1106  *   BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
1107  *   performance
1108  */
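
/*
 * For example, zio_free() below passes BLK_CONFIG_NEEDED and BLK_VERIFY_HALT:
 * it holds no config lock, and an invalid block pointer is treated as fatal
 * via zfs_panic_recover().
 */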
1109 boolean_t
1110 zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
1111     enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
1112 {
1113 	int errors = 0;
1114 
1115 	if (unlikely(!DMU_OT_IS_VALID(BP_GET_TYPE(bp)))) {
1116 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1117 		    "blkptr at %px has invalid TYPE %llu",
1118 		    bp, (longlong_t)BP_GET_TYPE(bp));
1119 	}
1120 	if (unlikely(BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS)) {
1121 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1122 		    "blkptr at %px has invalid COMPRESS %llu",
1123 		    bp, (longlong_t)BP_GET_COMPRESS(bp));
1124 	}
1125 	if (unlikely(BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE)) {
1126 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1127 		    "blkptr at %px has invalid LSIZE %llu",
1128 		    bp, (longlong_t)BP_GET_LSIZE(bp));
1129 	}
1130 	if (BP_IS_EMBEDDED(bp)) {
1131 		if (unlikely(BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES)) {
1132 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1133 			    "blkptr at %px has invalid ETYPE %llu",
1134 			    bp, (longlong_t)BPE_GET_ETYPE(bp));
1135 		}
1136 		if (unlikely(BPE_GET_PSIZE(bp) > BPE_PAYLOAD_SIZE)) {
1137 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1138 			    "blkptr at %px has invalid PSIZE %llu",
1139 			    bp, (longlong_t)BPE_GET_PSIZE(bp));
1140 		}
1141 		return (errors == 0);
1142 	}
1143 	if (unlikely(BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS)) {
1144 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1145 		    "blkptr at %px has invalid CHECKSUM %llu",
1146 		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
1147 	}
1148 	if (unlikely(BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE)) {
1149 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1150 		    "blkptr at %px has invalid PSIZE %llu",
1151 		    bp, (longlong_t)BP_GET_PSIZE(bp));
1152 	}
1153 
1154 	/*
1155 	 * Do not verify individual DVAs if the config is not trusted. This
1156 	 * will be done once the zio is executed in vdev_mirror_map_alloc.
1157 	 */
1158 	if (unlikely(!spa->spa_trust_config))
1159 		return (errors == 0);
1160 
1161 	switch (blk_config) {
1162 	case BLK_CONFIG_HELD:
1163 		ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
1164 		break;
1165 	case BLK_CONFIG_NEEDED:
1166 		spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
1167 		break;
1168 	case BLK_CONFIG_SKIP:
1169 		return (errors == 0);
1170 	default:
1171 		panic("invalid blk_config %u", blk_config);
1172 	}
1173 
1174 	/*
1175 	 * Pool-specific checks.
1176 	 *
1177 	 * Note: it would be nice to verify that the logical birth
1178 	 * and physical birth are not too large.  However,
1179 	 * spa_freeze() allows the birth time of log blocks (and
1180 	 * dmu_sync()-ed blocks that are in the log) to be arbitrarily
1181 	 * large.
1182 	 */
1183 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
1184 		const dva_t *dva = &bp->blk_dva[i];
1185 		uint64_t vdevid = DVA_GET_VDEV(dva);
1186 
1187 		if (unlikely(vdevid >= spa->spa_root_vdev->vdev_children)) {
1188 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1189 			    "blkptr at %px DVA %u has invalid VDEV %llu",
1190 			    bp, i, (longlong_t)vdevid);
1191 			continue;
1192 		}
1193 		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1194 		if (unlikely(vd == NULL)) {
1195 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1196 			    "blkptr at %px DVA %u has invalid VDEV %llu",
1197 			    bp, i, (longlong_t)vdevid);
1198 			continue;
1199 		}
1200 		if (unlikely(vd->vdev_ops == &vdev_hole_ops)) {
1201 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1202 			    "blkptr at %px DVA %u has hole VDEV %llu",
1203 			    bp, i, (longlong_t)vdevid);
1204 			continue;
1205 		}
1206 		if (vd->vdev_ops == &vdev_missing_ops) {
1207 			/*
1208 			 * "missing" vdevs are valid during import, but we
1209 			 * don't have their detailed info (e.g. asize), so
1210 			 * we can't perform any more checks on them.
1211 			 */
1212 			continue;
1213 		}
1214 		uint64_t offset = DVA_GET_OFFSET(dva);
1215 		uint64_t asize = DVA_GET_ASIZE(dva);
1216 		if (DVA_GET_GANG(dva))
1217 			asize = vdev_gang_header_asize(vd);
1218 		if (unlikely(offset + asize > vd->vdev_asize)) {
1219 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1220 			    "blkptr at %px DVA %u has invalid OFFSET %llu",
1221 			    bp, i, (longlong_t)offset);
1222 		}
1223 	}
1224 	if (blk_config == BLK_CONFIG_NEEDED)
1225 		spa_config_exit(spa, SCL_VDEV, bp);
1226 
1227 	return (errors == 0);
1228 }
1229 
1230 boolean_t
1231 zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
1232 {
1233 	(void) bp;
1234 	uint64_t vdevid = DVA_GET_VDEV(dva);
1235 
1236 	if (vdevid >= spa->spa_root_vdev->vdev_children)
1237 		return (B_FALSE);
1238 
1239 	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1240 	if (vd == NULL)
1241 		return (B_FALSE);
1242 
1243 	if (vd->vdev_ops == &vdev_hole_ops)
1244 		return (B_FALSE);
1245 
1246 	if (vd->vdev_ops == &vdev_missing_ops) {
1247 		return (B_FALSE);
1248 	}
1249 
1250 	uint64_t offset = DVA_GET_OFFSET(dva);
1251 	uint64_t asize = DVA_GET_ASIZE(dva);
1252 
1253 	if (DVA_GET_GANG(dva))
1254 		asize = vdev_gang_header_asize(vd);
1255 	if (offset + asize > vd->vdev_asize)
1256 		return (B_FALSE);
1257 
1258 	return (B_TRUE);
1259 }
1260 
1261 zio_t *
1262 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
1263     abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
1264     zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
1265 {
1266 	zio_t *zio;
1267 
1268 	zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp,
1269 	    data, size, size, done, private,
1270 	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
1271 	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
1272 	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
1273 
1274 	return (zio);
1275 }
1276 
1277 zio_t *
1278 zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
1279     abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
1280     zio_done_func_t *ready, zio_done_func_t *children_ready,
1281     zio_done_func_t *done, void *private, zio_priority_t priority,
1282     zio_flag_t flags, const zbookmark_phys_t *zb)
1283 {
1284 	zio_t *zio;
1285 
1286 	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
1287 	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
1288 	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
1289 	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
1290 	    DMU_OT_IS_VALID(zp->zp_type) &&
1291 	    zp->zp_level < 32 &&
1292 	    zp->zp_copies > 0 &&
1293 	    zp->zp_copies <= spa_max_replication(spa));
1294 
1295 	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
1296 	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
1297 	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
1298 	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
1299 
1300 	zio->io_ready = ready;
1301 	zio->io_children_ready = children_ready;
1302 	zio->io_prop = *zp;
1303 
1304 	/*
1305 	 * Data can be NULL if we are going to call zio_write_override() to
1306 	 * provide the already-allocated BP.  But we may need the data to
1307 	 * verify a dedup hit (if requested).  In this case, don't try to
1308 	 * dedup (just take the already-allocated BP verbatim). Encrypted
1309 	 * dedup blocks need data as well so we also disable dedup in this
1310 	 * case.
1311 	 */
1312 	if (data == NULL &&
1313 	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
1314 		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
1315 	}
1316 
1317 	return (zio);
1318 }
1319 
1320 zio_t *
1321 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
1322     uint64_t size, zio_done_func_t *done, void *private,
1323     zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
1324 {
1325 	zio_t *zio;
1326 
1327 	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
1328 	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
1329 	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
1330 
1331 	return (zio);
1332 }
1333 
1334 void
1335 zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
1336     boolean_t brtwrite)
1337 {
1338 	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
1339 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1340 	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
1341 	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
1342 	ASSERT(!brtwrite || !nopwrite);
1343 
1344 	/*
1345 	 * We must reset the io_prop to match the values that existed
1346 	 * when the bp was first written by dmu_sync(), keeping in mind
1347 	 * that nopwrite and dedup are mutually exclusive.
1348 	 */
1349 	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
1350 	zio->io_prop.zp_nopwrite = nopwrite;
1351 	zio->io_prop.zp_brtwrite = brtwrite;
1352 	zio->io_prop.zp_copies = copies;
1353 	zio->io_bp_override = bp;
1354 }
1355 
1356 void
1357 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
1358 {
1359 
1360 	(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
1361 
1362 	/*
1363 	 * The check for EMBEDDED is a performance optimization.  We
1364 	 * process the free here (by ignoring it) rather than
1365 	 * putting it on the list and then processing it in zio_free_sync().
1366 	 */
1367 	if (BP_IS_EMBEDDED(bp))
1368 		return;
1369 
1370 	/*
1371 	 * Frees that are for the currently-syncing txg, are not going to be
1372 	 * deferred, and which will not need to do a read (i.e. not GANG or
1373 	 * DEDUP), can be processed immediately.  Otherwise, put them on the
1374 	 * in-memory list for later processing.
1375 	 *
1376 	 * Note that we only defer frees after zfs_sync_pass_deferred_free
1377 	 * when the log space map feature is disabled. [see relevant comment
1378 	 * in spa_sync_iterate_to_convergence()]
1379 	 */
1380 	if (BP_IS_GANG(bp) ||
1381 	    BP_GET_DEDUP(bp) ||
1382 	    txg != spa->spa_syncing_txg ||
1383 	    (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
1384 	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
1385 	    brt_maybe_exists(spa, bp)) {
1386 		metaslab_check_free(spa, bp);
1387 		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
1388 	} else {
1389 		VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
1390 	}
1391 }
1392 
1393 /*
1394  * To improve performance, this function may return NULL if we were able
1395  * to do the free immediately.  This avoids the cost of creating a zio
1396  * (and linking it to the parent, etc).
1397  */
1398 zio_t *
1399 zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1400     zio_flag_t flags)
1401 {
1402 	ASSERT(!BP_IS_HOLE(bp));
1403 	ASSERT(spa_syncing_txg(spa) == txg);
1404 
1405 	if (BP_IS_EMBEDDED(bp))
1406 		return (NULL);
1407 
1408 	metaslab_check_free(spa, bp);
1409 	arc_freed(spa, bp);
1410 	dsl_scan_freed(spa, bp);
1411 
1412 	if (BP_IS_GANG(bp) ||
1413 	    BP_GET_DEDUP(bp) ||
1414 	    brt_maybe_exists(spa, bp)) {
1415 		/*
1416 		 * GANG, DEDUP and BRT blocks can induce a read (for the gang
1417 		 * block header, the DDT or the BRT), so issue them
1418 		 * asynchronously so that this thread is not tied up.
1419 		 */
1420 		enum zio_stage stage =
1421 		    ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
1422 
1423 		return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1424 		    BP_GET_PSIZE(bp), NULL, NULL,
1425 		    ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
1426 		    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
1427 	} else {
1428 		metaslab_free(spa, bp, txg, B_FALSE);
1429 		return (NULL);
1430 	}
1431 }
1432 
1433 zio_t *
1434 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1435     zio_done_func_t *done, void *private, zio_flag_t flags)
1436 {
1437 	zio_t *zio;
1438 
1439 	(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
1440 	    BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
1441 
1442 	if (BP_IS_EMBEDDED(bp))
1443 		return (zio_null(pio, spa, NULL, NULL, NULL, 0));
1444 
1445 	/*
1446 	 * A claim is an allocation of a specific block.  Claims are needed
1447 	 * to support immediate writes in the intent log.  The issue is that
1448 	 * immediate writes contain committed data, but in a txg that was
1449 	 * *not* committed.  Upon opening the pool after an unclean shutdown,
1450 	 * the intent log claims all blocks that contain immediate write data
1451 	 * so that the SPA knows they're in use.
1452 	 *
1453 	 * All claims *must* be resolved in the first txg -- before the SPA
1454 	 * starts allocating blocks -- so that nothing is allocated twice.
1455 	 * If txg == 0 we just verify that the block is claimable.
1456 	 */
1457 	ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
1458 	    spa_min_claim_txg(spa));
1459 	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
1460 	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(8) */
1461 
1462 	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1463 	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
1464 	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
1465 	ASSERT0(zio->io_queued_timestamp);
1466 
1467 	return (zio);
1468 }
1469 
1470 zio_t *
1471 zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1472     zio_done_func_t *done, void *private, zio_priority_t priority,
1473     zio_flag_t flags, enum trim_flag trim_flags)
1474 {
1475 	zio_t *zio;
1476 
1477 	ASSERT0(vd->vdev_children);
1478 	ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
1479 	ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
1480 	ASSERT3U(size, !=, 0);
1481 
1482 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
1483 	    private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
1484 	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
1485 	zio->io_trim_flags = trim_flags;
1486 
1487 	return (zio);
1488 }
1489 
1490 zio_t *
1491 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1492     abd_t *data, int checksum, zio_done_func_t *done, void *private,
1493     zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1494 {
1495 	zio_t *zio;
1496 
1497 	ASSERT(vd->vdev_children == 0);
1498 	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1499 	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1500 	ASSERT3U(offset + size, <=, vd->vdev_psize);
1501 
1502 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1503 	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1504 	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
1505 
1506 	zio->io_prop.zp_checksum = checksum;
1507 
1508 	return (zio);
1509 }
1510 
1511 zio_t *
1512 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1513     abd_t *data, int checksum, zio_done_func_t *done, void *private,
1514     zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1515 {
1516 	zio_t *zio;
1517 
1518 	ASSERT(vd->vdev_children == 0);
1519 	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1520 	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1521 	ASSERT3U(offset + size, <=, vd->vdev_psize);
1522 
1523 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1524 	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1525 	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
1526 
1527 	zio->io_prop.zp_checksum = checksum;
1528 
1529 	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
1530 		/*
1531 		 * zec checksums are necessarily destructive -- they modify
1532 		 * the end of the write buffer to hold the verifier/checksum.
1533 		 * Therefore, we must make a local copy in case the data is
1534 		 * being written to multiple places in parallel.
1535 		 */
1536 		abd_t *wbuf = abd_alloc_sametype(data, size);
1537 		abd_copy(wbuf, data, size);
1538 
1539 		zio_push_transform(zio, wbuf, size, size, NULL);
1540 	}
1541 
1542 	return (zio);
1543 }
1544 
1545 /*
1546  * Create a child I/O to do some work for us.
1547  */
1548 zio_t *
1549 zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
1550     abd_t *data, uint64_t size, int type, zio_priority_t priority,
1551     zio_flag_t flags, zio_done_func_t *done, void *private)
1552 {
1553 	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
1554 	zio_t *zio;
1555 
1556 	/*
1557 	 * vdev child I/Os do not propagate their error to the parent.
1558 	 * Therefore, for correct operation the caller *must* check for
1559 	 * and handle the error in the child i/o's done callback.
1560 	 * The only exceptions are i/os that we don't care about
1561 	 * (OPTIONAL or REPAIR).
1562 	 */
1563 	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
1564 	    done != NULL);
1565 
1566 	if (type == ZIO_TYPE_READ && bp != NULL) {
1567 		/*
1568 		 * If we have the bp, then the child should perform the
1569 		 * checksum and the parent need not.  This pushes error
1570 		 * detection as close to the leaves as possible and
1571 		 * eliminates redundant checksums in the interior nodes.
1572 		 */
1573 		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
1574 		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
1575 	}
1576 
1577 	if (vd->vdev_ops->vdev_op_leaf) {
1578 		ASSERT0(vd->vdev_children);
1579 		offset += VDEV_LABEL_START_SIZE;
1580 	}
1581 
1582 	flags |= ZIO_VDEV_CHILD_FLAGS(pio);
1583 
1584 	/*
1585 	 * If we've decided to do a repair, the write is not speculative --
1586 	 * even if the original read was.
1587 	 */
1588 	if (flags & ZIO_FLAG_IO_REPAIR)
1589 		flags &= ~ZIO_FLAG_SPECULATIVE;
1590 
1591 	/*
1592 	 * If we're creating a child I/O that is not associated with a
1593 	 * top-level vdev, then the child zio is not an allocating I/O.
1594 	 * If this is a retried I/O then we ignore it since we will
1595 	 * have already processed the original allocating I/O.
1596 	 */
1597 	if (flags & ZIO_FLAG_IO_ALLOCATING &&
1598 	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
1599 		ASSERT(pio->io_metaslab_class != NULL);
1600 		ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
1601 		ASSERT(type == ZIO_TYPE_WRITE);
1602 		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
1603 		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
1604 		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
1605 		    pio->io_child_type == ZIO_CHILD_GANG);
1606 
1607 		flags &= ~ZIO_FLAG_IO_ALLOCATING;
1608 	}
1609 
1610 	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
1611 	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
1612 	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
1613 	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
1614 
1615 	return (zio);
1616 }
1617 
1618 zio_t *
1619 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
1620     zio_type_t type, zio_priority_t priority, zio_flag_t flags,
1621     zio_done_func_t *done, void *private)
1622 {
1623 	zio_t *zio;
1624 
1625 	ASSERT(vd->vdev_ops->vdev_op_leaf);
1626 
1627 	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
1628 	    data, size, size, done, private, type, priority,
1629 	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
1630 	    vd, offset, NULL,
1631 	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
1632 
1633 	return (zio);
1634 }
1635 
1636 
1637 /*
1638  * Send a flush command to the given vdev. Unlike most zio creation functions,
1639  * the flush zios are issued immediately. You can wait on pio to pause until
1640  * the flushes complete.
1641  */
1642 void
1643 zio_flush(zio_t *pio, vdev_t *vd)
1644 {
1645 	const zio_flag_t flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
1646 	    ZIO_FLAG_DONT_RETRY;
1647 
1648 	if (vd->vdev_nowritecache)
1649 		return;
1650 
1651 	if (vd->vdev_children == 0) {
1652 		zio_nowait(zio_create(pio, vd->vdev_spa, 0, NULL, NULL, 0, 0,
1653 		    NULL, NULL, ZIO_TYPE_FLUSH, ZIO_PRIORITY_NOW, flags, vd, 0,
1654 		    NULL, ZIO_STAGE_OPEN, ZIO_FLUSH_PIPELINE));
1655 	} else {
1656 		for (uint64_t c = 0; c < vd->vdev_children; c++)
1657 			zio_flush(pio, vd->vdev_child[c]);
1658 	}
1659 }
1660 
1661 void
1662 zio_shrink(zio_t *zio, uint64_t size)
1663 {
1664 	ASSERT3P(zio->io_executor, ==, NULL);
1665 	ASSERT3U(zio->io_orig_size, ==, zio->io_size);
1666 	ASSERT3U(size, <=, zio->io_size);
1667 
1668 	/*
1669 	 * We don't shrink for raidz because of problems with the
1670 	 * reconstruction when reading back less than the block size.
1671 	 * Note, BP_IS_RAIDZ() assumes no compression.
1672 	 */
1673 	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1674 	if (!BP_IS_RAIDZ(zio->io_bp)) {
1675 		/* we are not doing a raw write */
1676 		ASSERT3U(zio->io_size, ==, zio->io_lsize);
1677 		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
1678 	}
1679 }
1680 
1681 /*
1682  * Round the provided allocation size up to a value that can be allocated
1683  * by at least some vdev(s) in the pool with minimal or no additional
1684  * padding, and without extra space usage on others.
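 *
 * For example (hypothetical values): with spa_min_alloc = 4096 and
 * spa_gcd_alloc = 4096, a 6000-byte request is rounded up to 8192 bytes,
 * while a 2000-byte request simply returns spa_min_alloc (4096 bytes).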
1685  */
1686 static uint64_t
1687 zio_roundup_alloc_size(spa_t *spa, uint64_t size)
1688 {
1689 	if (size > spa->spa_min_alloc)
1690 		return (roundup(size, spa->spa_gcd_alloc));
1691 	return (spa->spa_min_alloc);
1692 }
1693 
1694 /*
1695  * ==========================================================================
1696  * Prepare to read and write logical blocks
1697  * ==========================================================================
1698  */
1699 
1700 static zio_t *
1701 zio_read_bp_init(zio_t *zio)
1702 {
1703 	blkptr_t *bp = zio->io_bp;
1704 	uint64_t psize =
1705 	    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
1706 
1707 	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1708 
1709 	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
1710 	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
1711 	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1712 		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1713 		    psize, psize, zio_decompress);
1714 	}
1715 
1716 	if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
1717 	    BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
1718 	    zio->io_child_type == ZIO_CHILD_LOGICAL) {
1719 		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1720 		    psize, psize, zio_decrypt);
1721 	}
1722 
1723 	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
1724 		int psize = BPE_GET_PSIZE(bp);
1725 		void *data = abd_borrow_buf(zio->io_abd, psize);
1726 
1727 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1728 		decode_embedded_bp_compressed(bp, data);
1729 		abd_return_buf_copy(zio->io_abd, data, psize);
1730 	} else {
1731 		ASSERT(!BP_IS_EMBEDDED(bp));
1732 	}
1733 
1734 	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
1735 		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
1736 
1737 	return (zio);
1738 }
1739 
1740 static zio_t *
1741 zio_write_bp_init(zio_t *zio)
1742 {
1743 	if (!IO_IS_ALLOCATING(zio))
1744 		return (zio);
1745 
1746 	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1747 
1748 	if (zio->io_bp_override) {
1749 		blkptr_t *bp = zio->io_bp;
1750 		zio_prop_t *zp = &zio->io_prop;
1751 
1752 		ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg);
1753 
1754 		*bp = *zio->io_bp_override;
1755 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1756 
1757 		if (zp->zp_brtwrite)
1758 			return (zio);
1759 
1760 		ASSERT(!BP_GET_DEDUP(zio->io_bp_override));
1761 
1762 		if (BP_IS_EMBEDDED(bp))
1763 			return (zio);
1764 
1765 		/*
1766 		 * If we've been overridden and nopwrite is set then
1767 		 * set the flag accordingly to indicate that a nopwrite
1768 		 * has already occurred.
1769 		 */
1770 		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
1771 			ASSERT(!zp->zp_dedup);
1772 			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
1773 			zio->io_flags |= ZIO_FLAG_NOPWRITE;
1774 			return (zio);
1775 		}
1776 
1777 		ASSERT(!zp->zp_nopwrite);
1778 
1779 		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
1780 			return (zio);
1781 
1782 		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
1783 		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
1784 
1785 		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
1786 		    !zp->zp_encrypt) {
1787 			BP_SET_DEDUP(bp, 1);
1788 			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
1789 			return (zio);
1790 		}
1791 
1792 		/*
1793 		 * We were unable to handle this as an override bp, treat
1794 		 * it as a regular write I/O.
1795 		 */
1796 		zio->io_bp_override = NULL;
1797 		*bp = zio->io_bp_orig;
1798 		zio->io_pipeline = zio->io_orig_pipeline;
1799 	}
1800 
1801 	return (zio);
1802 }
1803 
1804 static zio_t *
1805 zio_write_compress(zio_t *zio)
1806 {
1807 	spa_t *spa = zio->io_spa;
1808 	zio_prop_t *zp = &zio->io_prop;
1809 	enum zio_compress compress = zp->zp_compress;
1810 	blkptr_t *bp = zio->io_bp;
1811 	uint64_t lsize = zio->io_lsize;
1812 	uint64_t psize = zio->io_size;
1813 	uint32_t pass = 1;
1814 
1815 	/*
1816 	 * If our children haven't all reached the ready stage,
1817 	 * wait for them and then repeat this pipeline stage.
1818 	 */
1819 	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
1820 	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
1821 		return (NULL);
1822 	}
1823 
1824 	if (!IO_IS_ALLOCATING(zio))
1825 		return (zio);
1826 
1827 	if (zio->io_children_ready != NULL) {
1828 		/*
1829 		 * Now that all our children are ready, run the callback
1830 		 * associated with this zio in case it wants to modify the
1831 		 * data to be written.
1832 		 */
1833 		ASSERT3U(zp->zp_level, >, 0);
1834 		zio->io_children_ready(zio);
1835 	}
1836 
1837 	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1838 	ASSERT(zio->io_bp_override == NULL);
1839 
1840 	if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) {
1841 		/*
1842 		 * We're rewriting an existing block, which means we're
1843 		 * working on behalf of spa_sync().  For spa_sync() to
1844 		 * converge, it must eventually be the case that we don't
1845 		 * have to allocate new blocks.  But compression changes
1846 		 * the blocksize, which forces a reallocate, and makes
1847 		 * convergence take longer.  Therefore, after the first
1848 		 * few passes, stop compressing to ensure convergence.
1849 		 */
1850 		pass = spa_sync_pass(spa);
1851 
1852 		ASSERT(zio->io_txg == spa_syncing_txg(spa));
1853 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1854 		ASSERT(!BP_GET_DEDUP(bp));
1855 
1856 		if (pass >= zfs_sync_pass_dont_compress)
1857 			compress = ZIO_COMPRESS_OFF;
1858 
1859 		/* Make sure someone doesn't change their mind on overwrites */
1860 		ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) ||
1861 		    MIN(zp->zp_copies, spa_max_replication(spa))
1862 		    == BP_GET_NDVAS(bp));
1863 	}
1864 
1865 	/* If it's a compressed write that is not raw, compress the buffer. */
1866 	if (compress != ZIO_COMPRESS_OFF &&
1867 	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1868 		abd_t *cabd = NULL;
1869 		if (abd_cmp_zero(zio->io_abd, lsize) == 0)
1870 			psize = 0;
1871 		else if (compress == ZIO_COMPRESS_EMPTY)
1872 			psize = lsize;
1873 		else
1874 			psize = zio_compress_data(compress, zio->io_abd, &cabd,
1875 			    lsize, zp->zp_complevel);
1876 		if (psize == 0) {
1877 			compress = ZIO_COMPRESS_OFF;
1878 		} else if (psize >= lsize) {
1879 			compress = ZIO_COMPRESS_OFF;
1880 			if (cabd != NULL)
1881 				abd_free(cabd);
1882 		} else if (!zp->zp_dedup && !zp->zp_encrypt &&
1883 		    psize <= BPE_PAYLOAD_SIZE &&
1884 		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
1885 		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
1886 			void *cbuf = abd_borrow_buf_copy(cabd, lsize);
1887 			encode_embedded_bp_compressed(bp,
1888 			    cbuf, compress, lsize, psize);
1889 			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
1890 			BP_SET_TYPE(bp, zio->io_prop.zp_type);
1891 			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
1892 			abd_return_buf(cabd, cbuf, lsize);
1893 			abd_free(cabd);
1894 			BP_SET_LOGICAL_BIRTH(bp, zio->io_txg);
1895 			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1896 			ASSERT(spa_feature_is_active(spa,
1897 			    SPA_FEATURE_EMBEDDED_DATA));
1898 			return (zio);
1899 		} else {
1900 			/*
1901 			 * Round compressed size up to the minimum allocation
1902 			 * size of the smallest-ashift device, and zero the
1903 			 * tail. This ensures that the compressed size of the
1904 			 * BP (and thus the compressratio property) is correct,
1905 			 * in that we charge for the padding used to fill out
1906 			 * the last sector.
1907 			 */
1908 			size_t rounded = (size_t)zio_roundup_alloc_size(spa,
1909 			    psize);
1910 			if (rounded >= lsize) {
1911 				compress = ZIO_COMPRESS_OFF;
1912 				abd_free(cabd);
1913 				psize = lsize;
1914 			} else {
1915 				abd_zero_off(cabd, psize, rounded - psize);
1916 				psize = rounded;
1917 				zio_push_transform(zio, cabd,
1918 				    psize, lsize, NULL);
1919 			}
1920 		}
1921 
1922 		/*
1923 		 * We were unable to handle this as an override bp, treat
1924 		 * it as a regular write I/O.
1925 		 */
1926 		zio->io_bp_override = NULL;
1927 		*bp = zio->io_bp_orig;
1928 		zio->io_pipeline = zio->io_orig_pipeline;
1929 
1930 	} else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
1931 	    zp->zp_type == DMU_OT_DNODE) {
1932 		/*
1933 		 * The DMU actually relies on the zio layer's compression
1934 		 * to free metadnode blocks that have had all contained
1935 		 * dnodes freed. As a result, even when doing a raw
1936 		 * receive, we must check whether the block can be compressed
1937 		 * to a hole.
1938 		 */
1939 		if (abd_cmp_zero(zio->io_abd, lsize) == 0) {
1940 			psize = 0;
1941 			compress = ZIO_COMPRESS_OFF;
1942 		} else {
1943 			psize = lsize;
1944 		}
1945 	} else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
1946 	    !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
1947 		/*
1948 		 * If we are raw receiving an encrypted dataset we should not
1949 		 * take this codepath because it will change the on-disk block
1950 		 * and decryption will fail.
1951 		 */
1952 		size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize),
1953 		    lsize);
1954 
1955 		if (rounded != psize) {
1956 			abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
1957 			abd_zero_off(cdata, psize, rounded - psize);
1958 			abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
1959 			psize = rounded;
1960 			zio_push_transform(zio, cdata,
1961 			    psize, rounded, NULL);
1962 		}
1963 	} else {
1964 		ASSERT3U(psize, !=, 0);
1965 	}
1966 
1967 	/*
1968 	 * The final pass of spa_sync() must be all rewrites, but the first
1969 	 * few passes offer a trade-off: allocating blocks defers convergence,
1970 	 * but newly allocated blocks are sequential, so they can be written
1971 	 * to disk faster.  Therefore, we allow the first few passes of
1972 	 * spa_sync() to allocate new blocks, but force rewrites after that.
1973 	 * There should only be a handful of blocks after pass 1 in any case.
1974 	 */
1975 	if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg &&
1976 	    BP_GET_PSIZE(bp) == psize &&
1977 	    pass >= zfs_sync_pass_rewrite) {
1978 		VERIFY3U(psize, !=, 0);
1979 		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
1980 
1981 		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
1982 		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
1983 	} else {
1984 		BP_ZERO(bp);
1985 		zio->io_pipeline = ZIO_WRITE_PIPELINE;
1986 	}
1987 
1988 	if (psize == 0) {
1989 		if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 &&
1990 		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
1991 			BP_SET_LSIZE(bp, lsize);
1992 			BP_SET_TYPE(bp, zp->zp_type);
1993 			BP_SET_LEVEL(bp, zp->zp_level);
1994 			BP_SET_BIRTH(bp, zio->io_txg, 0);
1995 		}
1996 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1997 	} else {
1998 		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
1999 		BP_SET_LSIZE(bp, lsize);
2000 		BP_SET_TYPE(bp, zp->zp_type);
2001 		BP_SET_LEVEL(bp, zp->zp_level);
2002 		BP_SET_PSIZE(bp, psize);
2003 		BP_SET_COMPRESS(bp, compress);
2004 		BP_SET_CHECKSUM(bp, zp->zp_checksum);
2005 		BP_SET_DEDUP(bp, zp->zp_dedup);
2006 		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
2007 		if (zp->zp_dedup) {
2008 			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2009 			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
2010 			ASSERT(!zp->zp_encrypt ||
2011 			    DMU_OT_IS_ENCRYPTED(zp->zp_type));
2012 			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
2013 		}
2014 		if (zp->zp_nopwrite) {
2015 			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2016 			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
2017 			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
2018 		}
2019 	}
2020 	return (zio);
2021 }
2022 
2023 static zio_t *
2024 zio_free_bp_init(zio_t *zio)
2025 {
2026 	blkptr_t *bp = zio->io_bp;
2027 
2028 	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
2029 		if (BP_GET_DEDUP(bp))
2030 			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
2031 	}
2032 
2033 	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
2034 
2035 	return (zio);
2036 }
2037 
2038 /*
2039  * ==========================================================================
2040  * Execute the I/O pipeline
2041  * ==========================================================================
2042  */
2043 
2044 static void
2045 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
2046 {
2047 	spa_t *spa = zio->io_spa;
2048 	zio_type_t t = zio->io_type;
2049 
2050 	/*
2051 	 * If we're a config writer or a probe, the normal issue and
2052 	 * interrupt threads may all be blocked waiting for the config lock.
2053 	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
2054 	 */
2055 	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
2056 		t = ZIO_TYPE_NULL;
2057 
2058 	/*
2059 	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
2060 	 */
2061 	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
2062 		t = ZIO_TYPE_NULL;
2063 
2064 	/*
2065 	 * If this is a high priority I/O, then use the high priority taskq if
2066 	 * available or cut the line otherwise.
2067 	 */
2068 	if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) {
2069 		if (spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
2070 			q++;
2071 		else
2072 			cutinline = B_TRUE;
2073 	}
2074 
2075 	ASSERT3U(q, <, ZIO_TASKQ_TYPES);
2076 
2077 	spa_taskq_dispatch(spa, t, q, zio_execute, zio, cutinline);
2078 }
2079 
2080 static boolean_t
2081 zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
2082 {
2083 	spa_t *spa = zio->io_spa;
2084 
2085 	taskq_t *tq = taskq_of_curthread();
2086 
2087 	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
2088 		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
2089 		uint_t i;
2090 		for (i = 0; i < tqs->stqs_count; i++) {
2091 			if (tqs->stqs_taskq[i] == tq)
2092 				return (B_TRUE);
2093 		}
2094 	}
2095 
2096 	return (B_FALSE);
2097 }
2098 
2099 static zio_t *
2100 zio_issue_async(zio_t *zio)
2101 {
2102 	ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio));
2103 	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
2104 	return (NULL);
2105 }
2106 
2107 void
2108 zio_interrupt(void *zio)
2109 {
2110 	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
2111 }
2112 
2113 void
2114 zio_delay_interrupt(zio_t *zio)
2115 {
2116 	/*
2117 	 * The timeout_generic() function isn't defined in userspace, so
2118 	 * rather than trying to implement the function, the zio delay
2119 	 * functionality has been disabled for userspace builds.
2120 	 */
2121 
2122 #ifdef _KERNEL
2123 	/*
2124 	 * If io_target_timestamp is zero, then no delay has been registered
2125 	 * for this IO, so jump to the end of this function and "skip" the
2126 	 * delay, issuing the IO directly to the zio layer.
2127 	 */
2128 	if (zio->io_target_timestamp != 0) {
2129 		hrtime_t now = gethrtime();
2130 
2131 		if (now >= zio->io_target_timestamp) {
2132 			/*
2133 			 * This IO has already taken longer than the target
2134 			 * delay to complete, so we don't want to delay it
2135 			 * any longer; we "miss" the delay and issue it
2136 			 * directly to the zio layer. This is likely due to
2137 			 * the target latency being set to a value less than
2138 			 * the underlying hardware can satisfy (e.g. delay
2139 			 * set to 1ms, but the disks take 10ms to complete an
2140 			 * IO request).
2141 			 */
2142 
2143 			DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
2144 			    hrtime_t, now);
2145 
2146 			zio_interrupt(zio);
2147 		} else {
2148 			taskqid_t tid;
2149 			hrtime_t diff = zio->io_target_timestamp - now;
2150 			clock_t expire_at_tick = ddi_get_lbolt() +
2151 			    NSEC_TO_TICK(diff);
2152 
2153 			DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
2154 			    hrtime_t, now, hrtime_t, diff);
2155 
2156 			if (NSEC_TO_TICK(diff) == 0) {
2157 				/* Our delay is less than a jiffy - just spin */
2158 				zfs_sleep_until(zio->io_target_timestamp);
2159 				zio_interrupt(zio);
2160 			} else {
2161 				/*
2162 				 * Use taskq_dispatch_delay() in the place of
2163 				 * OpenZFS's timeout_generic().
2164 				 */
2165 				tid = taskq_dispatch_delay(system_taskq,
2166 				    zio_interrupt, zio, TQ_NOSLEEP,
2167 				    expire_at_tick);
2168 				if (tid == TASKQID_INVALID) {
2169 					/*
2170 					 * Couldn't allocate a task.  Just
2171 					 * finish the zio without a delay.
2172 					 */
2173 					zio_interrupt(zio);
2174 				}
2175 			}
2176 		}
2177 		return;
2178 	}
2179 #endif
2180 	DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
2181 	zio_interrupt(zio);
2182 }
2183 
2184 static void
2185 zio_deadman_impl(zio_t *pio, int ziodepth)
2186 {
2187 	zio_t *cio, *cio_next;
2188 	zio_link_t *zl = NULL;
2189 	vdev_t *vd = pio->io_vd;
2190 
2191 	if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
2192 		vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
2193 		zbookmark_phys_t *zb = &pio->io_bookmark;
2194 		uint64_t delta = gethrtime() - pio->io_timestamp;
2195 		uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
2196 
2197 		zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
2198 		    "delta=%llu queued=%llu io=%llu "
2199 		    "path=%s "
2200 		    "last=%llu type=%d "
2201 		    "priority=%d flags=0x%llx stage=0x%x "
2202 		    "pipeline=0x%x pipeline-trace=0x%x "
2203 		    "objset=%llu object=%llu "
2204 		    "level=%llu blkid=%llu "
2205 		    "offset=%llu size=%llu "
2206 		    "error=%d",
2207 		    ziodepth, pio, pio->io_timestamp,
2208 		    (u_longlong_t)delta, pio->io_delta, pio->io_delay,
2209 		    vd ? vd->vdev_path : "NULL",
2210 		    vq ? vq->vq_io_complete_ts : 0, pio->io_type,
2211 		    pio->io_priority, (u_longlong_t)pio->io_flags,
2212 		    pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
2213 		    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
2214 		    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
2215 		    (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
2216 		    pio->io_error);
2217 		(void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
2218 		    pio->io_spa, vd, zb, pio, 0);
2219 
2220 		if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
2221 		    taskq_empty_ent(&pio->io_tqent)) {
2222 			zio_interrupt(pio);
2223 		}
2224 	}
2225 
2226 	mutex_enter(&pio->io_lock);
2227 	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2228 		cio_next = zio_walk_children(pio, &zl);
2229 		zio_deadman_impl(cio, ziodepth + 1);
2230 	}
2231 	mutex_exit(&pio->io_lock);
2232 }
2233 
2234 /*
2235  * Log the critical information describing this zio and all of its children
2236  * using the zfs_dbgmsg() interface, then post a deadman event for the ZED.
2237  */
2238 void
2239 zio_deadman(zio_t *pio, const char *tag)
2240 {
2241 	spa_t *spa = pio->io_spa;
2242 	char *name = spa_name(spa);
2243 
2244 	if (!zfs_deadman_enabled || spa_suspended(spa))
2245 		return;
2246 
2247 	zio_deadman_impl(pio, 0);
2248 
2249 	switch (spa_get_deadman_failmode(spa)) {
2250 	case ZIO_FAILURE_MODE_WAIT:
2251 		zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
2252 		break;
2253 
2254 	case ZIO_FAILURE_MODE_CONTINUE:
2255 		zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
2256 		break;
2257 
2258 	case ZIO_FAILURE_MODE_PANIC:
2259 		fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
2260 		break;
2261 	}
2262 }
2263 
2264 /*
2265  * Execute the I/O pipeline until one of the following occurs:
2266  * (1) the I/O completes; (2) the pipeline stalls waiting for
2267  * dependent child I/Os; (3) the I/O issues, so we're waiting
2268  * for an I/O completion interrupt; (4) the I/O is delegated by
2269  * vdev-level caching or aggregation; (5) the I/O is deferred
2270  * due to vdev-level queueing; (6) the I/O is handed off to
2271  * another thread.  In all cases, the pipeline stops whenever
2272  * there's no CPU work; it never burns a thread in cv_wait_io().
2273  *
2274  * There's no locking on io_stage because there's no legitimate way
2275  * for multiple threads to be attempting to process the same I/O.
2276  */
2277 static zio_pipe_stage_t *zio_pipeline[];
2278 
2279 /*
2280  * zio_execute() is a wrapper around the static function
2281  * __zio_execute() so that we can force  __zio_execute() to be
2282  * inlined.  This reduces stack overhead which is important
2283  * because __zio_execute() is called recursively in several zio
2284  * code paths.  zio_execute() itself cannot be inlined because
2285  * it is externally visible.
2286  */
2287 void
2288 zio_execute(void *zio)
2289 {
2290 	fstrans_cookie_t cookie;
2291 
2292 	cookie = spl_fstrans_mark();
2293 	__zio_execute(zio);
2294 	spl_fstrans_unmark(cookie);
2295 }
2296 
2297 /*
2298  * Used to determine whether the stack in the current context is large
2299  * enough to allow zio_execute() to be called recursively.  A minimum
2300  * stack size of 16K is required to avoid needing to re-dispatch the zio.
2301  */
2302 static boolean_t
2303 zio_execute_stack_check(zio_t *zio)
2304 {
2305 #if !defined(HAVE_LARGE_STACKS)
2306 	dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
2307 
2308 	/* Executing in txg_sync_thread() context. */
2309 	if (dp && curthread == dp->dp_tx.tx_sync_thread)
2310 		return (B_TRUE);
2311 
2312 	/* Pool initialization outside of zio_taskq context. */
2313 	if (dp && spa_is_initializing(dp->dp_spa) &&
2314 	    !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
2315 	    !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
2316 		return (B_TRUE);
2317 #else
2318 	(void) zio;
2319 #endif /* HAVE_LARGE_STACKS */
2320 
2321 	return (B_FALSE);
2322 }
2323 
2324 __attribute__((always_inline))
2325 static inline void
2326 __zio_execute(zio_t *zio)
2327 {
2328 	ASSERT3U(zio->io_queued_timestamp, >, 0);
2329 
2330 	while (zio->io_stage < ZIO_STAGE_DONE) {
2331 		enum zio_stage pipeline = zio->io_pipeline;
2332 		enum zio_stage stage = zio->io_stage;
2333 
2334 		zio->io_executor = curthread;
2335 
2336 		ASSERT(!MUTEX_HELD(&zio->io_lock));
2337 		ASSERT(ISP2(stage));
2338 		ASSERT(zio->io_stall == NULL);
2339 
2340 		do {
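		/*
		 * Advance to the next stage: shift the current stage bit left
		 * until we find a stage that is actually present in this
		 * zio's pipeline mask.
		 */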
2341 			stage <<= 1;
2342 		} while ((stage & pipeline) == 0);
2343 
2344 		ASSERT(stage <= ZIO_STAGE_DONE);
2345 
2346 		/*
2347 		 * If we are in interrupt context and this pipeline stage
2348 		 * will grab a config lock that is held across I/O,
2349 		 * or may wait for an I/O that needs an interrupt thread
2350 		 * to complete, issue async to avoid deadlock.
2351 		 *
2352 		 * For VDEV_IO_START, we cut in line so that the io will
2353 		 * be sent to disk promptly.
2354 		 */
2355 		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
2356 		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
2357 			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2358 			    zio_requeue_io_start_cut_in_line : B_FALSE;
2359 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2360 			return;
2361 		}
2362 
2363 		/*
2364 		 * If the current context doesn't have a large enough stack,
2365 		 * the zio must be issued asynchronously to prevent overflow.
2366 		 */
2367 		if (zio_execute_stack_check(zio)) {
2368 			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2369 			    zio_requeue_io_start_cut_in_line : B_FALSE;
2370 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2371 			return;
2372 		}
2373 
2374 		zio->io_stage = stage;
2375 		zio->io_pipeline_trace |= zio->io_stage;
2376 
2377 		/*
2378 		 * The zio pipeline stage returns the next zio to execute
2379 		 * (typically the same as this one), or NULL if we should
2380 		 * stop.
2381 		 */
2382 		zio = zio_pipeline[highbit64(stage) - 1](zio);
2383 
2384 		if (zio == NULL)
2385 			return;
2386 	}
2387 }
2388 
2389 
2390 /*
2391  * ==========================================================================
2392  * Initiate I/O, either sync or async
2393  * ==========================================================================
2394  */
2395 int
2396 zio_wait(zio_t *zio)
2397 {
2398 	/*
2399 	 * Some routines, like zio_free_sync(), may return a NULL zio
2400 	 * to avoid the performance overhead of creating and then destroying
2401 	 * an unneeded zio.  For the callers' simplicity, we accept a NULL
2402 	 * zio and ignore it.
2403 	 */
2404 	if (zio == NULL)
2405 		return (0);
2406 
2407 	long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
2408 	int error;
2409 
2410 	ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
2411 	ASSERT3P(zio->io_executor, ==, NULL);
2412 
2413 	zio->io_waiter = curthread;
2414 	ASSERT0(zio->io_queued_timestamp);
2415 	zio->io_queued_timestamp = gethrtime();
2416 
2417 	if (zio->io_type == ZIO_TYPE_WRITE) {
2418 		spa_select_allocator(zio);
2419 	}
2420 	__zio_execute(zio);
2421 
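	/*
	 * Wait for the zio to complete.  If deadman detection is enabled and
	 * the zio has been outstanding longer than spa_deadman_ziotime(), log
	 * it via zio_deadman() and continue waiting, rechecking every
	 * zfs_deadman_checktime_ms milliseconds.
	 */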
2422 	mutex_enter(&zio->io_lock);
2423 	while (zio->io_executor != NULL) {
2424 		error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
2425 		    ddi_get_lbolt() + timeout);
2426 
2427 		if (zfs_deadman_enabled && error == -1 &&
2428 		    gethrtime() - zio->io_queued_timestamp >
2429 		    spa_deadman_ziotime(zio->io_spa)) {
2430 			mutex_exit(&zio->io_lock);
2431 			timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
2432 			zio_deadman(zio, FTAG);
2433 			mutex_enter(&zio->io_lock);
2434 		}
2435 	}
2436 	mutex_exit(&zio->io_lock);
2437 
2438 	error = zio->io_error;
2439 	zio_destroy(zio);
2440 
2441 	return (error);
2442 }
2443 
2444 void
2445 zio_nowait(zio_t *zio)
2446 {
2447 	/*
2448 	 * See comment in zio_wait().
2449 	 */
2450 	if (zio == NULL)
2451 		return;
2452 
2453 	ASSERT3P(zio->io_executor, ==, NULL);
2454 
2455 	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
2456 	    list_is_empty(&zio->io_parent_list)) {
2457 		zio_t *pio;
2458 
2459 		/*
2460 		 * This is a logical async I/O with no parent to wait for it.
2461 		 * We add it to the spa_async_zio_root "Godfather" I/O, which
2462 		 * will ensure it completes prior to unloading the pool.
2463 		 */
2464 		spa_t *spa = zio->io_spa;
2465 		pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
2466 
2467 		zio_add_child(pio, zio);
2468 	}
2469 
2470 	ASSERT0(zio->io_queued_timestamp);
2471 	zio->io_queued_timestamp = gethrtime();
2472 	if (zio->io_type == ZIO_TYPE_WRITE) {
2473 		spa_select_allocator(zio);
2474 	}
2475 	__zio_execute(zio);
2476 }
2477 
2478 /*
2479  * ==========================================================================
2480  * Reexecute, cancel, or suspend/resume failed I/O
2481  * ==========================================================================
2482  */
2483 
2484 static void
2485 zio_reexecute(void *arg)
2486 {
2487 	zio_t *pio = arg;
2488 	zio_t *cio, *cio_next, *gio;
2489 
2490 	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
2491 	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
2492 	ASSERT(pio->io_gang_leader == NULL);
2493 	ASSERT(pio->io_gang_tree == NULL);
2494 
2495 	mutex_enter(&pio->io_lock);
2496 	pio->io_flags = pio->io_orig_flags;
2497 	pio->io_stage = pio->io_orig_stage;
2498 	pio->io_pipeline = pio->io_orig_pipeline;
2499 	pio->io_reexecute = 0;
2500 	pio->io_flags |= ZIO_FLAG_REEXECUTED;
2501 	pio->io_pipeline_trace = 0;
2502 	pio->io_error = 0;
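	/*
	 * Restore the wait-point state: the zio counts as READY if its
	 * (restored) stage is already at or beyond READY, or if its pipeline
	 * contains no READY stage at all; it counts as DONE only if the
	 * stage is at or beyond DONE.
	 */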
2503 	pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) ||
2504 	    (pio->io_pipeline & ZIO_STAGE_READY) == 0;
2505 	pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE);
2506 	zio_link_t *zl = NULL;
2507 	while ((gio = zio_walk_parents(pio, &zl)) != NULL) {
2508 		for (int w = 0; w < ZIO_WAIT_TYPES; w++) {
2509 			gio->io_children[pio->io_child_type][w] +=
2510 			    !pio->io_state[w];
2511 		}
2512 	}
2513 	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
2514 		pio->io_child_error[c] = 0;
2515 
2516 	if (IO_IS_ALLOCATING(pio))
2517 		BP_ZERO(pio->io_bp);
2518 
2519 	/*
2520 	 * As we reexecute pio's children, new children could be created.
2521 	 * New children go to the head of pio's io_child_list, however,
2522 	 * so we will (correctly) not reexecute them.  The key is that
2523 	 * the remainder of pio's io_child_list, from 'cio_next' onward,
2524 	 * cannot be affected by any side effects of reexecuting 'cio'.
2525 	 */
2526 	zl = NULL;
2527 	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2528 		cio_next = zio_walk_children(pio, &zl);
2529 		mutex_exit(&pio->io_lock);
2530 		zio_reexecute(cio);
2531 		mutex_enter(&pio->io_lock);
2532 	}
2533 	mutex_exit(&pio->io_lock);
2534 
2535 	/*
2536 	 * Now that all children have been reexecuted, execute the parent.
2537 	 * We don't reexecute "The Godfather" I/O here as it's the
2538 	 * responsibility of the caller to wait on it.
2539 	 */
2540 	if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
2541 		pio->io_queued_timestamp = gethrtime();
2542 		__zio_execute(pio);
2543 	}
2544 }
2545 
2546 void
2547 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
2548 {
2549 	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
2550 		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
2551 		    "failure and the failure mode property for this pool "
2552 		    "is set to panic.", spa_name(spa));
2553 
2554 	if (reason != ZIO_SUSPEND_MMP) {
2555 		cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable "
2556 		    "I/O failure and has been suspended.\n", spa_name(spa));
2557 	}
2558 
2559 	(void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
2560 	    NULL, NULL, 0);
2561 
2562 	mutex_enter(&spa->spa_suspend_lock);
2563 
2564 	if (spa->spa_suspend_zio_root == NULL)
2565 		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
2566 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2567 		    ZIO_FLAG_GODFATHER);
2568 
2569 	spa->spa_suspended = reason;
2570 
2571 	if (zio != NULL) {
2572 		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
2573 		ASSERT(zio != spa->spa_suspend_zio_root);
2574 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2575 		ASSERT(zio_unique_parent(zio) == NULL);
2576 		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
2577 		zio_add_child(spa->spa_suspend_zio_root, zio);
2578 	}
2579 
2580 	mutex_exit(&spa->spa_suspend_lock);
2581 }
2582 
2583 int
2584 zio_resume(spa_t *spa)
2585 {
2586 	zio_t *pio;
2587 
2588 	/*
2589 	 * Reexecute all previously suspended i/o.
2590 	 */
2591 	mutex_enter(&spa->spa_suspend_lock);
2592 	spa->spa_suspended = ZIO_SUSPEND_NONE;
2593 	cv_broadcast(&spa->spa_suspend_cv);
2594 	pio = spa->spa_suspend_zio_root;
2595 	spa->spa_suspend_zio_root = NULL;
2596 	mutex_exit(&spa->spa_suspend_lock);
2597 
2598 	if (pio == NULL)
2599 		return (0);
2600 
2601 	zio_reexecute(pio);
2602 	return (zio_wait(pio));
2603 }
2604 
2605 void
2606 zio_resume_wait(spa_t *spa)
2607 {
2608 	mutex_enter(&spa->spa_suspend_lock);
2609 	while (spa_suspended(spa))
2610 		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
2611 	mutex_exit(&spa->spa_suspend_lock);
2612 }
2613 
2614 /*
2615  * ==========================================================================
2616  * Gang blocks.
2617  *
2618  * A gang block is a collection of small blocks that looks to the DMU
2619  * like one large block.  When zio_dva_allocate() cannot find a block
2620  * of the requested size, due to either severe fragmentation or the pool
2621  * being nearly full, it calls zio_write_gang_block() to construct the
2622  * block from smaller fragments.
2623  *
2624  * A gang block consists of a gang header (zio_gbh_phys_t) and up to
2625  * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
2626  * an indirect block: it's an array of block pointers.  It consumes
2627  * only one sector and hence is allocatable regardless of fragmentation.
2628  * The gang header's bps point to its gang members, which hold the data.
2629  *
2630  * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
2631  * as the verifier to ensure uniqueness of the SHA256 checksum.
2632  * Critically, the gang block bp's blk_cksum is the checksum of the data,
2633  * not the gang header.  This ensures that data block signatures (needed for
2634  * deduplication) are independent of how the block is physically stored.
2635  *
2636  * Gang blocks can be nested: a gang member may itself be a gang block.
2637  * Thus every gang block is a tree in which root and all interior nodes are
2638  * gang headers, and the leaves are normal blocks that contain user data.
2639  * The root of the gang tree is called the gang leader.
2640  *
2641  * To perform any operation (read, rewrite, free, claim) on a gang block,
2642  * zio_gang_assemble() first assembles the gang tree (minus data leaves)
2643  * in the io_gang_tree field of the original logical i/o by recursively
2644  * reading the gang leader and all gang headers below it.  This yields
2645  * an in-core tree containing the contents of every gang header and the
2646  * bps for every constituent of the gang block.
2647  *
2648  * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
2649  * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
2650  * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
2651  * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
2652  * zio_read_gang() is a wrapper around zio_read() that omits reading gang
2653  * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
2654  * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
2655  * of the gang header plus zio_checksum_compute() of the data to update the
2656  * gang header's blk_cksum as described above.
2657  *
2658  * The two-phase assemble/issue model solves the problem of partial failure --
2659  * what if you'd freed part of a gang block but then couldn't read the
2660  * gang header for another part?  Assembling the entire gang tree first
2661  * ensures that all the necessary gang header I/O has succeeded before
2662  * starting the actual work of free, claim, or write.  Once the gang tree
2663  * is assembled, free and claim are in-memory operations that cannot fail.
2664  *
2665  * In the event that a gang write fails, zio_dva_unallocate() walks the
2666  * gang tree to immediately free (i.e. insert back into the space map)
2667  * everything we've allocated.  This ensures that we don't get ENOSPC
2668  * errors during repeated suspend/resume cycles due to a flaky device.
2669  *
2670  * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
2671  * the gang tree, we won't modify the block, so we can safely defer the free
2672  * (knowing that the block is still intact).  If we *can* assemble the gang
2673  * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
2674  * each constituent bp and we can allocate a new block on the next sync pass.
2675  *
2676  * In all cases, the gang tree allows complete recovery from partial failure.
2677  * ==========================================================================
2678  */
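
/*
 * Illustrative sketch (layout only; sizes and nesting vary): a logical block
 * that could not be allocated contiguously might be stored as
 *
 *	gang header (zio_gbh_phys_t, one sector)
 *	    zg_blkptr[0] -> data fragment
 *	    zg_blkptr[1] -> data fragment
 *	    zg_blkptr[2] -> data fragment, or another gang header if nested
 *
 * zio_gang_assemble() reads the gang header(s) into io_gang_tree, and
 * zio_gang_issue() then walks that tree, applying the per-type callback from
 * zio_gang_issue_func[] to each block pointer.
 */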
2679 
2680 static void
2681 zio_gang_issue_func_done(zio_t *zio)
2682 {
2683 	abd_free(zio->io_abd);
2684 }
2685 
2686 static zio_t *
2687 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2688     uint64_t offset)
2689 {
2690 	if (gn != NULL)
2691 		return (pio);
2692 
2693 	return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
2694 	    BP_GET_PSIZE(bp), zio_gang_issue_func_done,
2695 	    NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2696 	    &pio->io_bookmark));
2697 }
2698 
2699 static zio_t *
2700 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2701     uint64_t offset)
2702 {
2703 	zio_t *zio;
2704 
2705 	if (gn != NULL) {
2706 		abd_t *gbh_abd =
2707 		    abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2708 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2709 		    gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
2710 		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2711 		    &pio->io_bookmark);
2712 		/*
2713 		 * As we rewrite each gang header, the pipeline will compute
2714 		 * a new gang block header checksum for it; but no one will
2715 		 * compute a new data checksum, so we do that here.  The one
2716 		 * exception is the gang leader: the pipeline already computed
2717 		 * its data checksum because that stage precedes gang assembly.
2718 		 * (Presently, nothing actually uses interior data checksums;
2719 		 * this is just good hygiene.)
2720 		 */
2721 		if (gn != pio->io_gang_leader->io_gang_tree) {
2722 			abd_t *buf = abd_get_offset(data, offset);
2723 
2724 			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
2725 			    buf, BP_GET_PSIZE(bp));
2726 
2727 			abd_free(buf);
2728 		}
2729 		/*
2730 		 * If we are here to damage data for testing purposes,
2731 		 * leave the GBH alone so that we can detect the damage.
2732 		 */
2733 		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
2734 			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
2735 	} else {
2736 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2737 		    abd_get_offset(data, offset), BP_GET_PSIZE(bp),
2738 		    zio_gang_issue_func_done, NULL, pio->io_priority,
2739 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2740 	}
2741 
2742 	return (zio);
2743 }
2744 
2745 static zio_t *
2746 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2747     uint64_t offset)
2748 {
2749 	(void) gn, (void) data, (void) offset;
2750 
2751 	zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
2752 	    ZIO_GANG_CHILD_FLAGS(pio));
2753 	if (zio == NULL) {
2754 		zio = zio_null(pio, pio->io_spa,
2755 		    NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
2756 	}
2757 	return (zio);
2758 }
2759 
2760 static zio_t *
2761 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2762     uint64_t offset)
2763 {
2764 	(void) gn, (void) data, (void) offset;
2765 	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
2766 	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
2767 }
2768 
2769 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
2770 	NULL,
2771 	zio_read_gang,
2772 	zio_rewrite_gang,
2773 	zio_free_gang,
2774 	zio_claim_gang,
2775 	NULL
2776 };
2777 
2778 static void zio_gang_tree_assemble_done(zio_t *zio);
2779 
2780 static zio_gang_node_t *
2781 zio_gang_node_alloc(zio_gang_node_t **gnpp)
2782 {
2783 	zio_gang_node_t *gn;
2784 
2785 	ASSERT(*gnpp == NULL);
2786 
2787 	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
2788 	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
2789 	*gnpp = gn;
2790 
2791 	return (gn);
2792 }
2793 
2794 static void
2795 zio_gang_node_free(zio_gang_node_t **gnpp)
2796 {
2797 	zio_gang_node_t *gn = *gnpp;
2798 
2799 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2800 		ASSERT(gn->gn_child[g] == NULL);
2801 
2802 	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2803 	kmem_free(gn, sizeof (*gn));
2804 	*gnpp = NULL;
2805 }
2806 
2807 static void
2808 zio_gang_tree_free(zio_gang_node_t **gnpp)
2809 {
2810 	zio_gang_node_t *gn = *gnpp;
2811 
2812 	if (gn == NULL)
2813 		return;
2814 
2815 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2816 		zio_gang_tree_free(&gn->gn_child[g]);
2817 
2818 	zio_gang_node_free(gnpp);
2819 }
2820 
2821 static void
2822 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
2823 {
2824 	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
2825 	abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2826 
2827 	ASSERT(gio->io_gang_leader == gio);
2828 	ASSERT(BP_IS_GANG(bp));
2829 
2830 	zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
2831 	    zio_gang_tree_assemble_done, gn, gio->io_priority,
2832 	    ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
2833 }
2834 
2835 static void
2836 zio_gang_tree_assemble_done(zio_t *zio)
2837 {
2838 	zio_t *gio = zio->io_gang_leader;
2839 	zio_gang_node_t *gn = zio->io_private;
2840 	blkptr_t *bp = zio->io_bp;
2841 
2842 	ASSERT(gio == zio_unique_parent(zio));
2843 	ASSERT(list_is_empty(&zio->io_child_list));
2844 
2845 	if (zio->io_error)
2846 		return;
2847 
2848 	/* this ABD was created from a linear buf in zio_gang_tree_assemble */
2849 	if (BP_SHOULD_BYTESWAP(bp))
2850 		byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
2851 
2852 	ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
2853 	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
2854 	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2855 
2856 	abd_free(zio->io_abd);
2857 
2858 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2859 		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2860 		if (!BP_IS_GANG(gbp))
2861 			continue;
2862 		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
2863 	}
2864 }
2865 
2866 static void
2867 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
2868     uint64_t offset)
2869 {
2870 	zio_t *gio = pio->io_gang_leader;
2871 	zio_t *zio;
2872 
2873 	ASSERT(BP_IS_GANG(bp) == !!gn);
2874 	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
2875 	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
2876 
2877 	/*
2878 	 * If you're a gang header, your data is in gn->gn_gbh.
2879 	 * If you're a gang member, your data is in 'data' and gn == NULL.
2880 	 */
2881 	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
2882 
2883 	if (gn != NULL) {
2884 		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2885 
2886 		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2887 			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2888 			if (BP_IS_HOLE(gbp))
2889 				continue;
2890 			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
2891 			    offset);
2892 			offset += BP_GET_PSIZE(gbp);
2893 		}
2894 	}
2895 
2896 	if (gn == gio->io_gang_tree)
2897 		ASSERT3U(gio->io_size, ==, offset);
2898 
2899 	if (zio != pio)
2900 		zio_nowait(zio);
2901 }
2902 
2903 static zio_t *
2904 zio_gang_assemble(zio_t *zio)
2905 {
2906 	blkptr_t *bp = zio->io_bp;
2907 
2908 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
2909 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2910 
2911 	zio->io_gang_leader = zio;
2912 
2913 	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
2914 
2915 	return (zio);
2916 }
2917 
2918 static zio_t *
2919 zio_gang_issue(zio_t *zio)
2920 {
2921 	blkptr_t *bp = zio->io_bp;
2922 
2923 	if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
2924 		return (NULL);
2925 	}
2926 
2927 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
2928 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2929 
2930 	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
2931 		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
2932 		    0);
2933 	else
2934 		zio_gang_tree_free(&zio->io_gang_tree);
2935 
2936 	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2937 
2938 	return (zio);
2939 }
2940 
2941 static void
2942 zio_gang_inherit_allocator(zio_t *pio, zio_t *cio)
2943 {
2944 	cio->io_allocator = pio->io_allocator;
2945 }
2946 
2947 static void
2948 zio_write_gang_member_ready(zio_t *zio)
2949 {
2950 	zio_t *pio = zio_unique_parent(zio);
2951 	dva_t *cdva = zio->io_bp->blk_dva;
2952 	dva_t *pdva = pio->io_bp->blk_dva;
2953 	uint64_t asize;
2954 	zio_t *gio __maybe_unused = zio->io_gang_leader;
2955 
2956 	if (BP_IS_HOLE(zio->io_bp))
2957 		return;
2958 
2959 	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
2960 
2961 	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
2962 	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
2963 	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
2964 	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
2965 	VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
2966 
2967 	mutex_enter(&pio->io_lock);
2968 	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
2969 		ASSERT(DVA_GET_GANG(&pdva[d]));
2970 		asize = DVA_GET_ASIZE(&pdva[d]);
2971 		asize += DVA_GET_ASIZE(&cdva[d]);
2972 		DVA_SET_ASIZE(&pdva[d], asize);
2973 	}
2974 	mutex_exit(&pio->io_lock);
2975 }
2976 
2977 static void
2978 zio_write_gang_done(zio_t *zio)
2979 {
2980 	/*
2981 	 * The io_abd field will be NULL for a zio with no data.  The io_flags
2982 	 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
2983 	 * check for it here as it is cleared in zio_ready.
2984 	 */
2985 	if (zio->io_abd != NULL)
2986 		abd_free(zio->io_abd);
2987 }
2988 
2989 static zio_t *
2990 zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
2991 {
2992 	spa_t *spa = pio->io_spa;
2993 	blkptr_t *bp = pio->io_bp;
2994 	zio_t *gio = pio->io_gang_leader;
2995 	zio_t *zio;
2996 	zio_gang_node_t *gn, **gnpp;
2997 	zio_gbh_phys_t *gbh;
2998 	abd_t *gbh_abd;
2999 	uint64_t txg = pio->io_txg;
3000 	uint64_t resid = pio->io_size;
3001 	uint64_t lsize;
3002 	int copies = gio->io_prop.zp_copies;
3003 	zio_prop_t zp;
3004 	int error;
3005 	boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
3006 
3007 	/*
3008 	 * If one copy was requested, store 2 copies of the GBH, so that we
3009 	 * can still traverse all the data (e.g. to free or scrub) even if a
3010 	 * block is damaged.  Note that we can't store 3 copies of the GBH in
3011 	 * all cases, e.g. with encryption, which uses DVA[2] for the IV+salt.
3012 	 */
3013 	int gbh_copies = copies;
3014 	if (gbh_copies == 1) {
3015 		gbh_copies = MIN(2, spa_max_replication(spa));
3016 	}
3017 
3018 	ASSERT(ZIO_HAS_ALLOCATOR(pio));
3019 	int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
3020 	if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3021 		ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3022 		ASSERT(has_data);
3023 
3024 		flags |= METASLAB_ASYNC_ALLOC;
3025 		VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
3026 		    mca_alloc_slots, pio));
3027 
3028 		/*
3029 		 * The logical zio has already placed a reservation for
3030 		 * 'copies' allocation slots but gang blocks may require
3031 		 * additional copies. These additional copies
3032 		 * (i.e. gbh_copies - copies) are guaranteed to succeed
3033 		 * since metaslab_class_throttle_reserve() always allows
3034 		 * additional reservations for gang blocks.
3035 		 */
3036 		VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
3037 		    pio->io_allocator, pio, flags));
3038 	}
3039 
3040 	error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
3041 	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
3042 	    &pio->io_alloc_list, pio, pio->io_allocator);
3043 	if (error) {
3044 		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3045 			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3046 			ASSERT(has_data);
3047 
3048 			/*
3049 			 * If we failed to allocate the gang block header then
3050 			 * we remove any additional allocation reservations that
3051 			 * we placed here. The original reservation will
3052 			 * be removed when the logical I/O goes to the ready
3053 			 * stage.
3054 			 */
3055 			metaslab_class_throttle_unreserve(mc,
3056 			    gbh_copies - copies, pio->io_allocator, pio);
3057 		}
3058 
3059 		pio->io_error = error;
3060 		return (pio);
3061 	}
3062 
3063 	if (pio == gio) {
3064 		gnpp = &gio->io_gang_tree;
3065 	} else {
3066 		gnpp = pio->io_private;
3067 		ASSERT(pio->io_ready == zio_write_gang_member_ready);
3068 	}
3069 
3070 	gn = zio_gang_node_alloc(gnpp);
3071 	gbh = gn->gn_gbh;
3072 	memset(gbh, 0, SPA_GANGBLOCKSIZE);
3073 	gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
3074 
3075 	/*
3076 	 * Create the gang header.
3077 	 */
3078 	zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
3079 	    zio_write_gang_done, NULL, pio->io_priority,
3080 	    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
3081 
3082 	zio_gang_inherit_allocator(pio, zio);
3083 
3084 	/*
3085 	 * Create and nowait the gang children.
3086 	 */
3087 	for (int g = 0; resid != 0; resid -= lsize, g++) {
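		/*
		 * Split the remaining data evenly across the remaining gang
		 * block pointer slots, rounding each child's logical size up
		 * to SPA_MINBLOCKSIZE.
		 */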
3088 		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
3089 		    SPA_MINBLOCKSIZE);
3090 		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
3091 
3092 		zp.zp_checksum = gio->io_prop.zp_checksum;
3093 		zp.zp_compress = ZIO_COMPRESS_OFF;
3094 		zp.zp_complevel = gio->io_prop.zp_complevel;
3095 		zp.zp_type = zp.zp_storage_type = DMU_OT_NONE;
3096 		zp.zp_level = 0;
3097 		zp.zp_copies = gio->io_prop.zp_copies;
3098 		zp.zp_dedup = B_FALSE;
3099 		zp.zp_dedup_verify = B_FALSE;
3100 		zp.zp_nopwrite = B_FALSE;
3101 		zp.zp_encrypt = gio->io_prop.zp_encrypt;
3102 		zp.zp_byteorder = gio->io_prop.zp_byteorder;
3103 		memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
3104 		memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
3105 		memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
3106 
3107 		zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
3108 		    has_data ? abd_get_offset(pio->io_abd, pio->io_size -
3109 		    resid) : NULL, lsize, lsize, &zp,
3110 		    zio_write_gang_member_ready, NULL,
3111 		    zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
3112 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
3113 
3114 		zio_gang_inherit_allocator(zio, cio);
3115 
3116 		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3117 			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3118 			ASSERT(has_data);
3119 
3120 			/*
3121 			 * Gang children won't throttle but we should
3122 			 * account for their work, so reserve an allocation
3123 			 * slot for them here.
3124 			 */
3125 			VERIFY(metaslab_class_throttle_reserve(mc,
3126 			    zp.zp_copies, cio->io_allocator, cio, flags));
3127 		}
3128 		zio_nowait(cio);
3129 	}
3130 
3131 	/*
3132 	 * Set pio's pipeline to just wait for zio to finish.
3133 	 */
3134 	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3135 
3136 	zio_nowait(zio);
3137 
3138 	return (pio);
3139 }
3140 
3141 /*
3142  * The zio_nop_write stage in the pipeline determines if allocating a
3143  * new bp is necessary.  The nopwrite feature can handle writes in
3144  * either syncing or open context (i.e. zil writes) and as a result is
3145  * mutually exclusive with dedup.
3146  *
3147  * By leveraging a cryptographically secure checksum, such as SHA256, we
3148  * can compare the checksums of the new data and the old to determine if
3149  * allocating a new block is required.  Note that our requirements for
3150  * cryptographic strength are fairly weak: there can't be any accidental
3151  * hash collisions, but we don't need to be secure against intentional
3152  * (malicious) collisions.  To trigger a nopwrite, you have to be able
3153  * to write the file to begin with, and triggering an incorrect (hash
3154  * collision) nopwrite is no worse than simply writing to the file.
3155  * That said, there are no known attacks against the checksum algorithms
3156  * used for nopwrite, assuming that the salt and the checksums
3157  * themselves remain secret.
3158  */
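
/*
 * Illustrative example (a sketch of the common case; the precise conditions
 * are checked in zio_nop_write() below): when identical data is rewritten
 * with the same nopwrite-capable checksum (e.g. sha256) and unchanged block
 * properties, the freshly computed bp carries the same blk_cksum as bp_orig.
 * zio_nop_write() then copies bp_orig back into the bp, resets the pipeline
 * to ZIO_INTERLOCK_PIPELINE and sets ZIO_FLAG_NOPWRITE, so no new block is
 * allocated and no vdev I/O is issued for this write.
 */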
3159 static zio_t *
3160 zio_nop_write(zio_t *zio)
3161 {
3162 	blkptr_t *bp = zio->io_bp;
3163 	blkptr_t *bp_orig = &zio->io_bp_orig;
3164 	zio_prop_t *zp = &zio->io_prop;
3165 
3166 	ASSERT(BP_IS_HOLE(bp));
3167 	ASSERT(BP_GET_LEVEL(bp) == 0);
3168 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
3169 	ASSERT(zp->zp_nopwrite);
3170 	ASSERT(!zp->zp_dedup);
3171 	ASSERT(zio->io_bp_override == NULL);
3172 	ASSERT(IO_IS_ALLOCATING(zio));
3173 
3174 	/*
3175 	 * Check to see if the original bp and the new bp have matching
3176 	 * characteristics (i.e. same checksum, compression algorithms, etc).
3177 	 * If they don't then just continue with the pipeline which will
3178 	 * allocate a new bp.
3179 	 */
3180 	if (BP_IS_HOLE(bp_orig) ||
3181 	    !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
3182 	    ZCHECKSUM_FLAG_NOPWRITE) ||
3183 	    BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
3184 	    BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
3185 	    BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
3186 	    BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
3187 	    zp->zp_copies != BP_GET_NDVAS(bp_orig))
3188 		return (zio);
3189 
3190 	/*
3191 	 * If the checksums match then reset the pipeline so that we
3192 	 * avoid allocating a new bp and issuing any I/O.
3193 	 */
3194 	if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
3195 		ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
3196 		    ZCHECKSUM_FLAG_NOPWRITE);
3197 		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
3198 		ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
3199 		ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
3200 		ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
3201 
3202 		/*
3203 		 * If we're overwriting a block that is currently on an
3204 		 * indirect vdev, then ignore the nopwrite request and
3205 		 * allow a new block to be allocated on a concrete vdev.
3206 		 */
3207 		spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
3208 		for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
3209 			vdev_t *tvd = vdev_lookup_top(zio->io_spa,
3210 			    DVA_GET_VDEV(&bp_orig->blk_dva[d]));
3211 			if (tvd->vdev_ops == &vdev_indirect_ops) {
3212 				spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3213 				return (zio);
3214 			}
3215 		}
3216 		spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3217 
3218 		*bp = *bp_orig;
3219 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3220 		zio->io_flags |= ZIO_FLAG_NOPWRITE;
3221 	}
3222 
3223 	return (zio);
3224 }
3225 
3226 /*
3227  * ==========================================================================
3228  * Block Reference Table
3229  * ==========================================================================
3230  */
3231 static zio_t *
3232 zio_brt_free(zio_t *zio)
3233 {
3234 	blkptr_t *bp;
3235 
3236 	bp = zio->io_bp;
3237 
3238 	if (BP_GET_LEVEL(bp) > 0 ||
3239 	    BP_IS_METADATA(bp) ||
3240 	    !brt_maybe_exists(zio->io_spa, bp)) {
3241 		return (zio);
3242 	}
3243 
3244 	if (!brt_entry_decref(zio->io_spa, bp)) {
3245 		/*
3246 		 * This isn't the last reference, so we cannot free
3247 		 * the data yet.
3248 		 */
3249 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3250 	}
3251 
3252 	return (zio);
3253 }
3254 
3255 /*
3256  * ==========================================================================
3257  * Dedup
3258  * ==========================================================================
3259  */
3260 static void
3261 zio_ddt_child_read_done(zio_t *zio)
3262 {
3263 	blkptr_t *bp = zio->io_bp;
3264 	ddt_t *ddt;
3265 	ddt_entry_t *dde = zio->io_private;
3266 	zio_t *pio = zio_unique_parent(zio);
3267 
3268 	mutex_enter(&pio->io_lock);
3269 	ddt = ddt_select(zio->io_spa, bp);
3270 
3271 	if (zio->io_error == 0) {
3272 		ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
3273 		/* this phys variant doesn't need repair */
3274 		ddt_phys_clear(dde->dde_phys, v);
3275 	}
3276 
3277 	if (zio->io_error == 0 && dde->dde_io->dde_repair_abd == NULL)
3278 		dde->dde_io->dde_repair_abd = zio->io_abd;
3279 	else
3280 		abd_free(zio->io_abd);
3281 	mutex_exit(&pio->io_lock);
3282 }
3283 
3284 static zio_t *
3285 zio_ddt_read_start(zio_t *zio)
3286 {
3287 	blkptr_t *bp = zio->io_bp;
3288 
3289 	ASSERT(BP_GET_DEDUP(bp));
3290 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3291 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3292 
3293 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
3294 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
3295 		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
3296 		ddt_phys_variant_t v_self = ddt_phys_select(ddt, dde, bp);
3297 		ddt_univ_phys_t *ddp = dde->dde_phys;
3298 		blkptr_t blk;
3299 
3300 		ASSERT(zio->io_vsd == NULL);
3301 		zio->io_vsd = dde;
3302 
3303 		if (v_self == DDT_PHYS_NONE)
3304 			return (zio);
3305 
3306 		/* issue I/O for the other copies */
3307 		for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3308 			ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3309 
3310 			if (ddt_phys_birth(ddp, v) == 0 || v == v_self)
3311 				continue;
3312 
3313 			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key,
3314 			    ddp, v, &blk);
3315 			zio_nowait(zio_read(zio, zio->io_spa, &blk,
3316 			    abd_alloc_for_io(zio->io_size, B_TRUE),
3317 			    zio->io_size, zio_ddt_child_read_done, dde,
3318 			    zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
3319 			    ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
3320 		}
3321 		return (zio);
3322 	}
3323 
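	/* No prior DDT error: just issue an ordinary read of the block. */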
3324 	zio_nowait(zio_read(zio, zio->io_spa, bp,
3325 	    zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
3326 	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
3327 
3328 	return (zio);
3329 }
3330 
3331 static zio_t *
3332 zio_ddt_read_done(zio_t *zio)
3333 {
3334 	blkptr_t *bp = zio->io_bp;
3335 
3336 	if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
3337 		return (NULL);
3338 	}
3339 
3340 	ASSERT(BP_GET_DEDUP(bp));
3341 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3342 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3343 
3344 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
3345 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
3346 		ddt_entry_t *dde = zio->io_vsd;
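		/*
		 * The DDT is not available (per the ASSERT below, this should
		 * only happen while the pool is loading), so there is nothing
		 * we can repair here.
		 */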
3347 		if (ddt == NULL) {
3348 			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
3349 			return (zio);
3350 		}
3351 		if (dde == NULL) {
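			/*
			 * We don't have a repair entry; back the pipeline up
			 * to the DDT read start stage and retry from there.
			 */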
3352 			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
3353 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
3354 			return (NULL);
3355 		}
3356 		if (dde->dde_io->dde_repair_abd != NULL) {
3357 			abd_copy(zio->io_abd, dde->dde_io->dde_repair_abd,
3358 			    zio->io_size);
3359 			zio->io_child_error[ZIO_CHILD_DDT] = 0;
3360 		}
3361 		ddt_repair_done(ddt, dde);
3362 		zio->io_vsd = NULL;
3363 	}
3364 
3365 	ASSERT(zio->io_vsd == NULL);
3366 
3367 	return (zio);
3368 }
3369 
3370 static boolean_t
3371 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
3372 {
3373 	spa_t *spa = zio->io_spa;
3374 	boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
3375 
3376 	ASSERT(!(zio->io_bp_override && do_raw));
3377 
3378 	/*
3379 	 * Note: we compare the original data, not the transformed data,
3380 	 * because when zio->io_bp is an override bp, we will not have
3381 	 * pushed the I/O transforms.  That's an important optimization
3382 	 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
3383 	 * However, we should never get a raw, override zio, so in these
3384 	 * cases we can compare the io_abd directly. This is useful because
3385 	 * it allows us to do dedup verification even if we don't have access
3386 	 * to the original data (for instance, if the encryption keys aren't
3387 	 * loaded).
3388 	 */
3389 
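	/* First compare against any in-flight lead writes for this entry. */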
3390 	for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3391 		if (DDT_PHYS_IS_DITTO(ddt, p))
3392 			continue;
3393 
3394 		if (dde->dde_io == NULL)
3395 			continue;
3396 
3397 		zio_t *lio = dde->dde_io->dde_lead_zio[p];
3398 		if (lio == NULL)
3399 			continue;
3400 
3401 		if (do_raw)
3402 			return (lio->io_size != zio->io_size ||
3403 			    abd_cmp(zio->io_abd, lio->io_abd) != 0);
3404 
3405 		return (lio->io_orig_size != zio->io_orig_size ||
3406 		    abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
3407 	}
3408 
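	/*
	 * No matching in-flight write; read back the data already on disk
	 * for each stored variant and compare it with what we are writing.
	 */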
3409 	for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3410 		ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3411 		uint64_t phys_birth = ddt_phys_birth(dde->dde_phys, v);
3412 
3413 		if (phys_birth != 0 && do_raw) {
3414 			blkptr_t blk = *zio->io_bp;
3415 			uint64_t psize;
3416 			abd_t *tmpabd;
3417 			int error;
3418 
3419 			ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth);
3420 			psize = BP_GET_PSIZE(&blk);
3421 
3422 			if (psize != zio->io_size)
3423 				return (B_TRUE);
3424 
3425 			ddt_exit(ddt);
3426 
3427 			tmpabd = abd_alloc_for_io(psize, B_TRUE);
3428 
3429 			error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
3430 			    psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
3431 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3432 			    ZIO_FLAG_RAW, &zio->io_bookmark));
3433 
3434 			if (error == 0) {
3435 				if (abd_cmp(tmpabd, zio->io_abd) != 0)
3436 					error = SET_ERROR(ENOENT);
3437 			}
3438 
3439 			abd_free(tmpabd);
3440 			ddt_enter(ddt);
3441 			return (error != 0);
3442 		} else if (phys_birth != 0) {
3443 			arc_buf_t *abuf = NULL;
3444 			arc_flags_t aflags = ARC_FLAG_WAIT;
3445 			blkptr_t blk = *zio->io_bp;
3446 			int error;
3447 
3448 			ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth);
3449 
3450 			if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
3451 				return (B_TRUE);
3452 
3453 			ddt_exit(ddt);
3454 
3455 			error = arc_read(NULL, spa, &blk,
3456 			    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
3457 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3458 			    &aflags, &zio->io_bookmark);
3459 
3460 			if (error == 0) {
3461 				if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
3462 				    zio->io_orig_size) != 0)
3463 					error = SET_ERROR(ENOENT);
3464 				arc_buf_destroy(abuf, &abuf);
3465 			}
3466 
3467 			ddt_enter(ddt);
3468 			return (error != 0);
3469 		}
3470 	}
3471 
3472 	return (B_FALSE);
3473 }
3474 
3475 static void
3476 zio_ddt_child_write_done(zio_t *zio)
3477 {
3478 	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3479 	ddt_entry_t *dde = zio->io_private;
3480 
3481 	zio_link_t *zl = NULL;
3482 	ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL);
3483 
3484 	int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies);
3485 	ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3486 	ddt_univ_phys_t *ddp = dde->dde_phys;
3487 
3488 	ddt_enter(ddt);
3489 
3490 	/* we're the lead, so once we're done there's no one else outstanding */
3491 	if (dde->dde_io->dde_lead_zio[p] == zio)
3492 		dde->dde_io->dde_lead_zio[p] = NULL;
3493 
3494 	ddt_univ_phys_t *orig = &dde->dde_io->dde_orig_phys;
3495 
3496 	if (zio->io_error != 0) {
3497 		/*
3498 		 * The write failed, so we're about to abort the entire IO
3499 		 * chain. We need to revert the entry back to what it was at
3500 		 * the last time it was successfully extended.
3501 		 */
3502 		ddt_phys_copy(ddp, orig, v);
3503 		ddt_phys_clear(orig, v);
3504 
3505 		ddt_exit(ddt);
3506 		return;
3507 	}
3508 
3509 	/*
3510 	 * We've successfully added new DVAs to the entry. Clear the saved
3511 	 * state or, if there's still outstanding IO, remember it so we can
3512 	 * revert to a known good state if that IO fails.
3513 	 */
3514 	if (dde->dde_io->dde_lead_zio[p] == NULL)
3515 		ddt_phys_clear(orig, v);
3516 	else
3517 		ddt_phys_copy(orig, ddp, v);
3518 
3519 	/*
3520 	 * Add references for all dedup writes that were waiting on the
3521 	 * physical one, skipping any other physical writes that are waiting.
3522 	 */
3523 	zio_t *pio;
3524 	zl = NULL;
3525 	while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
3526 		if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
3527 			ddt_phys_addref(ddp, v);
3528 	}
3529 
3530 	ddt_exit(ddt);
3531 }
3532 
3533 static void
3534 zio_ddt_child_write_ready(zio_t *zio)
3535 {
3536 	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3537 	ddt_entry_t *dde = zio->io_private;
3538 
3539 	zio_link_t *zl = NULL;
3540 	ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL);
3541 
3542 	int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies);
3543 	ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3544 
3545 	if (zio->io_error != 0)
3546 		return;
3547 
3548 	ddt_enter(ddt);
3549 
3550 	ddt_phys_extend(dde->dde_phys, v, zio->io_bp);
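	/*
	 * Our DVAs have been allocated (we're at READY); record them in the
	 * DDT entry, then copy them into the bp of every dedup write that is
	 * waiting on this one.
	 */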
3551 
3552 	zio_t *pio;
3553 	zl = NULL;
3554 	while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
3555 		if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
3556 			ddt_bp_fill(dde->dde_phys, v, pio->io_bp, zio->io_txg);
3557 	}
3558 
3559 	ddt_exit(ddt);
3560 }
3561 
3562 static zio_t *
3563 zio_ddt_write(zio_t *zio)
3564 {
3565 	spa_t *spa = zio->io_spa;
3566 	blkptr_t *bp = zio->io_bp;
3567 	uint64_t txg = zio->io_txg;
3568 	zio_prop_t *zp = &zio->io_prop;
3569 	ddt_t *ddt = ddt_select(spa, bp);
3570 	ddt_entry_t *dde;
3571 
3572 	ASSERT(BP_GET_DEDUP(bp));
3573 	ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
3574 	ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
3575 	ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
3576 
3577 	ddt_enter(ddt);
3578 	dde = ddt_lookup(ddt, bp);
3579 	if (dde == NULL) {
3580 		/* DDT size is over its quota so no new entries */
3581 		zp->zp_dedup = B_FALSE;
3582 		BP_SET_DEDUP(bp, B_FALSE);
3583 		if (zio->io_bp_override == NULL)
3584 			zio->io_pipeline = ZIO_WRITE_PIPELINE;
3585 		ddt_exit(ddt);
3586 		return (zio);
3587 	}
3588 
3589 	if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
3590 		/*
3591 		 * If we're using a weak checksum, upgrade to a strong checksum
3592 		 * and try again.  If we're already using a strong checksum,
3593 		 * we can't resolve it, so just convert to an ordinary write.
3594 		 * (And automatically e-mail a paper to Nature?)
3595 		 */
3596 		if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
3597 		    ZCHECKSUM_FLAG_DEDUP)) {
3598 			zp->zp_checksum = spa_dedup_checksum(spa);
3599 			zio_pop_transforms(zio);
3600 			zio->io_stage = ZIO_STAGE_OPEN;
3601 			BP_ZERO(bp);
3602 		} else {
3603 			zp->zp_dedup = B_FALSE;
3604 			BP_SET_DEDUP(bp, B_FALSE);
3605 		}
3606 		ASSERT(!BP_GET_DEDUP(bp));
3607 		zio->io_pipeline = ZIO_WRITE_PIPELINE;
3608 		ddt_exit(ddt);
3609 		return (zio);
3610 	}
3611 
3612 	int p = DDT_PHYS_FOR_COPIES(ddt, zp->zp_copies);
3613 	ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3614 	ddt_univ_phys_t *ddp = dde->dde_phys;
3615 
3616 	/*
3617 	 * In the common cases, at this point we have a regular BP with no
3618 	 * allocated DVAs, and the corresponding DDT entry for its checksum.
3619 	 * Our goal is to fill the BP with enough DVAs to satisfy its copies=
3620 	 * requirement.
3621 	 *
3622 	 * One of three things needs to happen to fulfill this:
3623 	 *
3624 	 * - if the DDT entry has enough DVAs to satisfy the BP, we just copy
3625 	 *   them out of the entry and return;
3626 	 *
3627 	 * - if the DDT entry has no DVAs (i.e. it's brand new), then we have to
3628 	 *   issue the write as normal so that DVAs can be allocated and the
3629 	 *   data lands on disk. We then copy the DVAs into the DDT entry on
3630 	 *   return.
3631 	 *
3632 	 * - if the DDT entry has some DVAs, but too few, we have to issue the
3633 	 *   write, adjusted to allocate fewer copies. When it returns, we
3634 	 *   add the new DVAs to the DDT entry, and update the BP to have the
3635 	 *   full amount it originally requested.
3636 	 *
3637 	 * In all cases, if there's already a writing IO in flight, we need to
3638 	 * defer the action until after the write is done. If our action is to
3639 	 * write, we need to adjust our request for additional DVAs to match
3640 	 * what will be in the DDT entry after it completes. In this way every
3641 	 * IO can be guaranteed to receive enough DVAs simply by joining the
3642 	 * end of the chain and letting the sequence play out.
3643 	 */
3644 
3645 	/*
3646 	 * Number of DVAs in the DDT entry. If the BP is encrypted we ignore
3647 	 * the third one as usual, since that slot holds the salt and IV.
3648 	 */
3649 	int have_dvas = ddt_phys_dva_count(ddp, v, BP_IS_ENCRYPTED(bp));
3650 	IMPLY(have_dvas == 0, ddt_phys_birth(ddp, v) == 0);
3651 
3652 	/* Number of DVAs requested by the IO. */
3653 	uint8_t need_dvas = zp->zp_copies;
3654 
3655 	/*
3656 	 * What we do next depends on whether or not there's IO outstanding that
3657 	 * will update this entry.
3658 	 */
3659 	if (dde->dde_io == NULL || dde->dde_io->dde_lead_zio[p] == NULL) {
3660 		/*
3661 		 * No IO outstanding, so we only need to worry about ourselves.
3662 		 */
3663 
3664 		/*
3665 		 * Override BPs bring their own DVAs and their own problems.
3666 		 */
3667 		if (zio->io_bp_override) {
3668 			/*
3669 			 * For a brand-new entry, all the work has been done
3670 			 * for us, and we can just fill it out from the provided
3671 			 * block and leave.
3672 			 */
3673 			if (have_dvas == 0) {
3674 				ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg);
3675 				ASSERT(BP_EQUAL(bp, zio->io_bp_override));
3676 				ddt_phys_extend(ddp, v, bp);
3677 				ddt_phys_addref(ddp, v);
3678 				ddt_exit(ddt);
3679 				return (zio);
3680 			}
3681 
3682 			/*
3683 			 * If we already have this entry, then we want to treat
3684 			 * it like a regular write. To do this we just wipe out
3685 			 * the override block's DVAs and proceed as normal.
3686 			 *
3687 			 * Even if there are some DVAs in the entry, we still
3688 			 * have to clear them out. We can't use them to fill
3689 			 * out the dedup entry, as they are all referenced
3690 			 * together by a bp already on disk, and will be freed
3691 			 * as a group.
3692 			 */
3693 			BP_ZERO_DVAS(bp);
3694 			BP_SET_BIRTH(bp, 0, 0);
3695 		}
3696 
3697 		/*
3698 		 * If there are enough DVAs in the entry to service our request,
3699 		 * then we can just use them as-is.
3700 		 */
3701 		if (have_dvas >= need_dvas) {
3702 			ddt_bp_fill(ddp, v, bp, txg);
3703 			ddt_phys_addref(ddp, v);
3704 			ddt_exit(ddt);
3705 			return (zio);
3706 		}
3707 
3708 		/*
3709 		 * Otherwise, we have to issue IO to fill the entry up to the
3710 		 * amount we need.
3711 		 */
3712 		need_dvas -= have_dvas;
3713 	} else {
3714 		/*
3715 		 * There's a write in-flight. If there are already enough DVAs on
3716 		 * the entry, then either there were already enough to start
3717 		 * with, or the in-flight IO is between READY and DONE, and so
3718 		 * has extended the entry with new DVAs. Either way, we don't
3719 		 * need to do anything, we can just slot in behind it.
3720 		 */
3721 
3722 		if (zio->io_bp_override) {
3723 			/*
3724 			 * If there's a write out, then we're soon going to
3725 			 * have our own copies of this block, so clear out the
3726 			 * override block and treat it as a regular dedup
3727 			 * write. See comment above.
3728 			 */
3729 			BP_ZERO_DVAS(bp);
3730 			BP_SET_BIRTH(bp, 0, 0);
3731 		}
3732 
3733 		if (have_dvas >= need_dvas) {
3734 			/*
3735 			 * A minor point: there might already be enough
3736 			 * committed DVAs in the entry to service our request,
3737 			 * but we don't know which are completed and which are
3738 			 * allocated but not yet written. In this case, should
3739 			 * the IO for the new DVAs fail, we will be on the end
3740 		 * of the IO chain and will also receive an error, even
3741 			 * though our request could have been serviced.
3742 			 *
3743 			 * This is an extremely rare case, as it requires the
3744 			 * original block to be copied with a request for a
3745 			 * larger number of DVAs, then copied again requesting
3746 			 * the same (or already fulfilled) number of DVAs while
3747 			 * the first request is active, and then that first
3748 			 * request errors. In return, the logic required to
3749 			 * catch and handle it is complex. For now, I'm just
3750 			 * not going to bother with it.
3751 			 */
3752 
3753 			/*
3754 			 * We always fill the bp here as we may have arrived
3755 			 * after the in-flight write has passed READY, and so
3756 			 * missed out.
3757 			 */
3758 			ddt_bp_fill(ddp, v, bp, txg);
3759 			zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
3760 			ddt_exit(ddt);
3761 			return (zio);
3762 		}
3763 
3764 		/*
3765 		 * There's not enough in the entry yet, so we need to look at
3766 		 * the write in-flight and see how many DVAs it will have once
3767 		 * it completes.
3768 		 *
3769 		 * The in-flight write has potentially had its copies request
3770 		 * reduced (if we're filling out an existing entry), so we need
3771 		 * to reach in and get the original write to find out what it is
3772 		 * expecting.
3773 		 *
3774 		 * Note that the parent of the lead zio will always have the
3775 		 * highest zp_copies of any zio in the chain, because ones that
3776 		 * can be serviced without additional IO are always added to
3777 		 * the back of the chain.
3778 		 */
3779 		zio_link_t *zl = NULL;
3780 		zio_t *pio =
3781 		    zio_walk_parents(dde->dde_io->dde_lead_zio[p], &zl);
3782 		ASSERT(pio);
3783 		uint8_t parent_dvas = pio->io_prop.zp_copies;
3784 
3785 		if (parent_dvas >= need_dvas) {
3786 			zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
3787 			ddt_exit(ddt);
3788 			return (zio);
3789 		}
3790 
3791 		/*
3792 		 * Still not enough, so we will need to issue to get the
3793 		 * shortfall.
3794 		 */
3795 		need_dvas -= parent_dvas;
3796 	}
3797 
3798 	/*
3799 	 * We need to write. We will create a new write with the copies
3800 	 * property adjusted to match the number of DVAs we need to
3801 	 * grow the DDT entry by to satisfy the request.
3802 	 */
3803 	zio_prop_t czp = *zp;
3804 	czp.zp_copies = need_dvas;
3805 	zio_t *cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
3806 	    zio->io_orig_size, zio->io_orig_size, &czp,
3807 	    zio_ddt_child_write_ready, NULL,
3808 	    zio_ddt_child_write_done, dde, zio->io_priority,
3809 	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
3810 
3811 	zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
3812 
3813 	/*
3814 	 * We are the new lead zio, because our parent has the highest
3815 	 * zp_copies that has been requested for this entry so far.
3816 	 */
3817 	ddt_alloc_entry_io(dde);
3818 	if (dde->dde_io->dde_lead_zio[p] == NULL) {
3819 		/*
3820 		 * First time out, take a copy of the stable entry to revert
3821 		 * to if there's an error (see zio_ddt_child_write_done())
3822 		 */
3823 		ddt_phys_copy(&dde->dde_io->dde_orig_phys, dde->dde_phys, v);
3824 	} else {
3825 		/*
3826 		 * Make the existing chain our child, because it cannot
3827 		 * complete until we have.
3828 		 */
3829 		zio_add_child(cio, dde->dde_io->dde_lead_zio[p]);
3830 	}
3831 	dde->dde_io->dde_lead_zio[p] = cio;
3832 
3833 	ddt_exit(ddt);
3834 
3835 	zio_nowait(cio);
3836 
3837 	return (zio);
3838 }
3839 
3840 static ddt_entry_t *freedde; /* for debugging */
3841 
3842 static zio_t *
3843 zio_ddt_free(zio_t *zio)
3844 {
3845 	spa_t *spa = zio->io_spa;
3846 	blkptr_t *bp = zio->io_bp;
3847 	ddt_t *ddt = ddt_select(spa, bp);
3848 	ddt_entry_t *dde = NULL;
3849 
3850 	ASSERT(BP_GET_DEDUP(bp));
3851 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3852 
3853 	ddt_enter(ddt);
3854 	freedde = dde = ddt_lookup(ddt, bp);
3855 	if (dde) {
3856 		ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
3857 		if (v != DDT_PHYS_NONE)
3858 			ddt_phys_decref(dde->dde_phys, v);
3859 	}
3860 	ddt_exit(ddt);
3861 
3862 	/*
3863 	 * When no entry was found, it must have been pruned,
3864 	 * so we can free it now instead of decrementing the
3865 	 * refcount in the DDT.
3866 	 */
3867 	if (!dde) {
3868 		BP_SET_DEDUP(bp, 0);
3869 		zio->io_pipeline |= ZIO_STAGE_DVA_FREE;
3870 	}
3871 
3872 	return (zio);
3873 }
3874 
3875 /*
3876  * ==========================================================================
3877  * Allocate and free blocks
3878  * ==========================================================================
3879  */
3880 
3881 static zio_t *
3882 zio_io_to_allocate(spa_t *spa, int allocator)
3883 {
3884 	zio_t *zio;
3885 
3886 	ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
3887 
3888 	zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
3889 	if (zio == NULL)
3890 		return (NULL);
3891 
3892 	ASSERT(IO_IS_ALLOCATING(zio));
3893 	ASSERT(ZIO_HAS_ALLOCATOR(zio));
3894 
3895 	/*
3896 	 * Try to place a reservation for this zio. If we're unable to
3897 	 * reserve then we throttle.
3898 	 */
3899 	ASSERT3U(zio->io_allocator, ==, allocator);
3900 	if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
3901 	    zio->io_prop.zp_copies, allocator, zio, 0)) {
3902 		return (NULL);
3903 	}
3904 
3905 	avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
3906 	ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
3907 
3908 	return (zio);
3909 }
3910 
3911 static zio_t *
3912 zio_dva_throttle(zio_t *zio)
3913 {
3914 	spa_t *spa = zio->io_spa;
3915 	zio_t *nio;
3916 	metaslab_class_t *mc;
3917 
3918 	/* locate an appropriate allocation class */
3919 	mc = spa_preferred_class(spa, zio);
3920 
3921 	if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
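	/*
	 * Sync writes, gang children, NODATA writes, and classes with the
	 * throttle disabled all bypass the allocation throttle.
	 */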
3922 	    !mc->mc_alloc_throttle_enabled ||
3923 	    zio->io_child_type == ZIO_CHILD_GANG ||
3924 	    zio->io_flags & ZIO_FLAG_NODATA) {
3925 		return (zio);
3926 	}
3927 
3928 	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
3929 	ASSERT(ZIO_HAS_ALLOCATOR(zio));
3930 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3931 	ASSERT3U(zio->io_queued_timestamp, >, 0);
3932 	ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
3933 
3934 	int allocator = zio->io_allocator;
3935 	zio->io_metaslab_class = mc;
3936 	mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
3937 	avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
3938 	nio = zio_io_to_allocate(spa, allocator);
3939 	mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
3940 	return (nio);
3941 }
3942 
3943 static void
3944 zio_allocate_dispatch(spa_t *spa, int allocator)
3945 {
3946 	zio_t *zio;
3947 
3948 	mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
3949 	zio = zio_io_to_allocate(spa, allocator);
3950 	mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
3951 	if (zio == NULL)
3952 		return;
3953 
3954 	ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
3955 	ASSERT0(zio->io_error);
3956 	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
3957 }
3958 
3959 static zio_t *
3960 zio_dva_allocate(zio_t *zio)
3961 {
3962 	spa_t *spa = zio->io_spa;
3963 	metaslab_class_t *mc;
3964 	blkptr_t *bp = zio->io_bp;
3965 	int error;
3966 	int flags = 0;
3967 
3968 	if (zio->io_gang_leader == NULL) {
3969 		ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3970 		zio->io_gang_leader = zio;
3971 	}
3972 
3973 	ASSERT(BP_IS_HOLE(bp));
3974 	ASSERT0(BP_GET_NDVAS(bp));
3975 	ASSERT3U(zio->io_prop.zp_copies, >, 0);
3976 	ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
3977 	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
3978 
3979 	if (zio->io_flags & ZIO_FLAG_NODATA)
3980 		flags |= METASLAB_DONT_THROTTLE;
3981 	if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
3982 		flags |= METASLAB_GANG_CHILD;
3983 	if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
3984 		flags |= METASLAB_ASYNC_ALLOC;
3985 
3986 	/*
3987 	 * if not already chosen, locate an appropriate allocation class
3988 	 */
3989 	mc = zio->io_metaslab_class;
3990 	if (mc == NULL) {
3991 		mc = spa_preferred_class(spa, zio);
3992 		zio->io_metaslab_class = mc;
3993 	}
3994 
3995 	/*
3996 	 * Try allocating the block in the usual metaslab class.
3997 	 * If that's full, allocate it in the normal class.
3998 	 * If that's full, allocate as a gang block,
3999 	 * and if all are full, the allocation fails (which shouldn't happen).
4000 	 *
4001 	 * Note that we do not fall back on embedded slog (ZIL) space, to
4002 	 * preserve unfragmented slog space, which is critical for decent
4003 	 * sync write performance.  If a log allocation fails, we will fall
4004 	 * back to spa_sync() which is abysmal for performance.
4005 	 */
4006 	ASSERT(ZIO_HAS_ALLOCATOR(zio));
4007 	error = metaslab_alloc(spa, mc, zio->io_size, bp,
4008 	    zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
4009 	    &zio->io_alloc_list, zio, zio->io_allocator);
4010 
4011 	/*
4012 	 * Fallback to normal class when an alloc class is full
4013 	 */
4014 	if (error == ENOSPC && mc != spa_normal_class(spa)) {
4015 		/*
4016 		 * When the dedup or special class is spilling into the normal
4017 		 * class, there can still be significant space available due
4018 		 * to deferred frees that are in-flight.  We track the txg when
4019 		 * this occurred and back off adding new DDT entries for a few
4020 		 * txgs to allow the free blocks to be processed.
4021 		 */
4022 		if ((mc == spa_dedup_class(spa) || (spa_special_has_ddt(spa) &&
4023 		    mc == spa_special_class(spa))) &&
4024 		    spa->spa_dedup_class_full_txg != zio->io_txg) {
4025 			spa->spa_dedup_class_full_txg = zio->io_txg;
4026 			zfs_dbgmsg("%s[%d]: %s class spilling, req size %d, "
4027 			    "%llu allocated of %llu",
4028 			    spa_name(spa), (int)zio->io_txg,
4029 			    mc == spa_dedup_class(spa) ? "dedup" : "special",
4030 			    (int)zio->io_size,
4031 			    (u_longlong_t)metaslab_class_get_alloc(mc),
4032 			    (u_longlong_t)metaslab_class_get_space(mc));
4033 		}
4034 
4035 		/*
4036 		 * If throttling, transfer reservation over to normal class.
4037 		 * The io_allocator slot can remain the same even though we
4038 		 * are switching classes.
4039 		 */
4040 		if (mc->mc_alloc_throttle_enabled &&
4041 		    (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
4042 			metaslab_class_throttle_unreserve(mc,
4043 			    zio->io_prop.zp_copies, zio->io_allocator, zio);
4044 			zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
4045 
4046 			VERIFY(metaslab_class_throttle_reserve(
4047 			    spa_normal_class(spa),
4048 			    zio->io_prop.zp_copies, zio->io_allocator, zio,
4049 			    flags | METASLAB_MUST_RESERVE));
4050 		}
4051 		zio->io_metaslab_class = mc = spa_normal_class(spa);
4052 		if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
4053 			zfs_dbgmsg("%s: metaslab allocation failure, "
4054 			    "trying normal class: zio %px, size %llu, error %d",
4055 			    spa_name(spa), zio, (u_longlong_t)zio->io_size,
4056 			    error);
4057 		}
4058 
4059 		error = metaslab_alloc(spa, mc, zio->io_size, bp,
4060 		    zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
4061 		    &zio->io_alloc_list, zio, zio->io_allocator);
4062 	}
4063 
4064 	if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
4065 		if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
4066 			zfs_dbgmsg("%s: metaslab allocation failure, "
4067 			    "trying ganging: zio %px, size %llu, error %d",
4068 			    spa_name(spa), zio, (u_longlong_t)zio->io_size,
4069 			    error);
4070 		}
4071 		return (zio_write_gang_block(zio, mc));
4072 	}
4073 	if (error != 0) {
4074 		if (error != ENOSPC ||
4075 		    (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
4076 			zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
4077 			    "size %llu, error %d",
4078 			    spa_name(spa), zio, (u_longlong_t)zio->io_size,
4079 			    error);
4080 		}
4081 		zio->io_error = error;
4082 	}
4083 
4084 	return (zio);
4085 }
4086 
4087 static zio_t *
4088 zio_dva_free(zio_t *zio)
4089 {
4090 	metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
4091 
4092 	return (zio);
4093 }
4094 
4095 static zio_t *
4096 zio_dva_claim(zio_t *zio)
4097 {
4098 	int error;
4099 
4100 	error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
4101 	if (error)
4102 		zio->io_error = error;
4103 
4104 	return (zio);
4105 }
4106 
4107 /*
4108  * Undo an allocation.  This is used by zio_done() when an I/O fails
4109  * and we want to give back the block we just allocated.
4110  * This handles both normal blocks and gang blocks.
4111  */
4112 static void
4113 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
4114 {
4115 	ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
4116 	ASSERT(zio->io_bp_override == NULL);
4117 
4118 	if (!BP_IS_HOLE(bp)) {
4119 		metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp),
4120 		    B_TRUE);
4121 	}
4122 
4123 	if (gn != NULL) {
4124 		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
4125 			zio_dva_unallocate(zio, gn->gn_child[g],
4126 			    &gn->gn_gbh->zg_blkptr[g]);
4127 		}
4128 	}
4129 }
4130 
4131 /*
4132  * Try to allocate an intent log block.  Return 0 on success, errno on failure.
4133  */
4134 int
4135 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
4136     uint64_t size, boolean_t *slog)
4137 {
4138 	int error = 1;
4139 	zio_alloc_list_t io_alloc_list;
4140 
4141 	ASSERT(txg > spa_syncing_txg(spa));
4142 
4143 	metaslab_trace_init(&io_alloc_list);
4144 
4145 	/*
4146 	 * Block pointer fields are useful to metaslabs for stats and debugging.
4147 	 * Fill in the obvious ones before calling into metaslab_alloc().
4148 	 */
4149 	BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
4150 	BP_SET_PSIZE(new_bp, size);
4151 	BP_SET_LEVEL(new_bp, 0);
4152 
4153 	/*
4154 	 * When allocating a zil block, we don't have information about
4155 	 * the final destination of the block except the objset it's part
4156 	 * of, so we just hash the objset ID to pick the allocator to get
4157 	 * some parallelism.
4158 	 */
4159 	int flags = METASLAB_ZIL;
4160 	int allocator = (uint_t)cityhash4(0, 0, 0,
4161 	    os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
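	/*
	 * Prefer the dedicated slog class, then the embedded log class, and
	 * finally fall back to the normal class.  *slog records whether the
	 * first (dedicated slog) allocation succeeded.
	 */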
4162 	error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
4163 	    txg, NULL, flags, &io_alloc_list, NULL, allocator);
4164 	*slog = (error == 0);
4165 	if (error != 0) {
4166 		error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
4167 		    new_bp, 1, txg, NULL, flags,
4168 		    &io_alloc_list, NULL, allocator);
4169 	}
4170 	if (error != 0) {
4171 		error = metaslab_alloc(spa, spa_normal_class(spa), size,
4172 		    new_bp, 1, txg, NULL, flags,
4173 		    &io_alloc_list, NULL, allocator);
4174 	}
4175 	metaslab_trace_fini(&io_alloc_list);
4176 
4177 	if (error == 0) {
4178 		BP_SET_LSIZE(new_bp, size);
4179 		BP_SET_PSIZE(new_bp, size);
4180 		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
4181 		BP_SET_CHECKSUM(new_bp,
4182 		    spa_version(spa) >= SPA_VERSION_SLIM_ZIL
4183 		    ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
4184 		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
4185 		BP_SET_LEVEL(new_bp, 0);
4186 		BP_SET_DEDUP(new_bp, 0);
4187 		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
4188 
4189 		/*
4190 		 * encrypted blocks will require an IV and salt. We generate
4191 		 * these now since we will not be rewriting the bp at
4192 		 * rewrite time.
4193 		 */
4194 		if (os->os_encrypted) {
4195 			uint8_t iv[ZIO_DATA_IV_LEN];
4196 			uint8_t salt[ZIO_DATA_SALT_LEN];
4197 
4198 			BP_SET_CRYPT(new_bp, B_TRUE);
4199 			VERIFY0(spa_crypt_get_salt(spa,
4200 			    dmu_objset_id(os), salt));
4201 			VERIFY0(zio_crypt_generate_iv(iv));
4202 
4203 			zio_crypt_encode_params_bp(new_bp, salt, iv);
4204 		}
4205 	} else {
4206 		zfs_dbgmsg("%s: zil block allocation failure: "
4207 		    "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
4208 		    error);
4209 	}
4210 
4211 	return (error);
4212 }
4213 
4214 /*
4215  * ==========================================================================
4216  * Read and write to physical devices
4217  * ==========================================================================
4218  */
4219 
4220 /*
4221  * Issue an I/O to the underlying vdev. Typically the issue pipeline
4222  * stops after this stage and will resume upon I/O completion.
4223  * However, there are instances where the vdev layer may need to
4224  * continue the pipeline when an I/O was not issued. Since the I/O
4225  * that was sent to the vdev layer might be different than the one
4226  * currently active in the pipeline (see vdev_queue_io()), we explicitly
4227  * force the underlying vdev layers to call either zio_execute() or
4228  * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
4229  */
4230 static zio_t *
4231 zio_vdev_io_start(zio_t *zio)
4232 {
4233 	vdev_t *vd = zio->io_vd;
4234 	uint64_t align;
4235 	spa_t *spa = zio->io_spa;
4236 
4237 	zio->io_delay = 0;
4238 
4239 	ASSERT(zio->io_error == 0);
4240 	ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
4241 
4242 	if (vd == NULL) {
4243 		if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
4244 			spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
4245 
4246 		/*
4247 		 * The mirror_ops handle multiple DVAs in a single BP.
4248 		 */
4249 		vdev_mirror_ops.vdev_op_io_start(zio);
4250 		return (NULL);
4251 	}
4252 
4253 	ASSERT3P(zio->io_logical, !=, zio);
4254 	if (zio->io_type == ZIO_TYPE_WRITE) {
4255 		ASSERT(spa->spa_trust_config);
4256 
4257 		/*
4258 		 * Note: the code can handle other kinds of writes,
4259 		 * but we don't expect them.
4260 		 */
4261 		if (zio->io_vd->vdev_noalloc) {
4262 			ASSERT(zio->io_flags &
4263 			    (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
4264 			    ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
4265 		}
4266 	}
4267 
4268 	align = 1ULL << vd->vdev_top->vdev_ashift;
4269 
4270 	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
4271 	    P2PHASE(zio->io_size, align) != 0) {
4272 		/* Transform logical writes to be a full physical block size. */
4273 		uint64_t asize = P2ROUNDUP(zio->io_size, align);
4274 		abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
4275 		ASSERT(vd == vd->vdev_top);
4276 		if (zio->io_type == ZIO_TYPE_WRITE) {
4277 			abd_copy(abuf, zio->io_abd, zio->io_size);
4278 			abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
4279 		}
4280 		zio_push_transform(zio, abuf, asize, asize, zio_subblock);
4281 	}
4282 
4283 	/*
4284 	 * If this is not a physical io, make sure that it is properly aligned
4285 	 * before proceeding.
4286 	 */
4287 	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
4288 		ASSERT0(P2PHASE(zio->io_offset, align));
4289 		ASSERT0(P2PHASE(zio->io_size, align));
4290 	} else {
4291 		/*
4292 		 * For physical writes, we allow 512b aligned writes and assume
4293 		 * the device will perform a read-modify-write as necessary.
4294 		 */
4295 		ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
4296 		ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
4297 	}
4298 
4299 	VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
4300 
4301 	/*
4302 	 * If this is a repair I/O, and there's no self-healing involved --
4303 	 * that is, we're just resilvering what we expect to resilver --
4304 	 * then don't do the I/O unless zio's txg is actually in vd's DTL.
4305 	 * This prevents spurious resilvering.
4306 	 *
4307 	 * There are a few ways that we can end up creating these spurious
4308 	 * resilver i/os:
4309 	 *
4310 	 * 1. A resilver i/o will be issued if any DVA in the BP has a
4311 	 * dirty DTL.  The mirror code will issue resilver writes to
4312 	 * each DVA, including the one(s) that are not on vdevs with dirty
4313 	 * DTLs.
4314 	 *
4315 	 * 2. With nested replication, which happens when we have a
4316 	 * "replacing" or "spare" vdev that's a child of a mirror or raidz.
4317 	 * For example, given mirror(replacing(A+B), C), it's likely that
4318 	 * only A is out of date (it's the new device). In this case, we'll
4319 	 * read from C, then use the data to resilver A+B -- but we don't
4320 	 * actually want to resilver B, just A. The top-level mirror has no
4321 	 * way to know this, so instead we just discard unnecessary repairs
4322 	 * as we work our way down the vdev tree.
4323 	 *
4324 	 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
4325 	 * The same logic applies to any form of nested replication: ditto
4326 	 * + mirror, RAID-Z + replacing, etc.
4327 	 *
4328 	 * However, indirect vdevs point off to other vdevs which may have
4329 	 * DTL's, so we never bypass them.  The child i/os on concrete vdevs
4330 	 * will be properly bypassed instead.
4331 	 *
4332 	 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from
4333 	 * a dRAID spare vdev. For example, when a dRAID spare is first
4334 	 * used, its spare blocks need to be written to but the leaf vdevs
4335 	 * of such blocks can have empty DTL_PARTIAL.
4336 	 *
4337 	 * There seemed no clean way to allow such writes while bypassing
4338 	 * spurious ones. At this point, just avoid all bypassing for dRAID
4339 	 * for correctness.
4340 	 */
4341 	if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
4342 	    !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
4343 	    zio->io_txg != 0 &&	/* not a delegated i/o */
4344 	    vd->vdev_ops != &vdev_indirect_ops &&
4345 	    vd->vdev_top->vdev_ops != &vdev_draid_ops &&
4346 	    !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
4347 		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4348 		zio_vdev_io_bypass(zio);
4349 		return (zio);
4350 	}
4351 
4352 	/*
4353 	 * Select the next best leaf I/O to process.  Distributed spares are
4354 	 * excluded since they dispatch the I/O directly to a leaf vdev after
4355 	 * applying the dRAID mapping.
4356 	 */
4357 	if (vd->vdev_ops->vdev_op_leaf &&
4358 	    vd->vdev_ops != &vdev_draid_spare_ops &&
4359 	    (zio->io_type == ZIO_TYPE_READ ||
4360 	    zio->io_type == ZIO_TYPE_WRITE ||
4361 	    zio->io_type == ZIO_TYPE_TRIM)) {
4362 
4363 		if (zio_handle_device_injection(vd, zio, ENOSYS) != 0) {
4364 			/*
4365 			 * "no-op" injections return success, but do no actual
4366 			 * work. Just skip the remaining vdev stages.
4367 			 */
4368 			zio_vdev_io_bypass(zio);
4369 			zio_interrupt(zio);
4370 			return (NULL);
4371 		}
4372 
4373 		if ((zio = vdev_queue_io(zio)) == NULL)
4374 			return (NULL);
4375 
4376 		if (!vdev_accessible(vd, zio)) {
4377 			zio->io_error = SET_ERROR(ENXIO);
4378 			zio_interrupt(zio);
4379 			return (NULL);
4380 		}
4381 		zio->io_delay = gethrtime();
4382 	}
4383 
4384 	vd->vdev_ops->vdev_op_io_start(zio);
4385 	return (NULL);
4386 }
4387 
4388 static zio_t *
4389 zio_vdev_io_done(zio_t *zio)
4390 {
4391 	vdev_t *vd = zio->io_vd;
4392 	vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
4393 	boolean_t unexpected_error = B_FALSE;
4394 
4395 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4396 		return (NULL);
4397 	}
4398 
4399 	ASSERT(zio->io_type == ZIO_TYPE_READ ||
4400 	    zio->io_type == ZIO_TYPE_WRITE ||
4401 	    zio->io_type == ZIO_TYPE_FLUSH ||
4402 	    zio->io_type == ZIO_TYPE_TRIM);
4403 
4404 	if (zio->io_delay)
4405 		zio->io_delay = gethrtime() - zio->io_delay;
4406 
4407 	if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4408 	    vd->vdev_ops != &vdev_draid_spare_ops) {
4409 		if (zio->io_type != ZIO_TYPE_FLUSH)
4410 			vdev_queue_io_done(zio);
4411 
4412 		if (zio_injection_enabled && zio->io_error == 0)
4413 			zio->io_error = zio_handle_device_injections(vd, zio,
4414 			    EIO, EILSEQ);
4415 
4416 		if (zio_injection_enabled && zio->io_error == 0)
4417 			zio->io_error = zio_handle_label_injection(zio, EIO);
4418 
4419 		if (zio->io_error && zio->io_type != ZIO_TYPE_FLUSH &&
4420 		    zio->io_type != ZIO_TYPE_TRIM) {
4421 			if (!vdev_accessible(vd, zio)) {
4422 				zio->io_error = SET_ERROR(ENXIO);
4423 			} else {
4424 				unexpected_error = B_TRUE;
4425 			}
4426 		}
4427 	}
4428 
4429 	ops->vdev_op_io_done(zio);
4430 
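	/*
	 * An unexpected error may indicate a misbehaving device; probe it
	 * unless it is already marked for removal.
	 */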
4431 	if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
4432 		VERIFY(vdev_probe(vd, zio) == NULL);
4433 
4434 	return (zio);
4435 }
4436 
4437 /*
4438  * This function is used to change the priority of an existing zio that is
4439  * currently in-flight. This is used by the arc to upgrade priority in the
4440  * event that a demand read is made for a block that is currently queued
4441  * as a scrub or async read IO. Otherwise, the high priority read request
4442  * would end up having to wait for the lower priority IO.
4443  */
4444 void
4445 zio_change_priority(zio_t *pio, zio_priority_t priority)
4446 {
4447 	zio_t *cio, *cio_next;
4448 	zio_link_t *zl = NULL;
4449 
4450 	ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
4451 
4452 	if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
4453 		vdev_queue_change_io_priority(pio, priority);
4454 	} else {
4455 		pio->io_priority = priority;
4456 	}
4457 
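	/* Propagate the new priority to all of our children as well. */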
4458 	mutex_enter(&pio->io_lock);
4459 	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
4460 		cio_next = zio_walk_children(pio, &zl);
4461 		zio_change_priority(cio, priority);
4462 	}
4463 	mutex_exit(&pio->io_lock);
4464 }
4465 
4466 /*
4467  * For non-raidz ZIOs, we can just copy aside the bad data read from the
4468  * disk, and use that to finish the checksum ereport later.
4469  */
4470 static void
4471 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
4472     const abd_t *good_buf)
4473 {
4474 	/* no processing needed */
4475 	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
4476 }
4477 
4478 void
4479 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
4480 {
4481 	void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
4482 
4483 	abd_copy(abd, zio->io_abd, zio->io_size);
4484 
4485 	zcr->zcr_cbinfo = zio->io_size;
4486 	zcr->zcr_cbdata = abd;
4487 	zcr->zcr_finish = zio_vsd_default_cksum_finish;
4488 	zcr->zcr_free = zio_abd_free;
4489 }
4490 
4491 static zio_t *
4492 zio_vdev_io_assess(zio_t *zio)
4493 {
4494 	vdev_t *vd = zio->io_vd;
4495 
4496 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4497 		return (NULL);
4498 	}
4499 
4500 	if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
4501 		spa_config_exit(zio->io_spa, SCL_ZIO, zio);
4502 
4503 	if (zio->io_vsd != NULL) {
4504 		zio->io_vsd_ops->vsd_free(zio);
4505 		zio->io_vsd = NULL;
4506 	}
4507 
4508 	if (zio_injection_enabled && zio->io_error == 0)
4509 		zio->io_error = zio_handle_fault_injection(zio, EIO);
4510 
4511 	/*
4512 	 * If the I/O failed, determine whether we should attempt to retry it.
4513 	 *
4514 	 * On retry, we cut in line in the issue queue, since we don't want
4515 	 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
4516 	 */
4517 	if (zio->io_error && vd == NULL &&
4518 	    !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
4519 		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE));	/* not a leaf */
4520 		ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));	/* not a leaf */
4521 		zio->io_error = 0;
4522 		zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
4523 		zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
4524 		zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
4525 		    zio_requeue_io_start_cut_in_line);
4526 		return (NULL);
4527 	}
4528 
4529 	/*
4530 	 * If we got an error on a leaf device, convert it to ENXIO
4531 	 * if the device is not accessible at all.
4532 	 */
4533 	if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4534 	    !vdev_accessible(vd, zio))
4535 		zio->io_error = SET_ERROR(ENXIO);
4536 
4537 	/*
4538 	 * If we can't write to an interior vdev (mirror or RAID-Z),
4539 	 * set vdev_cant_write so that we stop trying to allocate from it.
4540 	 */
4541 	if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
4542 	    vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
4543 		vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
4544 		    "cant_write=TRUE due to write failure with ENXIO",
4545 		    zio);
4546 		vd->vdev_cant_write = B_TRUE;
4547 	}
4548 
4549 	/*
4550 	 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future
4551 	 * attempts will ever succeed. In this case we set a persistent
4552 	 * boolean flag so that we don't bother with it in the future.
4553 	 */
4554 	if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
4555 	    zio->io_type == ZIO_TYPE_FLUSH && vd != NULL)
4556 		vd->vdev_nowritecache = B_TRUE;
4557 
4558 	if (zio->io_error)
4559 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4560 
4561 	return (zio);
4562 }
4563 
4564 void
4565 zio_vdev_io_reissue(zio_t *zio)
4566 {
4567 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4568 	ASSERT(zio->io_error == 0);
4569 
4570 	zio->io_stage >>= 1;
4571 }
4572 
4573 void
4574 zio_vdev_io_redone(zio_t *zio)
4575 {
4576 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
4577 
4578 	zio->io_stage >>= 1;
4579 }
4580 
4581 void
4582 zio_vdev_io_bypass(zio_t *zio)
4583 {
4584 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4585 	ASSERT(zio->io_error == 0);
4586 
4587 	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
4588 	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
4589 }
4590 
4591 /*
4592  * ==========================================================================
4593  * Encrypt and store encryption parameters
4594  * ==========================================================================
4595  */
4596 
4597 
4598 /*
4599  * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
4600  * managing the storage of encryption parameters and passing them to the
4601  * lower-level encryption functions.
4602  */
4603 static zio_t *
4604 zio_encrypt(zio_t *zio)
4605 {
4606 	zio_prop_t *zp = &zio->io_prop;
4607 	spa_t *spa = zio->io_spa;
4608 	blkptr_t *bp = zio->io_bp;
4609 	uint64_t psize = BP_GET_PSIZE(bp);
4610 	uint64_t dsobj = zio->io_bookmark.zb_objset;
4611 	dmu_object_type_t ot = BP_GET_TYPE(bp);
4612 	void *enc_buf = NULL;
4613 	abd_t *eabd = NULL;
4614 	uint8_t salt[ZIO_DATA_SALT_LEN];
4615 	uint8_t iv[ZIO_DATA_IV_LEN];
4616 	uint8_t mac[ZIO_DATA_MAC_LEN];
4617 	boolean_t no_crypt = B_FALSE;
4618 
4619 	/* the root zio already encrypted the data */
4620 	if (zio->io_child_type == ZIO_CHILD_GANG)
4621 		return (zio);
4622 
4623 	/* only ZIL blocks are re-encrypted on rewrite */
4624 	if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
4625 		return (zio);
4626 
4627 	if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
4628 		BP_SET_CRYPT(bp, B_FALSE);
4629 		return (zio);
4630 	}
4631 
4632 	/* if we are doing raw encryption set the provided encryption params */
4633 	if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
4634 		ASSERT0(BP_GET_LEVEL(bp));
4635 		BP_SET_CRYPT(bp, B_TRUE);
4636 		BP_SET_BYTEORDER(bp, zp->zp_byteorder);
4637 		if (ot != DMU_OT_OBJSET)
4638 			zio_crypt_encode_mac_bp(bp, zp->zp_mac);
4639 
4640 		/* dnode blocks must be written out in the provided byteorder */
4641 		if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
4642 		    ot == DMU_OT_DNODE) {
4643 			void *bswap_buf = zio_buf_alloc(psize);
4644 			abd_t *babd = abd_get_from_buf(bswap_buf, psize);
4645 
4646 			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4647 			abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
4648 			dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
4649 			    psize);
4650 
4651 			abd_take_ownership_of_buf(babd, B_TRUE);
4652 			zio_push_transform(zio, babd, psize, psize, NULL);
4653 		}
4654 
4655 		if (DMU_OT_IS_ENCRYPTED(ot))
4656 			zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
4657 		return (zio);
4658 	}
4659 
4660 	/* indirect blocks only maintain a cksum of the lower level MACs */
4661 	if (BP_GET_LEVEL(bp) > 0) {
4662 		BP_SET_CRYPT(bp, B_TRUE);
4663 		VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
4664 		    zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
4665 		    mac));
4666 		zio_crypt_encode_mac_bp(bp, mac);
4667 		return (zio);
4668 	}
4669 
4670 	/*
4671 	 * Objset blocks are a special case since they have 2 256-bit MACs
4672 	 * embedded within them.
4673 	 */
4674 	if (ot == DMU_OT_OBJSET) {
4675 		ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
4676 		ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4677 		BP_SET_CRYPT(bp, B_TRUE);
4678 		VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
4679 		    zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
4680 		return (zio);
4681 	}
4682 
4683 	/* unencrypted object types are only authenticated with a MAC */
4684 	if (!DMU_OT_IS_ENCRYPTED(ot)) {
4685 		BP_SET_CRYPT(bp, B_TRUE);
4686 		VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
4687 		    zio->io_abd, psize, mac));
4688 		zio_crypt_encode_mac_bp(bp, mac);
4689 		return (zio);
4690 	}
4691 
4692 	/*
4693 	 * Later passes of sync-to-convergence may decide to rewrite data
4694 	 * in place to avoid more disk reallocations. This presents a problem
4695 	 * for encryption because this constitutes rewriting the new data with
4696 	 * the same encryption key and IV. However, this only applies to blocks
4697 	 * in the MOS (particularly the spacemaps) and we do not encrypt the
4698 	 * MOS. We assert that the zio is allocating or an intent log write
4699 	 * to enforce this.
4700 	 */
4701 	ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
4702 	ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
4703 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
4704 	ASSERT3U(psize, !=, 0);
4705 
4706 	enc_buf = zio_buf_alloc(psize);
4707 	eabd = abd_get_from_buf(enc_buf, psize);
4708 	abd_take_ownership_of_buf(eabd, B_TRUE);
4709 
4710 	/*
4711 	 * For an explanation of what encryption parameters are stored
4712 	 * where, see the block comment in zio_crypt.c.
4713 	 */
4714 	if (ot == DMU_OT_INTENT_LOG) {
4715 		zio_crypt_decode_params_bp(bp, salt, iv);
4716 	} else {
4717 		BP_SET_CRYPT(bp, B_TRUE);
4718 	}
4719 
4720 	/* Perform the encryption. This should not fail */
4721 	VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
4722 	    BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
4723 	    salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
4724 
4725 	/* encode encryption metadata into the bp */
4726 	if (ot == DMU_OT_INTENT_LOG) {
4727 		/*
4728 		 * ZIL blocks store the MAC in the embedded checksum, so the
4729 		 * transform must always be applied.
4730 		 */
4731 		zio_crypt_encode_mac_zil(enc_buf, mac);
4732 		zio_push_transform(zio, eabd, psize, psize, NULL);
4733 	} else {
4734 		BP_SET_CRYPT(bp, B_TRUE);
4735 		zio_crypt_encode_params_bp(bp, salt, iv);
4736 		zio_crypt_encode_mac_bp(bp, mac);
4737 
4738 		if (no_crypt) {
4739 			ASSERT3U(ot, ==, DMU_OT_DNODE);
4740 			abd_free(eabd);
4741 		} else {
4742 			zio_push_transform(zio, eabd, psize, psize, NULL);
4743 		}
4744 	}
4745 
4746 	return (zio);
4747 }
4748 
4749 /*
4750  * ==========================================================================
4751  * Generate and verify checksums
4752  * ==========================================================================
4753  */
4754 static zio_t *
4755 zio_checksum_generate(zio_t *zio)
4756 {
4757 	blkptr_t *bp = zio->io_bp;
4758 	enum zio_checksum checksum;
4759 
4760 	if (bp == NULL) {
4761 		/*
4762 		 * This is zio_write_phys().
4763 		 * We're either generating a label checksum, or none at all.
4764 		 */
4765 		checksum = zio->io_prop.zp_checksum;
4766 
4767 		if (checksum == ZIO_CHECKSUM_OFF)
4768 			return (zio);
4769 
4770 		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
4771 	} else {
4772 		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
4773 			ASSERT(!IO_IS_ALLOCATING(zio));
4774 			checksum = ZIO_CHECKSUM_GANG_HEADER;
4775 		} else {
4776 			checksum = BP_GET_CHECKSUM(bp);
4777 		}
4778 	}
4779 
4780 	zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
4781 
4782 	return (zio);
4783 }
4784 
4785 static zio_t *
4786 zio_checksum_verify(zio_t *zio)
4787 {
4788 	zio_bad_cksum_t info;
4789 	blkptr_t *bp = zio->io_bp;
4790 	int error;
4791 
4792 	ASSERT(zio->io_vd != NULL);
4793 
4794 	if (bp == NULL) {
4795 		/*
4796 		 * This is zio_read_phys().
4797 		 * We're either verifying a label checksum, or nothing at all.
4798 		 */
4799 		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
4800 			return (zio);
4801 
4802 		ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
4803 	}
4804 
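	/*
	 * Record any checksum error; for non-speculative ECKSUM failures,
	 * also bump the vdev's checksum error count and start an ereport.
	 */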
4805 	if ((error = zio_checksum_error(zio, &info)) != 0) {
4806 		zio->io_error = error;
4807 		if (error == ECKSUM &&
4808 		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
4809 			mutex_enter(&zio->io_vd->vdev_stat_lock);
4810 			zio->io_vd->vdev_stat.vs_checksum_errors++;
4811 			mutex_exit(&zio->io_vd->vdev_stat_lock);
4812 			(void) zfs_ereport_start_checksum(zio->io_spa,
4813 			    zio->io_vd, &zio->io_bookmark, zio,
4814 			    zio->io_offset, zio->io_size, &info);
4815 		}
4816 	}
4817 
4818 	return (zio);
4819 }
4820 
4821 /*
4822  * Called by RAID-Z to ensure we don't compute the checksum twice.
4823  */
4824 void
4825 zio_checksum_verified(zio_t *zio)
4826 {
4827 	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
4828 }
4829 
4830 /*
4831  * ==========================================================================
4832  * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
4833  * An error of 0 indicates success.  ENXIO indicates whole-device failure,
4834  * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
4835  * indicate errors that are specific to one I/O, and most likely permanent.
4836  * Any other error is presumed to be worse because we weren't expecting it.
4837  * ==========================================================================
4838  */
4839 int
4840 zio_worst_error(int e1, int e2)
4841 {
4842 	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
4843 	int r1, r2;
4844 
4845 	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
4846 		if (e1 == zio_error_rank[r1])
4847 			break;
4848 
4849 	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
4850 		if (e2 == zio_error_rank[r2])
4851 			break;
4852 
4853 	return (r1 > r2 ? e1 : e2);
4854 }
4855 
4856 /*
4857  * ==========================================================================
4858  * I/O completion
4859  * ==========================================================================
4860  */
4861 static zio_t *
4862 zio_ready(zio_t *zio)
4863 {
4864 	blkptr_t *bp = zio->io_bp;
4865 	zio_t *pio, *pio_next;
4866 	zio_link_t *zl = NULL;
4867 
4868 	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
4869 	    ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
4870 		return (NULL);
4871 	}
4872 
4873 	if (zio->io_ready) {
4874 		ASSERT(IO_IS_ALLOCATING(zio));
4875 		ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg ||
4876 		    BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
4877 		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
4878 
4879 		zio->io_ready(zio);
4880 	}
4881 
4882 #ifdef ZFS_DEBUG
4883 	if (bp != NULL && bp != &zio->io_bp_copy)
4884 		zio->io_bp_copy = *bp;
4885 #endif
4886 
4887 	if (zio->io_error != 0) {
4888 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4889 
4890 		if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4891 			ASSERT(IO_IS_ALLOCATING(zio));
4892 			ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
4893 			ASSERT(zio->io_metaslab_class != NULL);
4894 			ASSERT(ZIO_HAS_ALLOCATOR(zio));
4895 
4896 			/*
4897 			 * We were unable to allocate anything, unreserve and
4898 			 * issue the next I/O to allocate.
4899 			 */
4900 			metaslab_class_throttle_unreserve(
4901 			    zio->io_metaslab_class, zio->io_prop.zp_copies,
4902 			    zio->io_allocator, zio);
4903 			zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
4904 		}
4905 	}
4906 
4907 	mutex_enter(&zio->io_lock);
4908 	zio->io_state[ZIO_WAIT_READY] = 1;
4909 	pio = zio_walk_parents(zio, &zl);
4910 	mutex_exit(&zio->io_lock);
4911 
4912 	/*
4913 	 * As we notify zio's parents, new parents could be added.
4914 	 * New parents go to the head of zio's io_parent_list, however,
4915 	 * so we will (correctly) not notify them.  The remainder of zio's
4916 	 * io_parent_list, from 'pio_next' onward, cannot change because
4917 	 * all parents must wait for us to be done before they can be done.
4918 	 */
4919 	for (; pio != NULL; pio = pio_next) {
4920 		pio_next = zio_walk_parents(zio, &zl);
4921 		zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
4922 	}
4923 
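	/*
	 * NODATA writes have no payload.  A gang header must still be
	 * written, so clear the flag for it; otherwise skip the vdev I/O
	 * stages entirely.
	 */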
4924 	if (zio->io_flags & ZIO_FLAG_NODATA) {
4925 		if (bp != NULL && BP_IS_GANG(bp)) {
4926 			zio->io_flags &= ~ZIO_FLAG_NODATA;
4927 		} else {
4928 			ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
4929 			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
4930 		}
4931 	}
4932 
4933 	if (zio_injection_enabled &&
4934 	    zio->io_spa->spa_syncing_txg == zio->io_txg)
4935 		zio_handle_ignored_writes(zio);
4936 
4937 	return (zio);
4938 }
4939 
4940 /*
4941  * Update the allocation throttle accounting.
4942  */
4943 static void
4944 zio_dva_throttle_done(zio_t *zio)
4945 {
4946 	zio_t *lio __maybe_unused = zio->io_logical;
4947 	zio_t *pio = zio_unique_parent(zio);
4948 	vdev_t *vd = zio->io_vd;
4949 	int flags = METASLAB_ASYNC_ALLOC;
4950 
4951 	ASSERT3P(zio->io_bp, !=, NULL);
4952 	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
4953 	ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
4954 	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
4955 	ASSERT(vd != NULL);
4956 	ASSERT3P(vd, ==, vd->vdev_top);
4957 	ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
4958 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
4959 	ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
4960 	ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
4961 	ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
4962 
4963 	/*
4964 	 * Parents of gang children can have two flavors -- ones that
4965 	 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
4966 	 * and ones that allocated the constituent blocks. The allocation
4967 	 * throttle needs to know the allocating parent zio so we must find
4968 	 * it here.
4969 	 */
4970 	if (pio->io_child_type == ZIO_CHILD_GANG) {
4971 		/*
4972 		 * If our parent is a rewrite gang child then our grandparent
4973 		 * would have been the one that performed the allocation.
4974 		 */
4975 		if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
4976 			pio = zio_unique_parent(pio);
4977 		flags |= METASLAB_GANG_CHILD;
4978 	}
4979 
4980 	ASSERT(IO_IS_ALLOCATING(pio));
4981 	ASSERT(ZIO_HAS_ALLOCATOR(pio));
4982 	ASSERT3P(zio, !=, zio->io_logical);
4983 	ASSERT(zio->io_logical != NULL);
4984 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
4985 	ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
4986 	ASSERT(zio->io_metaslab_class != NULL);
4987 
4988 	mutex_enter(&pio->io_lock);
4989 	metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
4990 	    pio->io_allocator, B_TRUE);
4991 	mutex_exit(&pio->io_lock);
4992 
4993 	metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
4994 	    pio->io_allocator, pio);
4995 
4996 	/*
4997 	 * Call into the pipeline to see if there is more work that
4998 	 * needs to be done. If there is work to be done it will be
4999 	 * dispatched to another taskq thread.
5000 	 */
5001 	zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
5002 }
5003 
5004 static zio_t *
5005 zio_done(zio_t *zio)
5006 {
5007 	/*
5008 	 * Always attempt to keep stack usage minimal here since
5009 	 * we can be called recursively up to 19 levels deep.
5010 	 */
5011 	const uint64_t psize = zio->io_size;
5012 	zio_t *pio, *pio_next;
5013 	zio_link_t *zl = NULL;
5014 
5015 	/*
5016 	 * If our children haven't all completed,
5017 	 * wait for them and then repeat this pipeline stage.
5018 	 */
5019 	if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
5020 		return (NULL);
5021 	}
5022 
5023 	/*
5024 	 * If the allocation throttle is enabled, then update the accounting.
5025 	 * We only track child I/Os that are part of an allocating async
5026 	 * write. We must do this since the allocation is performed
5027 	 * by the logical I/O but the actual write is done by child I/Os.
5028 	 */
5029 	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
5030 	    zio->io_child_type == ZIO_CHILD_VDEV) {
5031 		ASSERT(zio->io_metaslab_class != NULL);
5032 		ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
5033 		zio_dva_throttle_done(zio);
5034 	}
5035 
5036 	/*
5037 	 * If the allocation throttle is enabled, verify that
5038 	 * we have decremented the refcounts for every I/O that was throttled.
5039 	 */
5040 	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
5041 		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
5042 		ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
5043 		ASSERT(zio->io_bp != NULL);
5044 		ASSERT(ZIO_HAS_ALLOCATOR(zio));
5045 
5046 		metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
5047 		    zio->io_allocator);
5048 		VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
5049 		    mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
5050 	}
5051 
5053 	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
5054 		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
5055 			ASSERT(zio->io_children[c][w] == 0);
5056 
5057 	if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
5058 		ASSERT(zio->io_bp->blk_pad[0] == 0);
5059 		ASSERT(zio->io_bp->blk_pad[1] == 0);
5060 		ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
5061 		    sizeof (blkptr_t)) == 0 ||
5062 		    (zio->io_bp == zio_unique_parent(zio)->io_bp));
5063 		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
5064 		    zio->io_bp_override == NULL &&
5065 		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
5066 			ASSERT3U(zio->io_prop.zp_copies, <=,
5067 			    BP_GET_NDVAS(zio->io_bp));
5068 			ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
5069 			    (BP_COUNT_GANG(zio->io_bp) ==
5070 			    BP_GET_NDVAS(zio->io_bp)));
5071 		}
5072 		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
5073 			VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
5074 	}
5075 
5076 	/*
5077 	 * If there were child vdev/gang/ddt errors, they apply to us now.
5078 	 */
5079 	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
5080 	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
5081 	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
5082 
5083 	/*
5084 	 * If the I/O on the transformed data was successful, generate any
5085 	 * checksum reports now while we still have the transformed data.
5086 	 */
5087 	if (zio->io_error == 0) {
5088 		while (zio->io_cksum_report != NULL) {
5089 			zio_cksum_report_t *zcr = zio->io_cksum_report;
5090 			uint64_t align = zcr->zcr_align;
5091 			uint64_t asize = P2ROUNDUP(psize, align);
5092 			abd_t *adata = zio->io_abd;
5093 
5094 			if (adata != NULL && asize != psize) {
5095 				adata = abd_alloc(asize, B_TRUE);
5096 				abd_copy(adata, zio->io_abd, psize);
5097 				abd_zero_off(adata, psize, asize - psize);
5098 			}
5099 
5100 			zio->io_cksum_report = zcr->zcr_next;
5101 			zcr->zcr_next = NULL;
5102 			zcr->zcr_finish(zcr, adata);
5103 			zfs_ereport_free_checksum(zcr);
5104 
5105 			if (adata != NULL && asize != psize)
5106 				abd_free(adata);
5107 		}
5108 	}
5109 
5110 	zio_pop_transforms(zio);	/* note: may set zio->io_error */
5111 
5112 	vdev_stat_update(zio, psize);
5113 
5114 	/*
5115 	 * If this I/O is attached to a particular vdev and is slow, exceeding
5116 	 * zio_slow_io_ms to complete, post an error describing the I/O delay.
5117 	 * We ignore these errors if the device is currently unavailable.
5118 	 */
5119 	if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
5120 		if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
5121 			/*
5122 			 * We want to only increment our slow IO counters if
5123 			 * the IO is valid (i.e. not if the drive is removed).
5124 			 *
5125 			 * zfs_ereport_post() will also do these checks, but
5126 			 * it can also ratelimit and have other failures, so we
5127 			 * need to increment the slow_io counters independent
5128 			 * of it.
5129 			 */
5130 			if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
5131 			    zio->io_spa, zio->io_vd, zio)) {
5132 				mutex_enter(&zio->io_vd->vdev_stat_lock);
5133 				zio->io_vd->vdev_stat.vs_slow_ios++;
5134 				mutex_exit(&zio->io_vd->vdev_stat_lock);
5135 
5136 				(void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
5137 				    zio->io_spa, zio->io_vd, &zio->io_bookmark,
5138 				    zio, 0);
5139 			}
5140 		}
5141 	}
5142 
5143 	if (zio->io_error) {
5144 		/*
5145 		 * If this I/O is attached to a particular vdev,
5146 		 * generate an error message describing the I/O failure
5147 		 * at the block level.  We ignore these errors if the
5148 		 * device is currently unavailable.
5149 		 */
5150 		if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
5151 		    !vdev_is_dead(zio->io_vd)) {
5152 			int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
5153 			    zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
5154 			if (ret != EALREADY) {
5155 				mutex_enter(&zio->io_vd->vdev_stat_lock);
5156 				if (zio->io_type == ZIO_TYPE_READ)
5157 					zio->io_vd->vdev_stat.vs_read_errors++;
5158 				else if (zio->io_type == ZIO_TYPE_WRITE)
5159 					zio->io_vd->vdev_stat.vs_write_errors++;
5160 				mutex_exit(&zio->io_vd->vdev_stat_lock);
5161 			}
5162 		}
5163 
5164 		if ((zio->io_error == EIO || !(zio->io_flags &
5165 		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
5166 		    zio == zio->io_logical) {
5167 			/*
5168 			 * For logical I/O requests, tell the SPA to log the
5169 			 * error and generate a logical data ereport.
5170 			 */
5171 			spa_log_error(zio->io_spa, &zio->io_bookmark,
5172 			    BP_GET_LOGICAL_BIRTH(zio->io_bp));
5173 			(void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
5174 			    zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
5175 		}
5176 	}
5177 
5178 	if (zio->io_error && zio == zio->io_logical) {
5179 		/*
5180 		 * Determine whether zio should be reexecuted.  This will
5181 		 * propagate all the way to the root via zio_notify_parent().
5182 		 */
5183 		ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
5184 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
5185 
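		/*
		 * Allocating writes that are not allowed to fail must be
		 * retried.  Most errors are marked for immediate reexecute,
		 * but ENOSPC would just fail again right away, so that case
		 * is marked to suspend instead.
		 */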
5186 		if (IO_IS_ALLOCATING(zio) &&
5187 		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
5188 			if (zio->io_error != ENOSPC)
5189 				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
5190 			else
5191 				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
5192 		}
5193 
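		/*
		 * Reads and frees that hit ENXIO (the device went away)
		 * outside of pool load are also marked to suspend, unless
		 * this is a scan thread or the failmode property allows
		 * errors to propagate (failmode=continue).
		 */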
5194 		if ((zio->io_type == ZIO_TYPE_READ ||
5195 		    zio->io_type == ZIO_TYPE_FREE) &&
5196 		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
5197 		    zio->io_error == ENXIO &&
5198 		    spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
5199 		    spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
5200 			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
5201 
5202 		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
5203 			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
5204 
5205 		/*
5206 		 * Here is a possibly good place to attempt to do
5207 		 * either combinatorial reconstruction or error correction
5208 		 * based on checksums.  It also might be a good place
5209 		 * to send out preliminary ereports before we suspend
5210 		 * processing.
5211 		 */
5212 	}
5213 
5214 	/*
5215 	 * If there were logical child errors, they apply to us now.
5216 	 * We defer this until now to avoid conflating logical child
5217 	 * errors with errors that happened to the zio itself when
5218 	 * updating vdev stats and reporting FMA events above.
5219 	 */
5220 	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
5221 
5222 	if ((zio->io_error || zio->io_reexecute) &&
5223 	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
5224 	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
5225 		zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
5226 
5227 	zio_gang_tree_free(&zio->io_gang_tree);
5228 
5229 	/*
5230 	 * Godfather I/Os should never suspend.
5231 	 */
5232 	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
5233 	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
5234 		zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
5235 
5236 	if (zio->io_reexecute) {
5237 		/*
5238 		 * This is a logical I/O that wants to reexecute.
5239 		 *
5240 		 * Reexecute is top-down.  When an i/o fails, if it's not
5241 		 * the root, it simply notifies its parent and sticks around.
5242 		 * The parent, seeing that it still has children in zio_done(),
5243 		 * does the same.  This percolates all the way up to the root.
5244 		 * The root i/o will reexecute or suspend the entire tree.
5245 		 *
5246 		 * This approach ensures that zio_reexecute() honors
5247 		 * all the original i/o dependency relationships, e.g.
5248 		 * parents not executing until children are ready.
5249 		 */
5250 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
5251 
5252 		zio->io_gang_leader = NULL;
5253 
5254 		mutex_enter(&zio->io_lock);
5255 		zio->io_state[ZIO_WAIT_DONE] = 1;
5256 		mutex_exit(&zio->io_lock);
5257 
5258 		/*
5259 		 * "The Godfather" I/O monitors its children but is
5260 		 * not a true parent to them. It will track them through
5261 		 * the pipeline but severs its ties whenever they get into
5262 		 * trouble (e.g. suspended). This allows "The Godfather"
5263 		 * I/O to return status without blocking.
5264 		 */
5265 		zl = NULL;
5266 		for (pio = zio_walk_parents(zio, &zl); pio != NULL;
5267 		    pio = pio_next) {
5268 			zio_link_t *remove_zl = zl;
5269 			pio_next = zio_walk_parents(zio, &zl);
5270 
5271 			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
5272 			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
5273 				zio_remove_child(pio, zio, remove_zl);
5274 				/*
5275 				 * This is a rare code path, so we don't
5276 				 * bother with "next_to_execute".
5277 				 */
5278 				zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
5279 				    NULL);
5280 			}
5281 		}
5282 
5283 		if ((pio = zio_unique_parent(zio)) != NULL) {
5284 			/*
5285 			 * We're not a root i/o, so there's nothing to do
5286 			 * but notify our parent.  Don't propagate errors
5287 			 * upward since we haven't permanently failed yet.
5288 			 */
5289 			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
5290 			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
5291 			/*
5292 			 * This is a rare code path, so we don't bother with
5293 			 * "next_to_execute".
5294 			 */
5295 			zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
5296 		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
5297 			/*
5298 			 * We'd fail again if we reexecuted now, so suspend
5299 			 * until conditions improve (e.g. device comes online).
5300 			 */
5301 			zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
5302 		} else {
5303 			/*
5304 			 * Reexecution is potentially a huge amount of work.
5305 			 * Hand it off to the otherwise-unused claim taskq.
5306 			 */
5307 			spa_taskq_dispatch(zio->io_spa,
5308 			    ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
5309 			    zio_reexecute, zio, B_FALSE);
5310 		}
5311 		return (NULL);
5312 	}
5313 
5314 	ASSERT(list_is_empty(&zio->io_child_list));
5315 	ASSERT(zio->io_reexecute == 0);
5316 	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
5317 
5318 	/*
5319 	 * Report any checksum errors, since the I/O is complete.
5320 	 */
5321 	while (zio->io_cksum_report != NULL) {
5322 		zio_cksum_report_t *zcr = zio->io_cksum_report;
5323 		zio->io_cksum_report = zcr->zcr_next;
5324 		zcr->zcr_next = NULL;
5325 		zcr->zcr_finish(zcr, NULL);
5326 		zfs_ereport_free_checksum(zcr);
5327 	}
5328 
5329 	/*
5330 	 * It is the responsibility of the done callback to ensure that this
5331 	 * particular zio is no longer discoverable for adoption, and as
5332 	 * such, cannot acquire any new parents.
5333 	 */
5334 	if (zio->io_done)
5335 		zio->io_done(zio);
5336 
5337 	mutex_enter(&zio->io_lock);
5338 	zio->io_state[ZIO_WAIT_DONE] = 1;
5339 	mutex_exit(&zio->io_lock);
5340 
5341 	/*
5342 	 * We are done executing this zio.  We may want to execute a parent
5343 	 * next.  See the comment in zio_notify_parent().
5344 	 */
5345 	zio_t *next_to_execute = NULL;
5346 	zl = NULL;
5347 	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
5348 		zio_link_t *remove_zl = zl;
5349 		pio_next = zio_walk_parents(zio, &zl);
5350 		zio_remove_child(pio, zio, remove_zl);
5351 		zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
5352 	}
5353 
5354 	if (zio->io_waiter != NULL) {
5355 		mutex_enter(&zio->io_lock);
5356 		zio->io_executor = NULL;
5357 		cv_broadcast(&zio->io_cv);
5358 		mutex_exit(&zio->io_lock);
5359 	} else {
5360 		zio_destroy(zio);
5361 	}
5362 
5363 	return (next_to_execute);
5364 }
5365 
5366 /*
5367  * ==========================================================================
5368  * I/O pipeline definition
5369  * ==========================================================================
5370  */
5371 static zio_pipe_stage_t *zio_pipeline[] = {
5372 	NULL,
5373 	zio_read_bp_init,
5374 	zio_write_bp_init,
5375 	zio_free_bp_init,
5376 	zio_issue_async,
5377 	zio_write_compress,
5378 	zio_encrypt,
5379 	zio_checksum_generate,
5380 	zio_nop_write,
5381 	zio_brt_free,
5382 	zio_ddt_read_start,
5383 	zio_ddt_read_done,
5384 	zio_ddt_write,
5385 	zio_ddt_free,
5386 	zio_gang_assemble,
5387 	zio_gang_issue,
5388 	zio_dva_throttle,
5389 	zio_dva_allocate,
5390 	zio_dva_free,
5391 	zio_dva_claim,
5392 	zio_ready,
5393 	zio_vdev_io_start,
5394 	zio_vdev_io_done,
5395 	zio_vdev_io_assess,
5396 	zio_checksum_verify,
5397 	zio_done
5398 };
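
/*
 * Note: the entries above must stay in the same order as the ZIO_STAGE_*
 * bits defined in zio_impl.h, because zio_execute() selects the next set bit
 * in io_pipeline and uses that bit's position to index this table.
 */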
5399 
5403 /*
5404  * Compare two zbookmark_phys_t's to see which we would reach first in a
5405  * pre-order traversal of the object tree.
5406  *
5407  * This is simple in every case aside from the meta-dnode object. For all other
5408  * objects, we traverse them in order (object 1 before object 2, and so on).
5409  * However, all of these objects are traversed while traversing object 0, since
5410  * the data it points to is the list of objects.  Thus, we need to convert to a
5411  * canonical representation so we can compare meta-dnode bookmarks to
5412  * non-meta-dnode bookmarks.
5413  *
5414  * We do this by calculating "equivalents" for each field of the zbookmark.
5415  * zbookmarks outside of the meta-dnode use their own object and level, and
5416  * calculate the level 0 equivalent (the first L0 blkid that is contained in the
5417  * blocks this bookmark refers to) by multiplying their blkid by their span
5418  * (the number of L0 blocks contained within one block at their level).
5419  * zbookmarks inside the meta-dnode calculate their object equivalent
5420  * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
5421  * level + 1<<31 (any value larger than a level could ever be) for their level.
5422  * level + 1<<31 (a value larger than any level could ever be) for their level.
5423  * equivalent, compare appropriately to bookmarks in other objects, and to
5424  * compare appropriately to other bookmarks in the meta-dnode.
5425  */
5426 int
5427 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
5428     const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
5429 {
5430 	/*
5431 	 * These variables represent the "equivalent" values for the zbookmark,
5432 	 * after converting zbookmarks inside the meta dnode to their
5433 	 * normal-object equivalents.
5434 	 */
5435 	uint64_t zb1obj, zb2obj;
5436 	uint64_t zb1L0, zb2L0;
5437 	uint64_t zb1level, zb2level;
5438 
5439 	if (zb1->zb_object == zb2->zb_object &&
5440 	    zb1->zb_level == zb2->zb_level &&
5441 	    zb1->zb_blkid == zb2->zb_blkid)
5442 		return (0);
5443 
5444 	IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
5445 	IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
5446 
5447 	/*
5448 	 * BP_SPANB calculates the span in blocks.
5449 	 */
5450 	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
5451 	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
5452 
5453 	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
5454 		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
5455 		zb1L0 = 0;
5456 		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
5457 	} else {
5458 		zb1obj = zb1->zb_object;
5459 		zb1level = zb1->zb_level;
5460 	}
5461 
5462 	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
5463 		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
5464 		zb2L0 = 0;
5465 		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
5466 	} else {
5467 		zb2obj = zb2->zb_object;
5468 		zb2level = zb2->zb_level;
5469 	}
5470 
5471 	/* Now that we have a canonical representation, do the comparison. */
5472 	if (zb1obj != zb2obj)
5473 		return (zb1obj < zb2obj ? -1 : 1);
5474 	else if (zb1L0 != zb2L0)
5475 		return (zb1L0 < zb2L0 ? -1 : 1);
5476 	else if (zb1level != zb2level)
5477 		return (zb1level > zb2level ? -1 : 1);
5478 	/*
5479 	 * This can (theoretically) happen if the bookmarks have the same object
5480 	 * and level, but different blkids, if the block sizes are not the same.
5481 	 * There is presently no way to change the indirect block sizes.
5482 	 */
5483 	return (0);
5484 }
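
/*
 * Worked example (illustrative only; assumes the default 16K dnode block,
 * i.e. dn_datablkszsec == 32, so one meta-dnode L0 block covers 32 dnodes):
 * a meta-dnode bookmark at level 0, blkid 2 canonicalizes to zb1obj = 2 * 32
 * = 64, zb1L0 = 0 and zb1level = COMPARE_META_LEVEL.  Against any bookmark
 * in object 64 it ties on the object equivalent and then sorts first, either
 * on the zero L0 equivalent or on the large meta level, which is exactly the
 * "compare before a bookmark in their object equivalent" behavior described
 * above.
 */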
5485 
5486 /*
5487  *  This function answers the following question: given that last_block is
5488  *  the place where our traversal stopped last time, does that guarantee that
5489  *  we've visited every node under subtree_root?  That is not a simple
5490  *  ordering question, so we can't just use the raw output of
5491  *  zbookmark_compare.  We have to pass in a modified version of subtree_root;
5492  *  by incrementing the block id, and then checking whether last_block is
5493  *  before or equal to that, we can tell whether or not having visited
5494  *  last_block implies that all of subtree_root's children have been visited.
5495  */
5496 boolean_t
5497 zbookmark_subtree_completed(const dnode_phys_t *dnp,
5498     const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
5499 {
5500 	zbookmark_phys_t mod_zb = *subtree_root;
5501 	mod_zb.zb_blkid++;
5502 	ASSERT0(last_block->zb_level);
5503 
5504 	/* The objset_phys_t isn't before anything. */
5505 	if (dnp == NULL)
5506 		return (B_FALSE);
5507 
5508 	/*
5509 	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
5510 	 * data block size in sectors, because that variable is only used if
5511 	 * the bookmark refers to a block in the meta-dnode.  Since we don't
5512 	 * know without examining it what object it refers to, and there's no
5513 	 * harm in passing in this value in other cases, we always pass it in.
5514 	 *
5515 	 * We pass in 0 for the indirect block size shift because zb2 must be
5516 	 * level 0.  The indirect block size is only used to calculate the span
5517 	 * of the bookmark, but since the bookmark must be level 0, the span is
5518 	 * always 1, so the math works out.
5519 	 *
5520 	 * If you make changes to how the zbookmark_compare code works, be sure
5521 	 * to make sure that this code still works afterwards.
5522 	 * to verify that this code still works afterwards.
5523 	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
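	/*
	 * With the current constants, 1ULL << (DNODE_BLOCK_SHIFT -
	 * SPA_MINBLOCKSHIFT) is 1 << (14 - 9) == 32 sectors, i.e. the 16K
	 * dnode block size.
	 */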
5524 	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
5525 	    last_block) <= 0);
5526 }
5527 
5528 /*
5529  * This function is similar to zbookmark_subtree_completed(), but returns true
5530  * if subtree_root is equal or ahead of last_block, i.e. still to be done.
5531  */
5532 boolean_t
5533 zbookmark_subtree_tbd(const dnode_phys_t *dnp,
5534     const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
5535 {
5536 	ASSERT0(last_block->zb_level);
5537 	if (dnp == NULL)
5538 		return (B_FALSE);
5539 	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
5540 	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
5541 	    last_block) >= 0);
5542 }
5543 
5544 EXPORT_SYMBOL(zio_type_name);
5545 EXPORT_SYMBOL(zio_buf_alloc);
5546 EXPORT_SYMBOL(zio_data_buf_alloc);
5547 EXPORT_SYMBOL(zio_buf_free);
5548 EXPORT_SYMBOL(zio_data_buf_free);
5549 
5550 ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
5551 	"Max I/O completion time (milliseconds) before marking it as slow");
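
/*
 * For example, on Linux the slow-I/O threshold can be changed at runtime
 * (illustrative; the value is in milliseconds):
 *
 *	echo 15000 > /sys/module/zfs/parameters/zio_slow_io_ms
 */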
5552 
5553 ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
5554 	"Prioritize requeued I/O");
5555 
5556 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free,  UINT, ZMOD_RW,
5557 	"Defer frees starting in this pass");
5558 
5559 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
5560 	"Don't compress starting in this pass");
5561 
5562 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
5563 	"Rewrite new bps starting in this pass");
5564 
5565 ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
5566 	"Throttle block allocations in the ZIO pipeline");
5567 
5568 ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
5569 	"Log all slow ZIOs, not just those with vdevs");
5570