/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, 2023, 2024, 2025, Klara, Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, Datto, Inc.
 * Copyright (c) 2021, 2024 by George Melikov. All rights reserved.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *const zio_type_name[ZIO_TYPES] = {
	/*
	 * Note: Linux kernel thread name length is limited,
	 * so these names differ from upstream OpenZFS.
	 */
	"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_flush", "z_trim"
};

int zio_dva_throttle_enabled = B_TRUE;
static int zio_deadman_log_all = B_FALSE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
static kmem_cache_t *zio_cache;
static kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif

/* Mark IOs as "slow" if they take longer than 30 seconds */
static uint_t zio_slow_io_ms = (30 * MILLISEC);

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
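/*
 * Worked example (illustrative): with 128K indirect blocks
 * (indblkshift = 17) and 128-byte block pointers (SPA_BLKPTRSHIFT = 7),
 * each indirect block holds 1 << (17 - 7) = 1024 block pointers, so
 * BP_SPANB(17, 1) == 1024 and BP_SPANB(17, 2) == 1024 * 1024 -- the
 * number of level-0 blocks spanned by a single block at each level.
 */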
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 *
 * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
 * compression (including of metadata). In practice, we don't have this
 * many sync passes, so this has no effect.
 *
 * The original intent was that disabling compression would help the sync
 * passes to converge. However, in practice disabling compression increases
 * the average number of sync passes, because when we turn compression off,
 * many blocks' sizes will change, and thus we have to re-allocate (not
 * overwrite) them. It also increases the number of 128KB allocations (e.g.
 * for indirect blocks and spacemaps) because these will not be compressed.
 * The 128K allocations are especially detrimental to performance on highly
 * fragmented systems, which may have very few free segments of this size,
 * and may need to load new metaslabs to satisfy 128K allocations.
 */

/* defer frees starting in this pass */
uint_t zfs_sync_pass_deferred_free = 2;

/* don't compress starting in this pass */
static uint_t zfs_sync_pass_dont_compress = 8;

/* rewrite new bps starting in this pass */
static uint_t zfs_sync_pass_rewrite = 2;

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

/*
 * Enable smaller cores by excluding metadata
 * allocations as well.
 */
int zio_exclude_metadata = 0;
static int zio_requeue_io_start_cut_in_line = 1;

#ifdef ZFS_DEBUG
static const int zio_buf_debug_limit = 16384;
#else
static const int zio_buf_debug_limit = 0;
#endif

typedef struct zio_stats {
	kstat_named_t ziostat_total_allocations;
	kstat_named_t ziostat_alloc_class_fallbacks;
	kstat_named_t ziostat_gang_writes;
	kstat_named_t ziostat_gang_multilevel;
} zio_stats_t;

static zio_stats_t zio_stats = {
	{ "total_allocations", KSTAT_DATA_UINT64 },
	{ "alloc_class_fallbacks", KSTAT_DATA_UINT64 },
	{ "gang_writes", KSTAT_DATA_UINT64 },
	{ "gang_multilevel", KSTAT_DATA_UINT64 },
};

struct {
	wmsum_t ziostat_total_allocations;
	wmsum_t ziostat_alloc_class_fallbacks;
	wmsum_t ziostat_gang_writes;
	wmsum_t ziostat_gang_multilevel;
} ziostat_sums;

#define	ZIOSTAT_BUMP(stat)	wmsum_add(&ziostat_sums.stat, 1);
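/*
 * Usage sketch (illustrative): hot paths bump a per-CPU wmsum counter,
 * e.g.
 *
 *	ZIOSTAT_BUMP(ziostat_gang_writes);
 *
 * The sums are only folded into the named kstat values on demand, in
 * zio_kstats_update() below, which keeps the fast path cheap.
 */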

static kstat_t *zio_ksp;

static inline void __zio_execute(zio_t *zio);

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

static int
zio_kstats_update(kstat_t *ksp, int rw)
{
	zio_stats_t *zs = ksp->ks_data;
	if (rw == KSTAT_WRITE)
		return (EACCES);

	zs->ziostat_total_allocations.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_total_allocations);
	zs->ziostat_alloc_class_fallbacks.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_alloc_class_fallbacks);
	zs->ziostat_gang_writes.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_gang_writes);
	zs->ziostat_gang_multilevel.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_gang_multilevel);
	return (0);
}

void
zio_init(void)
{
	size_t c;

	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	wmsum_init(&ziostat_sums.ziostat_total_allocations, 0);
	wmsum_init(&ziostat_sums.ziostat_alloc_class_fallbacks, 0);
	wmsum_init(&ziostat_sums.ziostat_gang_writes, 0);
	wmsum_init(&ziostat_sums.ziostat_gang_multilevel, 0);
	zio_ksp = kstat_create("zfs", 0, "zio_stats",
	    "misc", KSTAT_TYPE_NAMED, sizeof (zio_stats) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (zio_ksp != NULL) {
		zio_ksp->ks_data = &zio_stats;
		zio_ksp->ks_update = zio_kstats_update;
		kstat_install(zio_ksp);
	}

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t align, cflags, data_cflags;
		char name[32];

		/*
		 * Create cache for each half-power of 2 size, starting from
		 * SPA_MINBLOCKSIZE. It should give us memory space efficiency
		 * of ~7/8, sufficient for transient allocations mostly using
		 * these caches.
		 */
		size_t p2 = size;
		while (!ISP2(p2))
			p2 &= p2 - 1;
		if (!IS_P2ALIGNED(size, p2 / 2))
			continue;
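		/*
		 * For example (illustrative): with a 512-byte
		 * SPA_MINBLOCKSIZE this keeps 512, 1K, 1.5K, 2K, 3K, 4K,
		 * 6K, 8K, 12K, 16K, ... -- every power of two plus the
		 * midpoint between successive powers of two; all other
		 * multiples of 512 are skipped here and later fall back
		 * to the next larger cache.
		 */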

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif

		if (IS_P2ALIGNED(size, PAGESIZE))
			align = PAGESIZE;
		else
			align = 1 << (highbit64(size ^ (size - 1)) - 1);

		cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
		    KMC_NODEBUG : 0;
		data_cflags = KMC_NODEBUG;
		if (abd_size_alloc_linear(size)) {
			cflags |= KMC_RECLAIMABLE;
			data_cflags |= KMC_RECLAIMABLE;
		}
		if (cflags == data_cflags) {
			/*
			 * Resulting kmem caches would be identical.
			 * Save memory by creating only one.
			 */
			(void) snprintf(name, sizeof (name),
			    "zio_buf_comb_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size, align,
			    NULL, NULL, NULL, NULL, NULL, cflags);
			zio_data_buf_cache[c] = zio_buf_cache[c];
			continue;
		}
		(void) snprintf(name, sizeof (name), "zio_buf_%lu",
		    (ulong_t)size);
		zio_buf_cache[c] = kmem_cache_create(name, size, align,
		    NULL, NULL, NULL, NULL, NULL, cflags);

		(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
		    (ulong_t)size);
		zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
		    NULL, NULL, NULL, NULL, NULL, data_cflags);
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();

	lz4_init();
}

void
zio_fini(void)
{
	size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;

#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	for (size_t i = 0; i < n; i++) {
		if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
			(void) printf("zio_fini: [%d] %llu != %llu\n",
			    (int)((i + 1) << SPA_MINBLOCKSHIFT),
			    (long long unsigned)zio_buf_cache_allocs[i],
			    (long long unsigned)zio_buf_cache_frees[i]);
	}
#endif

	/*
	 * The same kmem cache can show up multiple times in both zio_buf_cache
	 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
	 * sort it out.
	 */
	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_buf_cache[j])
				zio_buf_cache[j] = NULL;
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_data_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		VERIFY3P(zio_buf_cache[i], ==, NULL);
		VERIFY3P(zio_data_buf_cache[i], ==, NULL);
	}

	if (zio_ksp != NULL) {
		kstat_delete(zio_ksp);
		zio_ksp = NULL;
	}

	wmsum_fini(&ziostat_sums.ziostat_total_allocations);
	wmsum_fini(&ziostat_sums.ziostat_alloc_class_fallbacks);
	wmsum_fini(&ziostat_sums.ziostat_gang_writes);
	wmsum_fini(&ziostat_sums.ziostat_gang_multilevel);

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	lz4_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

#if defined(ZFS_DEBUG) && defined(_KERNEL)
#define	ZFS_ZIO_BUF_CANARY	1
#endif

#ifdef ZFS_ZIO_BUF_CANARY
static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;

/*
 * Use empty space after the buffer to detect overflows.
 *
 * Since zio_init() creates kmem caches only for a certain set of buffer
 * sizes, allocations of different sizes may have some unused space after
 * the data. Filling part of that space with a known pattern on allocation
 * and checking it on free should allow us to detect some buffer overflows.
 */
static void
zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
	ulong_t *canary = p + off / sizeof (ulong_t);
	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
	    cache[c] == cache[c + 1])
		asize = (c + 2) << SPA_MINBLOCKSHIFT;
	for (; off < asize; canary++, off += sizeof (ulong_t))
		*canary = zio_buf_canary;
}

static void
zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
	ulong_t *canary = p + off / sizeof (ulong_t);
	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
	    cache[c] == cache[c + 1])
		asize = (c + 2) << SPA_MINBLOCKSHIFT;
	for (; off < asize; canary++, off += sizeof (ulong_t)) {
		if (unlikely(*canary != zio_buf_canary)) {
			PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
			    p, size, (canary - p) * sizeof (ulong_t),
			    *canary, zio_buf_canary);
		}
	}
}
#endif
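
/*
 * Illustrative example, assuming 512-byte cache granularity: a 600-byte
 * zio_buf_alloc() is served from the 1024-byte cache, so the canary
 * pattern is written from byte 600 (rounded up to ulong_t alignment)
 * through byte 1023. A write that runs past the 600-byte buffer lands in
 * the canary region and trips the PANIC in zio_buf_check_canary() when
 * the buffer is freed.
 */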

/*
 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif

	void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_put_canary(p, size, zio_buf_cache, c);
#endif
	return (p);
}
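
/*
 * Example pairing (illustrative): the size selects the kmem cache, so
 * callers must free with exactly the size they allocated:
 *
 *	void *p = zio_buf_alloc(sizeof (objset_phys_t));
 *	...
 *	zio_buf_free(p, sizeof (objset_phys_t));
 */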

/*
 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
 * crashdump if the kernel panics. This exists so that we limit the amount
 * of ZFS data that shows up in a kernel crashdump (thus reducing the
 * amount of kernel heap dumped to disk when the kernel panics).
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_put_canary(p, size, zio_data_buf_cache, c);
#endif
	return (p);
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif

#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_check_canary(buf, size, zio_buf_cache, c);
#endif
	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
#endif
	kmem_cache_free(zio_data_buf_cache[c], buf);
}

static void
zio_abd_free(void *abd, size_t size)
{
	(void) size;
	abd_free((abd_t *)abd);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}
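
/*
 * Sketch of the stack discipline (hypothetical caller): transforms pop
 * in LIFO order, so a stage that pushes transform A and then transform B
 * will see B applied before A in zio_pop_transforms(), with io_abd and
 * io_size restored to their pre-push values at each step. A nonzero
 * zt_bufsize additionally tells the pop to free the pushed buffer.
 */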

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks, decompression, and decryption
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, data, zio->io_size, size,
		    &zio->io_prop.zp_complevel);

		if (zio_injection_enabled && ret == 0)
			ret = zio_handle_fault_injection(zio, EINVAL);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
	int ret;
	void *tmp;
	blkptr_t *bp = zio->io_bp;
	spa_t *spa = zio->io_spa;
	uint64_t dsobj = zio->io_bookmark.zb_objset;
	uint64_t lsize = BP_GET_LSIZE(bp);
	dmu_object_type_t ot = BP_GET_TYPE(bp);
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	ASSERT(BP_USES_CRYPT(bp));
	ASSERT3U(size, !=, 0);

	if (zio->io_error != 0)
		return;

	/*
	 * Verify the cksum of MACs stored in an indirect bp. It will always
	 * be possible to verify this since it does not require an encryption
	 * key.
	 */
	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
		zio_crypt_decode_mac_bp(bp, mac);

		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
			/*
			 * We haven't decompressed the data yet, but
			 * zio_crypt_do_indirect_mac_checksum() requires
			 * decompressed data to be able to parse out the MACs
			 * from the indirect block. We decompress it now and
			 * throw away the result after we are finished.
			 */
			abd_t *abd = abd_alloc_linear(lsize, B_TRUE);
			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
			    zio->io_abd, abd, zio->io_size, lsize,
			    &zio->io_prop.zp_complevel);
			if (ret != 0) {
				abd_free(abd);
				ret = SET_ERROR(EIO);
				goto error;
			}
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    abd, lsize, BP_SHOULD_BYTESWAP(bp), mac);
			abd_free(abd);
		} else {
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
		}
		abd_copy(data, zio->io_abd, size);

		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
			ret = zio_handle_decrypt_injection(spa,
			    &zio->io_bookmark, ot, ECKSUM);
		}
		if (ret != 0)
			goto error;

		return;
	}

	/*
	 * If this is an authenticated block, just check the MAC. It would be
	 * nice to separate this out into its own flag, but when this was done,
	 * we had run out of bits in what is now zio_flag_t. Future cleanup
	 * could make this a flag bit.
	 */
	if (BP_IS_AUTHENTICATED(bp)) {
		if (ot == DMU_OT_OBJSET) {
			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
		} else {
			zio_crypt_decode_mac_bp(bp, mac);
			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
			    zio->io_abd, size, mac);
			if (zio_injection_enabled && ret == 0) {
				ret = zio_handle_decrypt_injection(spa,
				    &zio->io_bookmark, ot, ECKSUM);
			}
		}
		abd_copy(data, zio->io_abd, size);

		if (ret != 0)
			goto error;

		return;
	}

	zio_crypt_decode_params_bp(bp, salt, iv);

	if (ot == DMU_OT_INTENT_LOG) {
		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
		zio_crypt_decode_mac_zil(tmp, mac);
		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
	} else {
		zio_crypt_decode_mac_bp(bp, mac);
	}

	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
	    zio->io_abd, &no_crypt);
	if (no_crypt)
		abd_copy(data, zio->io_abd, size);

	if (ret != 0)
		goto error;

	return;

error:
	/* assert that the key was found unless this was speculative */
	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));

	/*
	 * If there was a decryption / authentication error return EIO as
	 * the io_error. If this was not a speculative zio, create an ereport.
	 */
	if (ret == ECKSUM) {
		zio->io_error = SET_ERROR(EIO);
		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
			spa_log_error(spa, &zio->io_bookmark,
			    BP_GET_LOGICAL_BIRTH(zio->io_bp));
			(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
			    spa, NULL, &zio->io_bookmark, zio, 0);
		}
	} else {
		zio->io_error = ret;
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	ASSERT(MUTEX_HELD(&pio->io_lock));

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	/* Parent should not have READY stage if child doesn't have it. */
	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
	    (cio->io_child_type != ZIO_CHILD_VDEV),
	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);

	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	uint64_t *countp = pio->io_children[cio->io_child_type];
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		countp[w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
}

void
zio_add_child_first(zio_t *pio, zio_t *cio)
{
	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	/* Parent should not have READY stage if child doesn't have it. */
	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
	    (cio->io_child_type != ZIO_CHILD_VDEV),
	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);

	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
	zl->zl_parent = pio;
	zl->zl_child = cio;

	ASSERT(list_is_empty(&cio->io_parent_list));
	list_insert_head(&cio->io_parent_list, zl);

	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	uint64_t *countp = pio->io_children[cio->io_child_type];
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		countp[w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);

	mutex_exit(&pio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
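			/*
			 * Pipeline stages are one-hot bits, so shifting
			 * right moves this zio back one stage; when the
			 * awaited children complete, zio_execute()
			 * re-advances it into (i.e. retries) the current
			 * stage.
			 */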
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}

__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
    zio_t **next_to_executep)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	/*
	 * Propagate the Direct I/O checksum verify failure to the parent.
	 */
	if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
		pio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);

		/*
		 * If we can tell the caller to execute this parent next, do
		 * so. We do this if the parent's zio type matches the child's
		 * type, or if it's a zio_null() with no done callback, and so
		 * has no actual work to do. Otherwise dispatch the parent zio
		 * in its own taskq.
		 *
		 * Having the caller execute the parent when possible reduces
		 * locking on the zio taskq's, reduces context switch
		 * overhead, and has no recursion penalty. Note that one
		 * read from disk typically causes at least 3 zio's: a
		 * zio_null(), the logical zio_read(), and then a physical
		 * zio. When the physical ZIO completes, we are able to call
		 * zio_done() on all 3 of these zio's from one invocation of
		 * zio_execute() by returning the parent back to
		 * zio_execute(). Since the parent isn't executed until this
		 * thread returns back to zio_execute(), the caller should do
		 * so promptly.
		 *
		 * In other cases, dispatching the parent prevents
		 * overflowing the stack when we have deeply nested
		 * parent-child relationships, as we do with the "mega zio"
		 * of writes for spa_sync(), and the chain of ZIL blocks.
		 */
		if (next_to_executep != NULL && *next_to_executep == NULL &&
		    (pio->io_type == zio->io_type ||
		    (pio->io_type == ZIO_TYPE_NULL && !pio->io_done))) {
			*next_to_executep = pio;
		} else {
			zio_taskq_dispatch(pio, type, B_FALSE);
		}
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
		return (-1);
	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
		return (1);

	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
		return (-1);
	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
		return (1);

	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
		return (-1);
	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
		return (1);

	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
		return (-1);
	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}
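
/*
 * The final pointer comparison above makes this a total order even for
 * zios with identical bookmarks, as AVL trees require for distinct nodes.
 * Sketch of a hypothetical tree keyed by this comparator (the node field
 * name is illustrative only):
 *
 *	avl_create(&tree, zio_bookmark_compare, sizeof (zio_t),
 *	    offsetof(zio_t, io_queue_node));
 */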

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    zio_flag_t flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage,
    enum zio_stage pipeline)
{
	zio_t *zio;

	IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	memset(zio, 0, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT) {
			zio->io_bp_copy = *bp;
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		} else {
			zio->io_bp = (blkptr_t *)bp;
		}
		zio->io_bp_orig = *bp;
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;
	zio->io_allocator = ZIO_ALLOCATOR_NONE;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
	    (pipeline & ZIO_STAGE_READY) == 0;
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		zio->io_metaslab_class = pio->io_metaslab_class;
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child_first(pio, zio);
	}

	taskq_init_ent(&zio->io_tqent);

	return (zio);
}

void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

/*
 * A ZIO intended to sit between others. It provides synchronization at the
 * READY and DONE pipeline stages and calls the respective callbacks.
 */
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, zio_flag_t flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

/*
 * A ZIO intended to be the root of a tree. Unlike a null ZIO, it does not
 * have a READY pipeline stage (it is ready on creation), so it should not
 * be used as a child of any ZIO that may need to wait for its grandchildren
 * to reach the READY stage (i.e. any other ZIO type).
 */
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;

	zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);

	return (zio);
}
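
/*
 * Typical usage (illustrative): a root zio gathers many children so the
 * caller can wait on all of them at once:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for (...)	// one child per block pointer
 *		zio_nowait(zio_read(rio, spa, bp, abd, size, NULL, NULL,
 *		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb));
 *	int error = zio_wait(rio);
 */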

static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
    enum blk_verify_flag blk_verify, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("bad blkptr at %px: "
	    "DVA[0]=%#llx/%#llx "
	    "DVA[1]=%#llx/%#llx "
	    "DVA[2]=%#llx/%#llx "
	    "prop=%#llx "
	    "pad=%#llx,%#llx "
	    "phys_birth=%#llx "
	    "birth=%#llx "
	    "fill=%#llx "
	    "cksum=%#llx/%#llx/%#llx/%#llx",
	    bp,
	    (long long)bp->blk_dva[0].dva_word[0],
	    (long long)bp->blk_dva[0].dva_word[1],
	    (long long)bp->blk_dva[1].dva_word[0],
	    (long long)bp->blk_dva[1].dva_word[1],
	    (long long)bp->blk_dva[2].dva_word[0],
	    (long long)bp->blk_dva[2].dva_word[1],
	    (long long)bp->blk_prop,
	    (long long)bp->blk_pad[0],
	    (long long)bp->blk_pad[1],
	    (long long)BP_GET_PHYSICAL_BIRTH(bp),
	    (long long)BP_GET_LOGICAL_BIRTH(bp),
	    (long long)bp->blk_fill,
	    (long long)bp->blk_cksum.zc_word[0],
	    (long long)bp->blk_cksum.zc_word[1],
	    (long long)bp->blk_cksum.zc_word[2],
	    (long long)bp->blk_cksum.zc_word[3]);
	switch (blk_verify) {
	case BLK_VERIFY_HALT:
		zfs_panic_recover("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_LOG:
		zfs_dbgmsg("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_ONLY:
		break;
	}

	return (1);
}

/*
 * Verify that the block pointer fields contain reasonable values. This
 * means it only contains known object types, checksum/compression
 * identifiers, block sizes within the maximum allowed limits, valid DVAs,
 * etc.
 *
 * If everything checks out, 0 is returned. The blk_verify argument
 * controls the behavior when an invalid field is detected.
 *
 * Values for blk_verify_flag:
 *	BLK_VERIFY_ONLY: evaluate the block
 *	BLK_VERIFY_LOG: evaluate the block and log problems
 *	BLK_VERIFY_HALT: call zfs_panic_recover on error
 *
 * Values for blk_config_flag:
 *	BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
 *	BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
 *	obtained for reader
 *	BLK_CONFIG_NEEDED_TRY: like BLK_CONFIG_NEEDED, but returns EBUSY
 *	rather than blocking if SCL_VDEV cannot be obtained immediately
 *	BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
 *	performance
 */
int
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
    enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
{
	int errors = 0;

	if (unlikely(!DMU_OT_IS_VALID(BP_GET_TYPE(bp)))) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (unlikely(BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (unlikely(BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_IS_EMBEDDED(bp)) {
		if (unlikely(BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
		if (unlikely(BPE_GET_PSIZE(bp) > BPE_PAYLOAD_SIZE)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px has invalid PSIZE %llu",
			    bp, (longlong_t)BPE_GET_PSIZE(bp));
		}
		return (errors ? ECKSUM : 0);
	}
	if (unlikely(BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (unlikely(BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted. This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (unlikely(!spa->spa_trust_config))
		return (errors ? ECKSUM : 0);

	switch (blk_config) {
	case BLK_CONFIG_HELD:
		ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
		break;
	case BLK_CONFIG_NEEDED:
		spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
		break;
	case BLK_CONFIG_NEEDED_TRY:
		if (!spa_config_tryenter(spa, SCL_VDEV, bp, RW_READER))
			return (EBUSY);
		break;
	case BLK_CONFIG_SKIP:
		return (errors ? ECKSUM : 0);
	default:
		panic("invalid blk_config %u", blk_config);
	}

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the logical birth
	 * and physical birth are not too large. However,
	 * spa_freeze() allows the birth time of log blocks (and
	 * dmu_sync()-ed blocks that are in the log) to be arbitrarily
	 * large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		const dva_t *dva = &bp->blk_dva[i];
		uint64_t vdevid = DVA_GET_VDEV(dva);

		if (unlikely(vdevid >= spa->spa_root_vdev->vdev_children)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (unlikely(vd == NULL)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (unlikely(vd->vdev_ops == &vdev_hole_ops)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has hole VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(dva);
		uint64_t asize = DVA_GET_ASIZE(dva);
		if (DVA_GET_GANG(dva))
			asize = vdev_gang_header_asize(vd);
		if (unlikely(offset + asize > vd->vdev_asize)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
	if (blk_config == BLK_CONFIG_NEEDED || blk_config ==
	    BLK_CONFIG_NEEDED_TRY)
		spa_config_exit(spa, SCL_VDEV, bp);

	return (errors ? ECKSUM : 0);
}

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	(void) bp;
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops)
		return (B_FALSE);

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (DVA_GET_GANG(dva))
		asize = vdev_gang_header_asize(vd);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;
	enum zio_stage pipeline = zp->zp_direct_write == B_TRUE ?
	    ZIO_DIRECT_WRITE_PIPELINE : (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE;

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, pipeline);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP. But we may need the data to
	 * verify a dedup hit (if requested). In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim). Encrypted
	 * dedup blocks need data as well so we also disable dedup in this
	 * case.
	 */
	if (data == NULL &&
	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
    boolean_t brtwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
	ASSERT(!brtwrite || !nopwrite);

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync() keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_brtwrite = brtwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);

	/*
	 * The check for EMBEDDED is a performance optimization. We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately. Otherwise, put them on the
	 * in-memory list for later processing.
	 *
	 * Note that we only defer frees after zfs_sync_pass_deferred_free
	 * when the log space map feature is disabled. [see relevant comment
	 * in spa_sync_iterate_to_convergence()]
	 */
	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
	    brt_maybe_exists(spa, bp)) {
		metaslab_check_free(spa, bp);
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
	}
}

/*
 * To improve performance, this function may return NULL if we were able
 * to do the free immediately. This avoids the cost of creating a zio
 * (and linking it to the parent, etc).
 */
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_flag_t flags)
{
	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);

	if (BP_IS_EMBEDDED(bp))
		return (NULL);

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);
	dsl_scan_freed(spa, bp);

	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    brt_maybe_exists(spa, bp)) {
		/*
		 * GANG, DEDUP and BRT blocks can induce a read (for the gang
		 * block header, the DDT or the BRT), so issue them
		 * asynchronously so that this thread is not tied up.
		 */
		enum zio_stage stage =
		    ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;

		return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
		    BP_GET_PSIZE(bp), NULL, NULL,
		    ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
		    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
	} else {
		metaslab_free(spa, bp, txg, B_FALSE);
		return (NULL);
	}
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;

	(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
	    BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block. Claims are needed
	 * to support immediate writes in the intent log. The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed. Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(8) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, enum trim_flag trim_flags)
{
	zio_t *zio;

	ASSERT0(vd->vdev_children);
	ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	ASSERT3U(size, !=, 0);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
	    private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
	zio->io_trim_flags = trim_flags;

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		abd_t *wbuf = abd_alloc_sametype(data, size);
		abd_copy(wbuf, data, size);

		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    zio_flag_t flags, zio_done_func_t *done, void *private)
{
        enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
        zio_t *zio;

        /*
         * vdev child I/Os do not propagate their error to the parent.
         * Therefore, for correct operation the caller *must* check for
         * and handle the error in the child i/o's done callback.
         * The only exceptions are i/os that we don't care about
         * (OPTIONAL or REPAIR).
         */
        ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
            done != NULL);

        if (type == ZIO_TYPE_READ && bp != NULL) {
                /*
                 * If we have the bp, then the child should perform the
                 * checksum and the parent need not. This pushes error
                 * detection as close to the leaves as possible and
                 * eliminates redundant checksums in the interior nodes.
                 */
                pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
                pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
                /*
                 * We never allow the mirror VDEV to attempt reading from any
                 * additional data copies after the first Direct I/O checksum
                 * verify failure. This is to avoid bad data being written out
                 * through the mirror during self healing. See comment in
                 * vdev_mirror_io_done() for more details.
                 */
                ASSERT0(pio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
        } else if (type == ZIO_TYPE_WRITE &&
            pio->io_prop.zp_direct_write == B_TRUE) {
                /*
                 * By default we only will verify checksums for Direct I/O
                 * writes for Linux. FreeBSD is able to place user pages under
                 * write protection before issuing them to the ZIO pipeline.
                 *
                 * Checksum validation errors will only be reported through
                 * the top-level VDEV, which is set by this child ZIO.
                 */
                ASSERT3P(bp, !=, NULL);
                ASSERT3U(pio->io_child_type, ==, ZIO_CHILD_LOGICAL);
                pipeline |= ZIO_STAGE_DIO_CHECKSUM_VERIFY;
        }

        if (vd->vdev_ops->vdev_op_leaf) {
                ASSERT0(vd->vdev_children);
                offset += VDEV_LABEL_START_SIZE;
        }

        flags |= ZIO_VDEV_CHILD_FLAGS(pio);

        /*
         * If we've decided to do a repair, the write is not speculative --
         * even if the original read was.
         */
        if (flags & ZIO_FLAG_IO_REPAIR)
                flags &= ~ZIO_FLAG_SPECULATIVE;

        /*
         * If we're creating a child I/O that is not associated with a
         * top-level vdev, then the child zio is not an allocating I/O.
         * If this is a retried I/O then we ignore it since we will
         * have already processed the original allocating I/O.
         */
        if (flags & ZIO_FLAG_IO_ALLOCATING &&
            (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
                ASSERT(pio->io_metaslab_class != NULL);
                ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
                ASSERT(type == ZIO_TYPE_WRITE);
                ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
                ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
                ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
                    pio->io_child_type == ZIO_CHILD_GANG);

                flags &= ~ZIO_FLAG_IO_ALLOCATING;
        }

        zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
            done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
            ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
        ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);

        return (zio);
}
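
/*
 * A hedged sketch (kept under #if 0) of the contract described above:
 * because child errors do not propagate, a non-OPTIONAL/non-REPAIR child
 * must supply a done callback that inspects zio->io_error itself. The
 * function names and the child vdev 'cvd' are illustrative assumptions.
 */
#if 0
static void
example_child_done(zio_t *zio)
{
        if (zio->io_error != 0) {
                /* Record or repair here; the parent never sees this error. */
        }
}

static void
example_delegate_read(zio_t *pio, vdev_t *cvd)
{
        zio_nowait(zio_vdev_child_io(pio, pio->io_bp, cvd, pio->io_offset,
            pio->io_abd, pio->io_size, ZIO_TYPE_READ, pio->io_priority, 0,
            example_child_done, NULL));
}
#endif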

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
    zio_type_t type, zio_priority_t priority, zio_flag_t flags,
    zio_done_func_t *done, void *private)
{
        zio_t *zio;

        ASSERT(vd->vdev_ops->vdev_op_leaf);

        zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
            data, size, size, done, private, type, priority,
            flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
            vd, offset, NULL,
            ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

        return (zio);
}


/*
 * Send a flush command to the given vdev. Unlike most zio creation functions,
 * the flush zios are issued immediately. You can wait on pio to pause until
 * the flushes complete.
 */
void
zio_flush(zio_t *pio, vdev_t *vd)
{
        const zio_flag_t flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
            ZIO_FLAG_DONT_RETRY;

        if (vd->vdev_nowritecache)
                return;

        if (vd->vdev_children == 0) {
                zio_nowait(zio_create(pio, vd->vdev_spa, 0, NULL, NULL, 0, 0,
                    NULL, NULL, ZIO_TYPE_FLUSH, ZIO_PRIORITY_NOW, flags, vd, 0,
                    NULL, ZIO_STAGE_OPEN, ZIO_FLUSH_PIPELINE));
        } else {
                for (uint64_t c = 0; c < vd->vdev_children; c++)
                        zio_flush(pio, vd->vdev_child[c]);
        }
}
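
/*
 * A minimal sketch (kept under #if 0): flushing every leaf vdev in a pool
 * and waiting for completion by parenting the flushes to a root zio. The
 * wrapper name is illustrative.
 */
#if 0
static void
example_flush_pool(spa_t *spa)
{
        zio_t *root = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

        zio_flush(root, spa->spa_root_vdev);
        (void) zio_wait(root);  /* returns once every leaf flush completes */
}
#endif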

void
zio_shrink(zio_t *zio, uint64_t size)
{
        ASSERT3P(zio->io_executor, ==, NULL);
        ASSERT3U(zio->io_orig_size, ==, zio->io_size);
        ASSERT3U(size, <=, zio->io_size);

        /*
         * We don't shrink for raidz because of problems with the
         * reconstruction when reading back less than the block size.
         * Note, BP_IS_RAIDZ() assumes no compression.
         */
        ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
        if (!BP_IS_RAIDZ(zio->io_bp)) {
                /* we are not doing a raw write */
                ASSERT3U(zio->io_size, ==, zio->io_lsize);
                zio->io_orig_size = zio->io_size = zio->io_lsize = size;
        }
}

/*
 * Round the provided allocation size up to a value that can be allocated
 * by at least some vdev(s) in the pool with minimal or no additional
 * padding, and without extra space usage on others.
 */
static uint64_t
zio_roundup_alloc_size(spa_t *spa, uint64_t size)
{
        if (size > spa->spa_min_alloc)
                return (roundup(size, spa->spa_gcd_alloc));
        return (spa->spa_min_alloc);
}
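
/*
 * Worked example (hypothetical pool geometry): with spa_min_alloc = 4K and
 * spa_gcd_alloc = 4K, a 9000-byte request rounds up to 12K, while any
 * request of 4K or less simply returns the 4K minimum.
 */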

size_t
zio_get_compression_max_size(enum zio_compress compress, uint64_t gcd_alloc,
    uint64_t min_alloc, size_t s_len)
{
        size_t d_len;

        /* minimum 12.5% must be saved (legacy value, may be changed later) */
        d_len = s_len - (s_len >> 3);

        /* ZLE can't use exactly d_len bytes; it needs more, so ignore it */
        if (compress == ZIO_COMPRESS_ZLE)
                return (d_len);

        d_len = d_len - d_len % gcd_alloc;

        if (d_len < min_alloc)
                return (BPE_PAYLOAD_SIZE);
        return (d_len);
}
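
/*
 * Worked example (hypothetical 4K-gcd pool): for s_len = 128K the 12.5%
 * saving requirement gives d_len = 128K - 16K = 112K, which is already a
 * multiple of gcd_alloc = 4K, so 112K is returned. If the rounded-down
 * d_len were to fall below min_alloc, only a result small enough for an
 * embedded BP (BPE_PAYLOAD_SIZE) would be worth compressing to.
 */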

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static zio_t *
zio_read_bp_init(zio_t *zio)
{
        blkptr_t *bp = zio->io_bp;
        uint64_t psize =
            BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);

        ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

        if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
            zio->io_child_type == ZIO_CHILD_LOGICAL &&
            !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
                zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
                    psize, psize, zio_decompress);
        }

        if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
            BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
            zio->io_child_type == ZIO_CHILD_LOGICAL) {
                zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
                    psize, psize, zio_decrypt);
        }

        if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
                int psize = BPE_GET_PSIZE(bp);
                void *data = abd_borrow_buf(zio->io_abd, psize);

                zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
                decode_embedded_bp_compressed(bp, data);
                abd_return_buf_copy(zio->io_abd, data, psize);
        } else {
                ASSERT(!BP_IS_EMBEDDED(bp));
        }

        if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
                zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

        return (zio);
}

static zio_t *
zio_write_bp_init(zio_t *zio)
{
        if (!IO_IS_ALLOCATING(zio))
                return (zio);

        ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

        if (zio->io_bp_override) {
                blkptr_t *bp = zio->io_bp;
                zio_prop_t *zp = &zio->io_prop;

                ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg);

                *bp = *zio->io_bp_override;
                zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

                if (zp->zp_brtwrite)
                        return (zio);

                ASSERT(!BP_GET_DEDUP(zio->io_bp_override));

                if (BP_IS_EMBEDDED(bp))
                        return (zio);

                /*
                 * If we've been overridden and nopwrite is set then
                 * set the flag accordingly to indicate that a nopwrite
                 * has already occurred.
                 */
                if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
                        ASSERT(!zp->zp_dedup);
                        ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
                        zio->io_flags |= ZIO_FLAG_NOPWRITE;
                        return (zio);
                }

                ASSERT(!zp->zp_nopwrite);

                if (BP_IS_HOLE(bp) || !zp->zp_dedup)
                        return (zio);

                ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
                    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);

                if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
                    !zp->zp_encrypt) {
                        BP_SET_DEDUP(bp, 1);
                        zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
                        return (zio);
                }

                /*
                 * We were unable to handle this as an override bp, treat
                 * it as a regular write I/O.
                 */
                zio->io_bp_override = NULL;
                *bp = zio->io_bp_orig;
                zio->io_pipeline = zio->io_orig_pipeline;
        }

        return (zio);
}

static zio_t *
zio_write_compress(zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        zio_prop_t *zp = &zio->io_prop;
        enum zio_compress compress = zp->zp_compress;
        blkptr_t *bp = zio->io_bp;
        uint64_t lsize = zio->io_lsize;
        uint64_t psize = zio->io_size;
        uint32_t pass = 1;

        /*
         * If our children haven't all reached the ready stage,
         * wait for them and then repeat this pipeline stage.
         */
        if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
            ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
                return (NULL);
        }

        if (!IO_IS_ALLOCATING(zio))
                return (zio);

        if (zio->io_children_ready != NULL) {
                /*
                 * Now that all our children are ready, run the callback
                 * associated with this zio in case it wants to modify the
                 * data to be written.
                 */
                ASSERT3U(zp->zp_level, >, 0);
                zio->io_children_ready(zio);
        }

        ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
        ASSERT(zio->io_bp_override == NULL);

        if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) {
                /*
                 * We're rewriting an existing block, which means we're
                 * working on behalf of spa_sync(). For spa_sync() to
                 * converge, it must eventually be the case that we don't
                 * have to allocate new blocks. But compression changes
                 * the blocksize, which forces a reallocate, and makes
                 * convergence take longer. Therefore, after the first
                 * few passes, stop compressing to ensure convergence.
                 */
                pass = spa_sync_pass(spa);

                ASSERT(zio->io_txg == spa_syncing_txg(spa));
                ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
                ASSERT(!BP_GET_DEDUP(bp));

                if (pass >= zfs_sync_pass_dont_compress)
                        compress = ZIO_COMPRESS_OFF;

                /* Make sure someone doesn't change their mind on overwrites */
                ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) ||
                    MIN(zp->zp_copies, spa_max_replication(spa))
                    == BP_GET_NDVAS(bp));
        }

        /* If it's a compressed write that is not raw, compress the buffer. */
        if (compress != ZIO_COMPRESS_OFF &&
            !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
                abd_t *cabd = NULL;
                if (abd_cmp_zero(zio->io_abd, lsize) == 0)
                        psize = 0;
                else if (compress == ZIO_COMPRESS_EMPTY)
                        psize = lsize;
                else
                        psize = zio_compress_data(compress, zio->io_abd, &cabd,
                            lsize,
                            zio_get_compression_max_size(compress,
                            spa->spa_gcd_alloc, spa->spa_min_alloc, lsize),
                            zp->zp_complevel);
                if (psize == 0) {
                        compress = ZIO_COMPRESS_OFF;
                } else if (psize >= lsize) {
                        compress = ZIO_COMPRESS_OFF;
                        if (cabd != NULL)
                                abd_free(cabd);
                } else if (!zp->zp_dedup && !zp->zp_encrypt &&
                    psize <= BPE_PAYLOAD_SIZE &&
                    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
                    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
                        void *cbuf = abd_borrow_buf_copy(cabd, lsize);
                        encode_embedded_bp_compressed(bp,
                            cbuf, compress, lsize, psize);
                        BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
                        BP_SET_TYPE(bp, zio->io_prop.zp_type);
                        BP_SET_LEVEL(bp, zio->io_prop.zp_level);
                        abd_return_buf(cabd, cbuf, lsize);
                        abd_free(cabd);
                        BP_SET_LOGICAL_BIRTH(bp, zio->io_txg);
                        zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
                        ASSERT(spa_feature_is_active(spa,
                            SPA_FEATURE_EMBEDDED_DATA));
                        return (zio);
                } else {
                        /*
                         * Round compressed size up to the minimum allocation
                         * size of the smallest-ashift device, and zero the
                         * tail. This ensures that the compressed size of the
                         * BP (and thus compressratio property) are correct,
                         * in that we charge for the padding used to fill out
                         * the last sector.
                         */
                        size_t rounded = (size_t)zio_roundup_alloc_size(spa,
                            psize);
                        if (rounded >= lsize) {
                                compress = ZIO_COMPRESS_OFF;
                                abd_free(cabd);
                                psize = lsize;
                        } else {
                                abd_zero_off(cabd, psize, rounded - psize);
                                psize = rounded;
                                zio_push_transform(zio, cabd,
                                    psize, lsize, NULL);
                        }
                }

                /*
                 * We were unable to handle this as an override bp, treat
                 * it as a regular write I/O.
                 */
                zio->io_bp_override = NULL;
                *bp = zio->io_bp_orig;
                zio->io_pipeline = zio->io_orig_pipeline;

        } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
            zp->zp_type == DMU_OT_DNODE) {
                /*
                 * The DMU actually relies on the zio layer's compression
                 * to free metadnode blocks that have had all contained
                 * dnodes freed. As a result, even when doing a raw
                 * receive, we must check whether the block can be compressed
                 * to a hole.
                 */
                if (abd_cmp_zero(zio->io_abd, lsize) == 0) {
                        psize = 0;
                        compress = ZIO_COMPRESS_OFF;
                } else {
                        psize = lsize;
                }
        } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
            !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
                /*
                 * If we are raw receiving an encrypted dataset we should not
                 * take this codepath because it will change the on-disk block
                 * and decryption will fail.
                 */
                size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize),
                    lsize);

                if (rounded != psize) {
                        abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
                        abd_zero_off(cdata, psize, rounded - psize);
                        abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
                        psize = rounded;
                        zio_push_transform(zio, cdata,
                            psize, rounded, NULL);
                }
        } else {
                ASSERT3U(psize, !=, 0);
        }

        /*
         * The final pass of spa_sync() must be all rewrites, but the first
         * few passes offer a trade-off: allocating blocks defers convergence,
         * but newly allocated blocks are sequential, so they can be written
         * to disk faster. Therefore, we allow the first few passes of
         * spa_sync() to allocate new blocks, but force rewrites after that.
         * There should only be a handful of blocks after pass 1 in any case.
         */
        if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg &&
            BP_GET_PSIZE(bp) == psize &&
            pass >= zfs_sync_pass_rewrite) {
                VERIFY3U(psize, !=, 0);
                enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;

                zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
                zio->io_flags |= ZIO_FLAG_IO_REWRITE;
        } else {
                BP_ZERO(bp);
                zio->io_pipeline = ZIO_WRITE_PIPELINE;
        }

        if (psize == 0) {
                if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 &&
                    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
                        BP_SET_LSIZE(bp, lsize);
                        BP_SET_TYPE(bp, zp->zp_type);
                        BP_SET_LEVEL(bp, zp->zp_level);
                        BP_SET_BIRTH(bp, zio->io_txg, 0);
                }
                zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
        } else {
                ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
                BP_SET_LSIZE(bp, lsize);
                BP_SET_TYPE(bp, zp->zp_type);
                BP_SET_LEVEL(bp, zp->zp_level);
                BP_SET_PSIZE(bp, psize);
                BP_SET_COMPRESS(bp, compress);
                BP_SET_CHECKSUM(bp, zp->zp_checksum);
                BP_SET_DEDUP(bp, zp->zp_dedup);
                BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
                if (zp->zp_dedup) {
                        ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
                        ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
                        ASSERT(!zp->zp_encrypt ||
                            DMU_OT_IS_ENCRYPTED(zp->zp_type));
                        zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
                }
                if (zp->zp_nopwrite) {
                        ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
                        ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
                        zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
                }
        }
        return (zio);
}
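
/*
 * Worked example for the embedded-data path above (hypothetical sizes): a
 * 4K level-0, non-dedup, non-encrypted block that compresses to 100 bytes
 * fits within BPE_PAYLOAD_SIZE (112 bytes), so the payload is stored
 * directly inside the block pointer and no DVA is allocated. If it
 * compressed only to, say, 1000 bytes, it would instead be rounded up by
 * zio_roundup_alloc_size() and take the normal allocating write path.
 */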

static zio_t *
zio_free_bp_init(zio_t *zio)
{
        blkptr_t *bp = zio->io_bp;

        if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
                if (BP_GET_DEDUP(bp))
                        zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
        }

        ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

        return (zio);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
        spa_t *spa = zio->io_spa;
        zio_type_t t = zio->io_type;

        /*
         * If we're a config writer or a probe, the normal issue and
         * interrupt threads may all be blocked waiting for the config lock.
         * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
         */
        if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
                t = ZIO_TYPE_NULL;

        /*
         * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
         */
        if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
                t = ZIO_TYPE_NULL;

        /*
         * If this is a high priority I/O, then use the high priority taskq if
         * available or cut the line otherwise.
         */
        if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) {
                if (spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
                        q++;
                else
                        cutinline = B_TRUE;
        }

        ASSERT3U(q, <, ZIO_TASKQ_TYPES);

        spa_taskq_dispatch(spa, t, q, zio_execute, zio, cutinline);
}

static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
        spa_t *spa = zio->io_spa;

        taskq_t *tq = taskq_of_curthread();

        for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
                spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
                uint_t i;
                for (i = 0; i < tqs->stqs_count; i++) {
                        if (tqs->stqs_taskq[i] == tq)
                                return (B_TRUE);
                }
        }

        return (B_FALSE);
}

static zio_t *
zio_issue_async(zio_t *zio)
{
        ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio));
        zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
        return (NULL);
}

void
zio_interrupt(void *zio)
{
        zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

void
zio_delay_interrupt(zio_t *zio)
{
        /*
         * The timeout_generic() function isn't defined in userspace, so
         * rather than trying to implement the function, the zio delay
         * functionality has been disabled for userspace builds.
         */

#ifdef _KERNEL
        /*
         * If io_target_timestamp is zero, then no delay has been registered
         * for this IO, so we skip the delay and issue it directly to the
         * zio layer.
         */
        if (zio->io_target_timestamp != 0) {
                hrtime_t now = gethrtime();

                if (now >= zio->io_target_timestamp) {
                        /*
                         * This IO has already taken longer than the target
                         * delay to complete, so we don't want to delay it
                         * any longer; we "miss" the delay and issue it
                         * directly to the zio layer. This is likely due to
                         * the target latency being set to a value less than
                         * the underlying hardware can satisfy (e.g. delay
                         * set to 1ms, but the disks take 10ms to complete an
                         * IO request).
                         */

                        DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
                            hrtime_t, now);

                        zio_interrupt(zio);
                } else {
                        taskqid_t tid;
                        hrtime_t diff = zio->io_target_timestamp - now;
                        int ticks = MAX(1, NSEC_TO_TICK(diff));
                        clock_t expire_at_tick = ddi_get_lbolt() + ticks;

                        DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
                            hrtime_t, now, hrtime_t, diff);

                        tid = taskq_dispatch_delay(system_taskq, zio_interrupt,
                            zio, TQ_NOSLEEP, expire_at_tick);
                        if (tid == TASKQID_INVALID) {
                                /*
                                 * Couldn't allocate a task. Just finish the
                                 * zio without a delay.
                                 */
                                zio_interrupt(zio);
                        }
                }
                return;
        }
#endif
        DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
        zio_interrupt(zio);
}
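
/*
 * Worked example for the tick conversion above (assuming HZ = 250): a
 * residual delay of 2ms gives NSEC_TO_TICK(2000000) = 0 ticks, which
 * MAX(1, ...) clamps to a single 4ms tick -- small delays are rounded up
 * to the scheduler's granularity rather than dropped.
 */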

static void
zio_deadman_impl(zio_t *pio, int ziodepth)
{
        zio_t *cio, *cio_next;
        zio_link_t *zl = NULL;
        vdev_t *vd = pio->io_vd;

        if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
                vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
                zbookmark_phys_t *zb = &pio->io_bookmark;
                uint64_t delta = gethrtime() - pio->io_timestamp;
                uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);

                zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
                    "delta=%llu queued=%llu io=%llu "
                    "path=%s "
                    "last=%llu type=%d "
                    "priority=%d flags=0x%llx stage=0x%x "
                    "pipeline=0x%x pipeline-trace=0x%x "
                    "objset=%llu object=%llu "
                    "level=%llu blkid=%llu "
                    "offset=%llu size=%llu "
                    "error=%d",
                    ziodepth, pio, pio->io_timestamp,
                    (u_longlong_t)delta, pio->io_delta, pio->io_delay,
                    vd ? vd->vdev_path : "NULL",
                    vq ? vq->vq_io_complete_ts : 0, pio->io_type,
                    pio->io_priority, (u_longlong_t)pio->io_flags,
                    pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
                    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
                    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
                    (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
                    pio->io_error);
                (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
                    pio->io_spa, vd, zb, pio, 0);

                if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
                    taskq_empty_ent(&pio->io_tqent)) {
                        zio_interrupt(pio);
                }
        }

        mutex_enter(&pio->io_lock);
        for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
                cio_next = zio_walk_children(pio, &zl);
                zio_deadman_impl(cio, ziodepth + 1);
        }
        mutex_exit(&pio->io_lock);
}

/*
 * Log the critical information describing this zio and all of its children
 * using the zfs_dbgmsg() interface, then post a deadman event for the ZED.
 */
void
zio_deadman(zio_t *pio, const char *tag)
{
        spa_t *spa = pio->io_spa;
        char *name = spa_name(spa);

        if (!zfs_deadman_enabled || spa_suspended(spa))
                return;

        zio_deadman_impl(pio, 0);

        switch (spa_get_deadman_failmode(spa)) {
        case ZIO_FAILURE_MODE_WAIT:
                zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
                break;

        case ZIO_FAILURE_MODE_CONTINUE:
                zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
                break;

        case ZIO_FAILURE_MODE_PANIC:
                fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
                break;
        }
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 * (1) the I/O completes; (2) the pipeline stalls waiting for
 * dependent child I/Os; (3) the I/O issues, so we're waiting
 * for an I/O completion interrupt; (4) the I/O is delegated by
 * vdev-level caching or aggregation; (5) the I/O is deferred
 * due to vdev-level queueing; (6) the I/O is handed off to
 * another thread. In all cases, the pipeline stops whenever
 * there's no CPU work; it never burns a thread in cv_wait_io().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

/*
 * zio_execute() is a wrapper around the static function
 * __zio_execute() so that we can force __zio_execute() to be
 * inlined. This reduces stack overhead which is important
 * because __zio_execute() is called recursively in several zio
 * code paths. zio_execute() itself cannot be inlined because
 * it is externally visible.
 */
void
zio_execute(void *zio)
{
        fstrans_cookie_t cookie;

        cookie = spl_fstrans_mark();
        __zio_execute(zio);
        spl_fstrans_unmark(cookie);
}

/*
 * Used to determine if in the current context the stack is sized large
 * enough to allow zio_execute() to be called recursively. A minimum
 * stack size of 16K is required to avoid needing to re-dispatch the zio.
 */
static boolean_t
zio_execute_stack_check(zio_t *zio)
{
#if !defined(HAVE_LARGE_STACKS)
        dsl_pool_t *dp = spa_get_dsl(zio->io_spa);

        /* Executing in txg_sync_thread() context. */
        if (dp && curthread == dp->dp_tx.tx_sync_thread)
                return (B_TRUE);

        /* Pool initialization outside of zio_taskq context. */
        if (dp && spa_is_initializing(dp->dp_spa) &&
            !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
            !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
                return (B_TRUE);
#else
        (void) zio;
#endif /* HAVE_LARGE_STACKS */

        return (B_FALSE);
}

__attribute__((always_inline))
static inline void
__zio_execute(zio_t *zio)
{
        ASSERT3U(zio->io_queued_timestamp, >, 0);

        while (zio->io_stage < ZIO_STAGE_DONE) {
                enum zio_stage pipeline = zio->io_pipeline;
                enum zio_stage stage = zio->io_stage;

                zio->io_executor = curthread;

                ASSERT(!MUTEX_HELD(&zio->io_lock));
                ASSERT(ISP2(stage));
                ASSERT(zio->io_stall == NULL);

                do {
                        stage <<= 1;
                } while ((stage & pipeline) == 0);

                ASSERT(stage <= ZIO_STAGE_DONE);

                /*
                 * If we are in interrupt context and this pipeline stage
                 * will grab a config lock that is held across I/O,
                 * or may wait for an I/O that needs an interrupt thread
                 * to complete, issue async to avoid deadlock.
                 *
                 * For VDEV_IO_START, we cut in line so that the io will
                 * be sent to disk promptly.
                 */
                if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
                    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
                        boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
                            zio_requeue_io_start_cut_in_line : B_FALSE;
                        zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
                        return;
                }

                /*
                 * If the current context doesn't have large enough stacks
                 * the zio must be issued asynchronously to prevent overflow.
                 */
                if (zio_execute_stack_check(zio)) {
                        boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
                            zio_requeue_io_start_cut_in_line : B_FALSE;
                        zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
                        return;
                }

                zio->io_stage = stage;
                zio->io_pipeline_trace |= zio->io_stage;

                /*
                 * The zio pipeline stage returns the next zio to execute
                 * (typically the same as this one), or NULL if we should
                 * stop.
                 */
                zio = zio_pipeline[highbit64(stage) - 1](zio);

                if (zio == NULL)
                        return;
        }
}
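
/*
 * A minimal model (kept under #if 0) of the stage-advance loop above:
 * stages are one-hot bits, so shifting left until the bit lands in the
 * pipeline mask selects the next enabled stage. The mask values here are
 * hypothetical, not real ZIO_STAGE_* values.
 */
#if 0
static enum zio_stage
example_next_stage(enum zio_stage stage, enum zio_stage pipeline)
{
        do {
                stage <<= 1;
        } while ((stage & pipeline) == 0);
        /* e.g. example_next_stage(0x4, 0x10050) returns 0x10 */
        return (stage);
}
#endif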


/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
        /*
         * Some routines, like zio_free_sync(), may return a NULL zio
         * to avoid the performance overhead of creating and then destroying
         * an unneeded zio. For the callers' simplicity, we accept a NULL
         * zio and ignore it.
         */
        if (zio == NULL)
                return (0);

        long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
        int error;

        ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
        ASSERT3P(zio->io_executor, ==, NULL);

        zio->io_waiter = curthread;
        ASSERT0(zio->io_queued_timestamp);
        zio->io_queued_timestamp = gethrtime();

        if (zio->io_type == ZIO_TYPE_WRITE) {
                spa_select_allocator(zio);
        }
        __zio_execute(zio);

        mutex_enter(&zio->io_lock);
        while (zio->io_executor != NULL) {
                error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
                    ddi_get_lbolt() + timeout);

                if (zfs_deadman_enabled && error == -1 &&
                    gethrtime() - zio->io_queued_timestamp >
                    spa_deadman_ziotime(zio->io_spa)) {
                        mutex_exit(&zio->io_lock);
                        timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
                        zio_deadman(zio, FTAG);
                        mutex_enter(&zio->io_lock);
                }
        }
        mutex_exit(&zio->io_lock);

        error = zio->io_error;
        zio_destroy(zio);

        return (error);
}
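
/*
 * A minimal sketch (kept under #if 0) of synchronous pipeline use: create a
 * zio and block until it completes. The blkptr, buffer, and bookmark are
 * assumed to be prepared by the caller; the wrapper name is illustrative.
 */
#if 0
static int
example_sync_read(spa_t *spa, blkptr_t *bp, abd_t *abd, zbookmark_phys_t *zb)
{
        return (zio_wait(zio_read(NULL, spa, bp, abd, BP_GET_LSIZE(bp),
            NULL, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb)));
}
#endif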

void
zio_nowait(zio_t *zio)
{
        /*
         * See comment in zio_wait().
         */
        if (zio == NULL)
                return;

        ASSERT3P(zio->io_executor, ==, NULL);

        if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
            list_is_empty(&zio->io_parent_list)) {
                zio_t *pio;

                /*
                 * This is a logical async I/O with no parent to wait for it.
                 * We add it to the spa_async_root_zio "Godfather" I/O, which
                 * ensures it completes prior to unloading the pool.
                 */
                spa_t *spa = zio->io_spa;
                pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];

                zio_add_child(pio, zio);
        }

        ASSERT0(zio->io_queued_timestamp);
        zio->io_queued_timestamp = gethrtime();
        if (zio->io_type == ZIO_TYPE_WRITE) {
                spa_select_allocator(zio);
        }
        __zio_execute(zio);
}

/*
 * ==========================================================================
 * Reexecute, cancel, or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(void *arg)
{
        zio_t *pio = arg;
        zio_t *cio, *cio_next, *gio;

        ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
        ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
        ASSERT(pio->io_gang_leader == NULL);
        ASSERT(pio->io_gang_tree == NULL);

        mutex_enter(&pio->io_lock);
        pio->io_flags = pio->io_orig_flags;
        pio->io_stage = pio->io_orig_stage;
        pio->io_pipeline = pio->io_orig_pipeline;
        pio->io_reexecute = 0;
        pio->io_flags |= ZIO_FLAG_REEXECUTED;
        pio->io_pipeline_trace = 0;
        pio->io_error = 0;
        pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) ||
            (pio->io_pipeline & ZIO_STAGE_READY) == 0;
        pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE);

        /*
         * It's possible for a failed ZIO to be a descendant of more than one
         * ZIO tree. When reexecuting it, we have to be sure to add its wait
         * states to all parent wait counts.
         *
         * Those parents, in turn, may have other children that are currently
         * active, usually because they've already been reexecuted after
         * resuming. Those children may be executing and may call
         * zio_notify_parent() at the same time as we're updating our parent's
         * counts. To avoid races while updating the counts, we take
         * gio->io_lock before each update.
         */
        zio_link_t *zl = NULL;
        while ((gio = zio_walk_parents(pio, &zl)) != NULL) {
                mutex_enter(&gio->io_lock);
                for (int w = 0; w < ZIO_WAIT_TYPES; w++) {
                        gio->io_children[pio->io_child_type][w] +=
                            !pio->io_state[w];
                }
                mutex_exit(&gio->io_lock);
        }

        for (int c = 0; c < ZIO_CHILD_TYPES; c++)
                pio->io_child_error[c] = 0;

        if (IO_IS_ALLOCATING(pio))
                BP_ZERO(pio->io_bp);

        /*
         * As we reexecute pio's children, new children could be created.
         * New children go to the head of pio's io_child_list, however,
         * so we will (correctly) not reexecute them. The key is that
         * the remainder of pio's io_child_list, from 'cio_next' onward,
         * cannot be affected by any side effects of reexecuting 'cio'.
         */
        zl = NULL;
        for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
                cio_next = zio_walk_children(pio, &zl);
                mutex_exit(&pio->io_lock);
                zio_reexecute(cio);
                mutex_enter(&pio->io_lock);
        }
        mutex_exit(&pio->io_lock);

        /*
         * Now that all children have been reexecuted, execute the parent.
         * We don't reexecute "The Godfather" I/O here as it's the
         * responsibility of the caller to wait on it.
         */
        if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
                pio->io_queued_timestamp = gethrtime();
                __zio_execute(pio);
        }
}

void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
        if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
                fm_panic("Pool '%s' has encountered an uncorrectable I/O "
                    "failure and the failure mode property for this pool "
                    "is set to panic.", spa_name(spa));

        if (reason != ZIO_SUSPEND_MMP) {
                cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable "
                    "I/O failure and has been suspended.", spa_name(spa));
        }

        (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
            NULL, NULL, 0);

        mutex_enter(&spa->spa_suspend_lock);

        if (spa->spa_suspend_zio_root == NULL)
                spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
                    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
                    ZIO_FLAG_GODFATHER);

        spa->spa_suspended = reason;

        if (zio != NULL) {
                ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
                ASSERT(zio != spa->spa_suspend_zio_root);
                ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
                ASSERT(zio_unique_parent(zio) == NULL);
                ASSERT(zio->io_stage == ZIO_STAGE_DONE);
                zio_add_child(spa->spa_suspend_zio_root, zio);
        }

        mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
        zio_t *pio;

        /*
         * Reexecute all previously suspended i/o.
         */
        mutex_enter(&spa->spa_suspend_lock);
        if (spa->spa_suspended != ZIO_SUSPEND_NONE)
                cmn_err(CE_WARN, "Pool '%s' was suspended and is being "
                    "resumed. Failed I/O will be retried.",
                    spa_name(spa));
        spa->spa_suspended = ZIO_SUSPEND_NONE;
        cv_broadcast(&spa->spa_suspend_cv);
        pio = spa->spa_suspend_zio_root;
        spa->spa_suspend_zio_root = NULL;
        mutex_exit(&spa->spa_suspend_lock);

        if (pio == NULL)
                return (0);

        zio_reexecute(pio);
        return (zio_wait(pio));
}

void
zio_resume_wait(spa_t *spa)
{
        mutex_enter(&spa->spa_suspend_lock);
        while (spa_suspended(spa))
                cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
        mutex_exit(&spa->spa_suspend_lock);
}

/*
 * ==========================================================================
 * Gang blocks.
 *
 * A gang block is a collection of small blocks that looks to the DMU
 * like one large block. When zio_dva_allocate() cannot find a block
 * of the requested size, due to either severe fragmentation or the pool
 * being nearly full, it calls zio_write_gang_block() to construct the
 * block from smaller fragments.
 *
 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
 * an indirect block: it's an array of block pointers. It consumes
 * only one sector and hence is allocatable regardless of fragmentation.
 * The gang header's bps point to its gang members, which hold the data.
 *
 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
 * as the verifier to ensure uniqueness of the SHA256 checksum.
 * Critically, the gang block bp's blk_cksum is the checksum of the data,
 * not the gang header. This ensures that data block signatures (needed for
 * deduplication) are independent of how the block is physically stored.
 *
 * Gang blocks can be nested: a gang member may itself be a gang block.
 * Thus every gang block is a tree in which the root and all interior nodes
 * are gang headers, and the leaves are normal blocks that contain user data.
 * The root of the gang tree is called the gang leader.
 *
 * To perform any operation (read, rewrite, free, claim) on a gang block,
 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
 * in the io_gang_tree field of the original logical i/o by recursively
 * reading the gang leader and all gang headers below it. This yields
 * an in-core tree containing the contents of every gang header and the
 * bps for every constituent of the gang block.
 *
 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
 * and invokes a callback on each bp. To free a gang block, zio_gang_issue()
 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
 * headers, since we already have those in io_gang_tree. zio_rewrite_gang()
 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
 * of the gang header plus zio_checksum_compute() of the data to update the
 * gang header's blk_cksum as described above.
 *
 * The two-phase assemble/issue model solves the problem of partial failure --
 * what if you'd freed part of a gang block but then couldn't read the
 * gang header for another part? Assembling the entire gang tree first
 * ensures that all the necessary gang header I/O has succeeded before
 * starting the actual work of free, claim, or write. Once the gang tree
 * is assembled, free and claim are in-memory operations that cannot fail.
 *
 * In the event that a gang write fails, zio_dva_unallocate() walks the
 * gang tree to immediately free (i.e. insert back into the space map)
 * everything we've allocated. This ensures that we don't get ENOSPC
 * errors during repeated suspend/resume cycles due to a flaky device.
 *
 * Gang rewrites only happen during sync-to-convergence. If we can't assemble
 * the gang tree, we won't modify the block, so we can safely defer the free
 * (knowing that the block is still intact). If we *can* assemble the gang
 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
 * each constituent bp and we can allocate a new block on the next sync pass.
 *
 * In all cases, the gang tree allows complete recovery from partial failure.
 * ==========================================================================
 */

static void
zio_gang_issue_func_done(zio_t *zio)
{
        abd_free(zio->io_abd);
}

static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
{
        if (gn != NULL)
                return (pio);

        return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
            BP_GET_PSIZE(bp), zio_gang_issue_func_done,
            NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
            &pio->io_bookmark));
}

static zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
{
        zio_t *zio;

        if (gn != NULL) {
                abd_t *gbh_abd =
                    abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
                zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
                    gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
                    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
                    &pio->io_bookmark);
                /*
                 * As we rewrite each gang header, the pipeline will compute
                 * a new gang block header checksum for it; but no one will
                 * compute a new data checksum, so we do that here. The one
                 * exception is the gang leader: the pipeline already computed
                 * its data checksum because that stage precedes gang assembly.
                 * (Presently, nothing actually uses interior data checksums;
                 * this is just good hygiene.)
                 */
                if (gn != pio->io_gang_leader->io_gang_tree) {
                        abd_t *buf = abd_get_offset(data, offset);

                        zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
                            buf, BP_GET_PSIZE(bp));

                        abd_free(buf);
                }
                /*
                 * If we are here to damage data for testing purposes,
                 * leave the GBH alone so that we can detect the damage.
                 */
                if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
                        zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
        } else {
                zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
                    abd_get_offset(data, offset), BP_GET_PSIZE(bp),
                    zio_gang_issue_func_done, NULL, pio->io_priority,
                    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
        }

        return (zio);
}

static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
{
        (void) gn, (void) data, (void) offset;

        zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
            ZIO_GANG_CHILD_FLAGS(pio));
        if (zio == NULL) {
                zio = zio_null(pio, pio->io_spa,
                    NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
        }
        return (zio);
}

static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
{
        (void) gn, (void) data, (void) offset;
        return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
            NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}

static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
        NULL,
        zio_read_gang,
        zio_rewrite_gang,
        zio_free_gang,
        zio_claim_gang,
        NULL
};

static void zio_gang_tree_assemble_done(zio_t *zio);

static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
        zio_gang_node_t *gn;

        ASSERT(*gnpp == NULL);

        gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
        gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
        *gnpp = gn;

        return (gn);
}

static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
        zio_gang_node_t *gn = *gnpp;

        for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
                ASSERT(gn->gn_child[g] == NULL);

        zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
        kmem_free(gn, sizeof (*gn));
        *gnpp = NULL;
}

static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
        zio_gang_node_t *gn = *gnpp;

        if (gn == NULL)
                return;

        for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
                zio_gang_tree_free(&gn->gn_child[g]);

        zio_gang_node_free(gnpp);
}

static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
        zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
        abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);

        ASSERT(gio->io_gang_leader == gio);
        ASSERT(BP_IS_GANG(bp));

        zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
            zio_gang_tree_assemble_done, gn, gio->io_priority,
            ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}

static void
zio_gang_tree_assemble_done(zio_t *zio)
{
        zio_t *gio = zio->io_gang_leader;
        zio_gang_node_t *gn = zio->io_private;
        blkptr_t *bp = zio->io_bp;

        ASSERT(gio == zio_unique_parent(zio));
        ASSERT(list_is_empty(&zio->io_child_list));

        if (zio->io_error)
                return;

        /* this ABD was created from a linear buf in zio_gang_tree_assemble */
        if (BP_SHOULD_BYTESWAP(bp))
                byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);

        ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
        ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
        ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

        abd_free(zio->io_abd);

        for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
                blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
                if (!BP_IS_GANG(gbp))
                        continue;
                zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
        }
}

static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
    uint64_t offset)
{
        zio_t *gio = pio->io_gang_leader;
        zio_t *zio;

        ASSERT(BP_IS_GANG(bp) == !!gn);
        ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
        ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);

        /*
         * If you're a gang header, your data is in gn->gn_gbh.
         * If you're a gang member, your data is in 'data' and gn == NULL.
         */
        zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);

        if (gn != NULL) {
                ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

                for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
                        blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
                        if (BP_IS_HOLE(gbp))
                                continue;
                        zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
                            offset);
                        offset += BP_GET_PSIZE(gbp);
                }
        }

        if (gn == gio->io_gang_tree)
                ASSERT3U(gio->io_size, ==, offset);

        if (zio != pio)
                zio_nowait(zio);
}

static zio_t *
zio_gang_assemble(zio_t *zio)
{
        blkptr_t *bp = zio->io_bp;

        ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
        ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

        zio->io_gang_leader = zio;

        zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);

        return (zio);
}

static zio_t *
zio_gang_issue(zio_t *zio)
{
        blkptr_t *bp = zio->io_bp;

        if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
                return (NULL);
        }

        ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
        ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

        if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
                zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
                    0);
        else
                zio_gang_tree_free(&zio->io_gang_tree);

        zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

        return (zio);
}

static void
zio_gang_inherit_allocator(zio_t *pio, zio_t *cio)
{
        cio->io_allocator = pio->io_allocator;
}

static void
zio_write_gang_member_ready(zio_t *zio)
{
        zio_t *pio = zio_unique_parent(zio);
        dva_t *cdva = zio->io_bp->blk_dva;
        dva_t *pdva = pio->io_bp->blk_dva;
        uint64_t asize;
        zio_t *gio __maybe_unused = zio->io_gang_leader;

        if (BP_IS_HOLE(zio->io_bp))
                return;

        ASSERT(BP_IS_HOLE(&zio->io_bp_orig));

        ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
        ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
        ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
        ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
        VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));

        mutex_enter(&pio->io_lock);
        for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
                ASSERT(DVA_GET_GANG(&pdva[d]));
                asize = DVA_GET_ASIZE(&pdva[d]);
                asize += DVA_GET_ASIZE(&cdva[d]);
                DVA_SET_ASIZE(&pdva[d], asize);
        }
        mutex_exit(&pio->io_lock);
}

static void
zio_write_gang_done(zio_t *zio)
{
        /*
         * The io_abd field will be NULL for a zio with no data. The io_flags
         * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
         * check for it here as it is cleared in zio_ready.
         */
        if (zio->io_abd != NULL)
                abd_free(zio->io_abd);
}

static zio_t *
zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
{
        spa_t *spa = pio->io_spa;
        blkptr_t *bp = pio->io_bp;
        zio_t *gio = pio->io_gang_leader;
        zio_t *zio;
        zio_gang_node_t *gn, **gnpp;
        zio_gbh_phys_t *gbh;
        abd_t *gbh_abd;
        uint64_t txg = pio->io_txg;
        uint64_t resid = pio->io_size;
        uint64_t lsize;
        int copies = gio->io_prop.zp_copies;
        zio_prop_t zp;
        int error;
        boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);

        /*
         * If one copy was requested, store 2 copies of the GBH, so that we
         * can still traverse all the data (e.g. to free or scrub) even if a
         * block is damaged. Note that we can't store 3 copies of the GBH in
         * all cases, e.g. with encryption, which uses DVA[2] for the IV+salt.
         */
        int gbh_copies = copies;
        if (gbh_copies == 1) {
                gbh_copies = MIN(2, spa_max_replication(spa));
        }

        ASSERT(ZIO_HAS_ALLOCATOR(pio));
        int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
        if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
                ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
                ASSERT(has_data);

                flags |= METASLAB_ASYNC_ALLOC;
                VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
                    mca_alloc_slots, pio));

                /*
                 * The logical zio has already placed a reservation for
                 * 'copies' allocation slots but gang blocks may require
                 * additional copies. These additional copies
                 * (i.e. gbh_copies - copies) are guaranteed to succeed
                 * since metaslab_class_throttle_reserve() always allows
                 * additional reservations for gang blocks.
                 */
                VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
                    pio->io_allocator, pio, flags));
        }

        error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
            bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
            &pio->io_alloc_list, pio, pio->io_allocator);
        if (error) {
                if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
                        ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
                        ASSERT(has_data);

                        /*
                         * If we failed to allocate the gang block header then
                         * we remove any additional allocation reservations that
                         * we placed here. The original reservation will
                         * be removed when the logical I/O goes to the ready
                         * stage.
                         */
                        metaslab_class_throttle_unreserve(mc,
                            gbh_copies - copies, pio->io_allocator, pio);
                }

                pio->io_error = error;
                return (pio);
        }

        if (pio == gio) {
                gnpp = &gio->io_gang_tree;
        } else {
                gnpp = pio->io_private;
                ASSERT(pio->io_ready == zio_write_gang_member_ready);
        }

        gn = zio_gang_node_alloc(gnpp);
        gbh = gn->gn_gbh;
        memset(gbh, 0, SPA_GANGBLOCKSIZE);
        gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);

        /*
         * Create the gang header.
         */
        zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
            zio_write_gang_done, NULL, pio->io_priority,
            ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

        zio_gang_inherit_allocator(pio, zio);

        /*
         * Create and nowait the gang children.
         */
        for (int g = 0; resid != 0; resid -= lsize, g++) {
                lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
                    SPA_MINBLOCKSIZE);
                ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);

                zp.zp_checksum = gio->io_prop.zp_checksum;
                zp.zp_compress = ZIO_COMPRESS_OFF;
                zp.zp_complevel = gio->io_prop.zp_complevel;
                zp.zp_type = zp.zp_storage_type = DMU_OT_NONE;
                zp.zp_level = 0;
                zp.zp_copies = gio->io_prop.zp_copies;
                zp.zp_dedup = B_FALSE;
                zp.zp_dedup_verify = B_FALSE;
                zp.zp_nopwrite = B_FALSE;
                zp.zp_encrypt = gio->io_prop.zp_encrypt;
                zp.zp_byteorder = gio->io_prop.zp_byteorder;
                zp.zp_direct_write = B_FALSE;
                memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
                memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
                memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);

                zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
                    has_data ? abd_get_offset(pio->io_abd, pio->io_size -
                    resid) : NULL, lsize, lsize, &zp,
                    zio_write_gang_member_ready, NULL,
                    zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
                    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

                zio_gang_inherit_allocator(zio, cio);

                if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
                        ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
                        ASSERT(has_data);

                        /*
                         * Gang children won't throttle but we should
                         * account for their work, so reserve an allocation
                         * slot for them here.
                         */
                        VERIFY(metaslab_class_throttle_reserve(mc,
                            zp.zp_copies, cio->io_allocator, cio, flags));
                }
                zio_nowait(cio);
        }

        /*
         * Set pio's pipeline to just wait for zio to finish.
         */
        pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

        zio_nowait(zio);

        return (pio);
}
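
/*
 * Worked example of the splitting loop above: for a 100K (102400-byte)
 * gang write with SPA_GBH_NBLKPTRS == 3 and SPA_MINBLOCKSIZE == 512, the
 * per-member lsize works out to P2ROUNDUP(102400 / 3, 512) = 34304, then
 * P2ROUNDUP(68096 / 2, 512) = 34304, then 33792 for the last member --
 * three children that sum back to exactly 102400 bytes.
 */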
3266
3267 /*
3268 * The zio_nop_write stage in the pipeline determines if allocating a
3269 * new bp is necessary. The nopwrite feature can handle writes in
3270 * either syncing or open context (i.e. zil writes) and as a result is
3271 * mutually exclusive with dedup.
3272 *
3273 * By leveraging a cryptographically secure checksum, such as SHA256, we
3274 * can compare the checksums of the new data and the old to determine if
3275 * allocating a new block is required. Note that our requirements for
3276 * cryptographic strength are fairly weak: there can't be any accidental
3277 * hash collisions, but we don't need to be secure against intentional
3278 * (malicious) collisions. To trigger a nopwrite, you have to be able
3279 * to write the file to begin with, and triggering an incorrect (hash
3280 * collision) nopwrite is no worse than simply writing to the file.
3281 * That said, there are no known attacks against the checksum algorithms
3282 * used for nopwrite, assuming that the salt and the checksums
3283 * themselves remain secret.
3284 */
3285 static zio_t *
zio_nop_write(zio_t * zio)3286 zio_nop_write(zio_t *zio)
3287 {
3288 blkptr_t *bp = zio->io_bp;
3289 blkptr_t *bp_orig = &zio->io_bp_orig;
3290 zio_prop_t *zp = &zio->io_prop;
3291
3292 ASSERT(BP_IS_HOLE(bp));
3293 ASSERT(BP_GET_LEVEL(bp) == 0);
3294 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
3295 ASSERT(zp->zp_nopwrite);
3296 ASSERT(!zp->zp_dedup);
3297 ASSERT(zio->io_bp_override == NULL);
3298 ASSERT(IO_IS_ALLOCATING(zio));
3299
3300 /*
3301 * Check to see if the original bp and the new bp have matching
3302 * characteristics (i.e. same checksum, compression algorithms, etc.).
3303 * If they don't then just continue with the pipeline which will
3304 * allocate a new bp.
3305 */
3306 if (BP_IS_HOLE(bp_orig) ||
3307 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
3308 ZCHECKSUM_FLAG_NOPWRITE) ||
3309 BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
3310 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
3311 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
3312 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
3313 zp->zp_copies != BP_GET_NDVAS(bp_orig))
3314 return (zio);
3315
3316 /*
3317 * If the checksums match then reset the pipeline so that we
3318 * avoid allocating a new bp and issuing any I/O.
3319 */
3320 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
3321 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
3322 ZCHECKSUM_FLAG_NOPWRITE);
3323 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
3324 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
3325 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
3326 ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
3327
3328 /*
3329 * If we're overwriting a block that is currently on an
3330 * indirect vdev, then ignore the nopwrite request and
3331 * allow a new block to be allocated on a concrete vdev.
3332 */
3333 spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
3334 for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
3335 vdev_t *tvd = vdev_lookup_top(zio->io_spa,
3336 DVA_GET_VDEV(&bp_orig->blk_dva[d]));
3337 if (tvd->vdev_ops == &vdev_indirect_ops) {
3338 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3339 return (zio);
3340 }
3341 }
3342 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3343
3344 *bp = *bp_orig;
3345 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3346 zio->io_flags |= ZIO_FLAG_NOPWRITE;
3347 }
3348
3349 return (zio);
3350 }
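
/*
 * The decision above, distilled (an illustrative sketch, not compiled):
 * nopwrite fires only when the old and new blocks agree on everything
 * that matters and the checksum is strong enough to stand in for the
 * data itself:
 *
 *	if (checksum has ZCHECKSUM_FLAG_NOPWRITE &&
 *	    same checksum/compression/dedup/copies as bp_orig &&
 *	    ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum))
 *		reuse bp_orig and skip allocation and vdev I/O;
 *	else
 *		continue the pipeline and allocate a new block.
 */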
3351
3352 /*
3353 * ==========================================================================
3354 * Block Reference Table
3355 * ==========================================================================
3356 */
3357 static zio_t *
3358 zio_brt_free(zio_t *zio)
3359 {
3360 blkptr_t *bp;
3361
3362 bp = zio->io_bp;
3363
3364 if (BP_GET_LEVEL(bp) > 0 ||
3365 BP_IS_METADATA(bp) ||
3366 !brt_maybe_exists(zio->io_spa, bp)) {
3367 return (zio);
3368 }
3369
3370 if (!brt_entry_decref(zio->io_spa, bp)) {
3371 /*
3372 * This isn't the last reference, so we cannot free
3373 * the data yet.
3374 */
3375 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3376 }
3377
3378 return (zio);
3379 }
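
/*
 * An illustrative example with hypothetical refcounts: a block cloned
 * twice by block cloning carries a BRT refcount of 2. Freeing one
 * clone takes the brt_entry_decref() == B_FALSE path above, so only
 * the reference is dropped and the pipeline is cut down to the
 * interlock stages; freeing the last reference returns B_TRUE and the
 * free proceeds through the DVA-free stage as normal.
 */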
3380
3381 /*
3382 * ==========================================================================
3383 * Dedup
3384 * ==========================================================================
3385 */
3386 static void
3387 zio_ddt_child_read_done(zio_t *zio)
3388 {
3389 blkptr_t *bp = zio->io_bp;
3390 ddt_t *ddt;
3391 ddt_entry_t *dde = zio->io_private;
3392 zio_t *pio = zio_unique_parent(zio);
3393
3394 mutex_enter(&pio->io_lock);
3395 ddt = ddt_select(zio->io_spa, bp);
3396
3397 if (zio->io_error == 0) {
3398 ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
3399 /* this phys variant doesn't need repair */
3400 ddt_phys_clear(dde->dde_phys, v);
3401 }
3402
3403 if (zio->io_error == 0 && dde->dde_io->dde_repair_abd == NULL)
3404 dde->dde_io->dde_repair_abd = zio->io_abd;
3405 else
3406 abd_free(zio->io_abd);
3407 mutex_exit(&pio->io_lock);
3408 }
3409
3410 static zio_t *
3411 zio_ddt_read_start(zio_t *zio)
3412 {
3413 blkptr_t *bp = zio->io_bp;
3414
3415 ASSERT(BP_GET_DEDUP(bp));
3416 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3417 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3418
3419 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3420 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3421 ddt_entry_t *dde = ddt_repair_start(ddt, bp);
3422 ddt_phys_variant_t v_self = ddt_phys_select(ddt, dde, bp);
3423 ddt_univ_phys_t *ddp = dde->dde_phys;
3424 blkptr_t blk;
3425
3426 ASSERT(zio->io_vsd == NULL);
3427 zio->io_vsd = dde;
3428
3429 if (v_self == DDT_PHYS_NONE)
3430 return (zio);
3431
3432 /* issue I/O for the other copies */
3433 for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3434 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3435
3436 if (ddt_phys_birth(ddp, v) == 0 || v == v_self)
3437 continue;
3438
3439 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key,
3440 ddp, v, &blk);
3441 zio_nowait(zio_read(zio, zio->io_spa, &blk,
3442 abd_alloc_for_io(zio->io_size, B_TRUE),
3443 zio->io_size, zio_ddt_child_read_done, dde,
3444 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
3445 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
3446 }
3447 return (zio);
3448 }
3449
3450 zio_nowait(zio_read(zio, zio->io_spa, bp,
3451 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
3452 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
3453
3454 return (zio);
3455 }
3456
3457 static zio_t *
3458 zio_ddt_read_done(zio_t *zio)
3459 {
3460 blkptr_t *bp = zio->io_bp;
3461
3462 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
3463 return (NULL);
3464 }
3465
3466 ASSERT(BP_GET_DEDUP(bp));
3467 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3468 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3469
3470 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3471 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3472 ddt_entry_t *dde = zio->io_vsd;
3473 if (ddt == NULL) {
3474 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
3475 return (zio);
3476 }
3477 if (dde == NULL) {
3478 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
3479 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
3480 return (NULL);
3481 }
3482 if (dde->dde_io->dde_repair_abd != NULL) {
3483 abd_copy(zio->io_abd, dde->dde_io->dde_repair_abd,
3484 zio->io_size);
3485 zio->io_child_error[ZIO_CHILD_DDT] = 0;
3486 }
3487 ddt_repair_done(ddt, dde);
3488 zio->io_vsd = NULL;
3489 }
3490
3491 ASSERT(zio->io_vsd == NULL);
3492
3493 return (zio);
3494 }
3495
3496 static boolean_t
3497 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
3498 {
3499 spa_t *spa = zio->io_spa;
3500 boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
3501
3502 ASSERT(!(zio->io_bp_override && do_raw));
3503
3504 /*
3505 * Note: we compare the original data, not the transformed data,
3506 * because when zio->io_bp is an override bp, we will not have
3507 * pushed the I/O transforms. That's an important optimization
3508 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
3509 * However, we should never get a raw, override zio so in these
3510 * cases we can compare the io_abd directly. This is useful because
3511 * it allows us to do dedup verification even if we don't have access
3512 * to the original data (for instance, if the encryption keys aren't
3513 * loaded).
3514 */
3515
3516 for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3517 if (DDT_PHYS_IS_DITTO(ddt, p))
3518 continue;
3519
3520 if (dde->dde_io == NULL)
3521 continue;
3522
3523 zio_t *lio = dde->dde_io->dde_lead_zio[p];
3524 if (lio == NULL)
3525 continue;
3526
3527 if (do_raw)
3528 return (lio->io_size != zio->io_size ||
3529 abd_cmp(zio->io_abd, lio->io_abd) != 0);
3530
3531 return (lio->io_orig_size != zio->io_orig_size ||
3532 abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
3533 }
3534
3535 for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3536 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3537 uint64_t phys_birth = ddt_phys_birth(dde->dde_phys, v);
3538
3539 if (phys_birth != 0 && do_raw) {
3540 blkptr_t blk = *zio->io_bp;
3541 uint64_t psize;
3542 abd_t *tmpabd;
3543 int error;
3544
3545 ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth);
3546 psize = BP_GET_PSIZE(&blk);
3547
3548 if (psize != zio->io_size)
3549 return (B_TRUE);
3550
3551 ddt_exit(ddt);
3552
3553 tmpabd = abd_alloc_for_io(psize, B_TRUE);
3554
3555 error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
3556 psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
3557 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3558 ZIO_FLAG_RAW, &zio->io_bookmark));
3559
3560 if (error == 0) {
3561 if (abd_cmp(tmpabd, zio->io_abd) != 0)
3562 error = SET_ERROR(ENOENT);
3563 }
3564
3565 abd_free(tmpabd);
3566 ddt_enter(ddt);
3567 return (error != 0);
3568 } else if (phys_birth != 0) {
3569 arc_buf_t *abuf = NULL;
3570 arc_flags_t aflags = ARC_FLAG_WAIT;
3571 blkptr_t blk = *zio->io_bp;
3572 int error;
3573
3574 ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth);
3575
3576 if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
3577 return (B_TRUE);
3578
3579 ddt_exit(ddt);
3580
3581 error = arc_read(NULL, spa, &blk,
3582 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
3583 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3584 &aflags, &zio->io_bookmark);
3585
3586 if (error == 0) {
3587 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
3588 zio->io_orig_size) != 0)
3589 error = SET_ERROR(ENOENT);
3590 arc_buf_destroy(abuf, &abuf);
3591 }
3592
3593 ddt_enter(ddt);
3594 return (error != 0);
3595 }
3596 }
3597
3598 return (B_FALSE);
3599 }
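
/*
 * In short (a summary of the function above): dedup verify never
 * trusts the checksum alone. A raw zio compares its transformed
 * io_abd against a ZIO_FLAG_RAW read of the existing block; otherwise
 * io_orig_abd is compared against an arc_read() of the logical data.
 * Any size or byte mismatch reports a collision (B_TRUE), and
 * zio_ddt_write() then downgrades the request to an ordinary,
 * non-dedup write.
 */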
3600
3601 static void
3602 zio_ddt_child_write_done(zio_t *zio)
3603 {
3604 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3605 ddt_entry_t *dde = zio->io_private;
3606
3607 zio_link_t *zl = NULL;
3608 ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL);
3609
3610 int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies);
3611 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3612 ddt_univ_phys_t *ddp = dde->dde_phys;
3613
3614 ddt_enter(ddt);
3615
3616 /* we're the lead, so once we're done there's no one else outstanding */
3617 if (dde->dde_io->dde_lead_zio[p] == zio)
3618 dde->dde_io->dde_lead_zio[p] = NULL;
3619
3620 ddt_univ_phys_t *orig = &dde->dde_io->dde_orig_phys;
3621
3622 if (zio->io_error != 0) {
3623 /*
3624 * The write failed, so we're about to abort the entire IO
3625 * chain. We need to revert the entry back to what it was at
3626 * the last time it was successfully extended.
3627 */
3628 ddt_phys_copy(ddp, orig, v);
3629 ddt_phys_clear(orig, v);
3630
3631 ddt_exit(ddt);
3632 return;
3633 }
3634
3635 /*
3636 * We've successfully added new DVAs to the entry. Clear the saved
3637 * state or, if there's still outstanding IO, remember it so we can
3638 * revert to a known good state if that IO fails.
3639 */
3640 if (dde->dde_io->dde_lead_zio[p] == NULL)
3641 ddt_phys_clear(orig, v);
3642 else
3643 ddt_phys_copy(orig, ddp, v);
3644
3645 /*
3646 * Add references for all dedup writes that were waiting on the
3647 * physical one, skipping any other physical writes that are waiting.
3648 */
3649 zio_t *pio;
3650 zl = NULL;
3651 while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
3652 if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
3653 ddt_phys_addref(ddp, v);
3654 }
3655
3656 ddt_exit(ddt);
3657 }
3658
3659 static void
3660 zio_ddt_child_write_ready(zio_t *zio)
3661 {
3662 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3663 ddt_entry_t *dde = zio->io_private;
3664
3665 zio_link_t *zl = NULL;
3666 ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL);
3667
3668 int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies);
3669 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3670
3671 if (zio->io_error != 0)
3672 return;
3673
3674 ddt_enter(ddt);
3675
3676 ddt_phys_extend(dde->dde_phys, v, zio->io_bp);
3677
3678 zio_t *pio;
3679 zl = NULL;
3680 while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
3681 if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
3682 ddt_bp_fill(dde->dde_phys, v, pio->io_bp, zio->io_txg);
3683 }
3684
3685 ddt_exit(ddt);
3686 }
3687
3688 static zio_t *
3689 zio_ddt_write(zio_t *zio)
3690 {
3691 spa_t *spa = zio->io_spa;
3692 blkptr_t *bp = zio->io_bp;
3693 uint64_t txg = zio->io_txg;
3694 zio_prop_t *zp = &zio->io_prop;
3695 ddt_t *ddt = ddt_select(spa, bp);
3696 ddt_entry_t *dde;
3697
3698 ASSERT(BP_GET_DEDUP(bp));
3699 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
3700 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
3701 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
3702 /*
3703 * Deduplication will not take place for Direct I/O writes. The
3704 * ddt_tree is emptied in syncing context, while Direct I/O writes
3705 * take place in open context, so a Direct I/O write cannot modify
3706 * the ddt_tree while it is issuing the write.
3707 */
3708 ASSERT3B(zio->io_prop.zp_direct_write, ==, B_FALSE);
3709
3710 ddt_enter(ddt);
3711 dde = ddt_lookup(ddt, bp);
3712 if (dde == NULL) {
3713 /* DDT size is over its quota so no new entries */
3714 zp->zp_dedup = B_FALSE;
3715 BP_SET_DEDUP(bp, B_FALSE);
3716 if (zio->io_bp_override == NULL)
3717 zio->io_pipeline = ZIO_WRITE_PIPELINE;
3718 ddt_exit(ddt);
3719 return (zio);
3720 }
3721
3722 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
3723 /*
3724 * If we're using a weak checksum, upgrade to a strong checksum
3725 * and try again. If we're already using a strong checksum,
3726 * we can't resolve it, so just convert to an ordinary write.
3727 * (And automatically e-mail a paper to Nature?)
3728 */
3729 if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
3730 ZCHECKSUM_FLAG_DEDUP)) {
3731 zp->zp_checksum = spa_dedup_checksum(spa);
3732 zio_pop_transforms(zio);
3733 zio->io_stage = ZIO_STAGE_OPEN;
3734 BP_ZERO(bp);
3735 } else {
3736 zp->zp_dedup = B_FALSE;
3737 BP_SET_DEDUP(bp, B_FALSE);
3738 }
3739 ASSERT(!BP_GET_DEDUP(bp));
3740 zio->io_pipeline = ZIO_WRITE_PIPELINE;
3741 ddt_exit(ddt);
3742 return (zio);
3743 }
3744
3745 int p = DDT_PHYS_FOR_COPIES(ddt, zp->zp_copies);
3746 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3747 ddt_univ_phys_t *ddp = dde->dde_phys;
3748
3749 /*
3750 * In the common cases, at this point we have a regular BP with no
3751 * allocated DVAs, and the corresponding DDT entry for its checksum.
3752 * Our goal is to fill the BP with enough DVAs to satisfy its copies=
3753 * requirement.
3754 *
3755 * One of three things needs to happen to fulfill this:
3756 *
3757 * - if the DDT entry has enough DVAs to satisfy the BP, we just copy
3758 * them out of the entry and return;
3759 *
3760 * - if the DDT entry has no DVAs (i.e. it's brand new), then we have to
3761 * issue the write as normal so that DVAs can be allocated and the
3762 * data land on disk. We then copy the DVAs into the DDT entry on
3763 * return.
3764 *
3765 * - if the DDT entry has some DVAs, but too few, we have to issue the
3766 * write, adjusted to allocate fewer copies. When it returns, we
3767 * add the new DVAs to the DDT entry, and update the BP to have the
3768 * full amount it originally requested.
3769 *
3770 * In all cases, if there's already a writing IO in flight, we need to
3771 * defer the action until after the write is done. If our action is to
3772 * write, we need to adjust our request for additional DVAs to match
3773 * what will be in the DDT entry after it completes. In this way every
3774 * IO can be guaranteed to receive enough DVAs simply by joining the
3775 * end of the chain and letting the sequence play out.
3776 */
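
/*
 * A worked example of the accounting below, with hypothetical values:
 * a write with copies=3 finds an entry holding one committed DVA
 * while a lead zio whose parent requested copies=2 is still in
 * flight. have_dvas=1 is not enough on its own, but the in-flight
 * write will leave the entry with 2 DVAs, so need_dvas = 3 - 2 = 1
 * and we issue a child write with zp_copies = 1, chained behind the
 * current lead zio.
 */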
3777
3778 /*
3779 * Number of DVAs in the DDT entry. If the BP is encrypted we ignore
3780 * the third one as normal.
3781 */
3782 int have_dvas = ddt_phys_dva_count(ddp, v, BP_IS_ENCRYPTED(bp));
3783 IMPLY(have_dvas == 0, ddt_phys_birth(ddp, v) == 0);
3784
3785 /* Number of DVAs requested by the IO. */
3786 uint8_t need_dvas = zp->zp_copies;
3787
3788 /*
3789 * What we do next depends on whether or not there's IO outstanding that
3790 * will update this entry.
3791 */
3792 if (dde->dde_io == NULL || dde->dde_io->dde_lead_zio[p] == NULL) {
3793 /*
3794 * No IO outstanding, so we only need to worry about ourselves.
3795 */
3796
3797 /*
3798 * Override BPs bring their own DVAs and their own problems.
3799 */
3800 if (zio->io_bp_override) {
3801 /*
3802 * For a brand-new entry, all the work has been done
3803 * for us, and we can just fill it out from the provided
3804 * block and leave.
3805 */
3806 if (have_dvas == 0) {
3807 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg);
3808 ASSERT(BP_EQUAL(bp, zio->io_bp_override));
3809 ddt_phys_extend(ddp, v, bp);
3810 ddt_phys_addref(ddp, v);
3811 ddt_exit(ddt);
3812 return (zio);
3813 }
3814
3815 /*
3816 * If the entry already exists, then we want to treat
3817 * this zio like a regular write. To do this we just
3818 * wipe the DVAs out and proceed.
3819 *
3820 * Even if there are some DVAs in the entry, we still
3821 * have to clear them out. We can't use them to fill
3822 * out the dedup entry, as they are all referenced
3823 * together by a bp already on disk, and will be freed
3824 * as a group.
3825 */
3826 BP_ZERO_DVAS(bp);
3827 BP_SET_BIRTH(bp, 0, 0);
3828 }
3829
3830 /*
3831 * If there are enough DVAs in the entry to service our request,
3832 * then we can just use them as-is.
3833 */
3834 if (have_dvas >= need_dvas) {
3835 ddt_bp_fill(ddp, v, bp, txg);
3836 ddt_phys_addref(ddp, v);
3837 ddt_exit(ddt);
3838 return (zio);
3839 }
3840
3841 /*
3842 * Otherwise, we have to issue IO to fill the entry up to the
3843 * amount we need.
3844 */
3845 need_dvas -= have_dvas;
3846 } else {
3847 /*
3848 * There's a write in-flight. If there's already enough DVAs on
3849 * the entry, then either there were already enough to start
3850 * with, or the in-flight IO is between READY and DONE, and so
3851 * has extended the entry with new DVAs. Either way, we don't
3852 * need to do anything, we can just slot in behind it.
3853 */
3854
3855 if (zio->io_bp_override) {
3856 /*
3857 * If there's a write out, then we're soon going to
3858 * have our own copies of this block, so clear out the
3859 * override block and treat it as a regular dedup
3860 * write. See comment above.
3861 */
3862 BP_ZERO_DVAS(bp);
3863 BP_SET_BIRTH(bp, 0, 0);
3864 }
3865
3866 if (have_dvas >= need_dvas) {
3867 /*
3868 * A minor point: there might already be enough
3869 * committed DVAs in the entry to service our request,
3870 * but we don't know which are completed and which are
3871 * allocated but not yet written. In this case, should
3872 * the IO for the new DVAs fail, we will be on the end
3873 * of the IO chain and will also receive an error, even
3874 * though our request could have been serviced.
3875 *
3876 * This is an extremely rare case, as it requires the
3877 * original block to be copied with a request for a
3878 * larger number of DVAs, then copied again requesting
3879 * the same (or already fulfilled) number of DVAs while
3880 * the first request is active, and then that first
3881 * request errors. In turn, the logic required to
3882 * catch and handle it is complex. For now, I'm just
3883 * not going to bother with it.
3884 */
3885
3886 /*
3887 * We always fill the bp here as we may have arrived
3888 * after the in-flight write has passed READY, and so
3889 * missed out.
3890 */
3891 ddt_bp_fill(ddp, v, bp, txg);
3892 zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
3893 ddt_exit(ddt);
3894 return (zio);
3895 }
3896
3897 /*
3898 * There's not enough in the entry yet, so we need to look at
3899 * the write in-flight and see how many DVAs it will have once
3900 * it completes.
3901 *
3902 * The in-flight write has potentially had its copies request
3903 * reduced (if we're filling out an existing entry), so we need
3904 * to reach in and get the original write to find out what it is
3905 * expecting.
3906 *
3907 * Note that the parent of the lead zio will always have the
3908 * highest zp_copies of any zio in the chain, because ones that
3909 * can be serviced without additional IO are always added to
3910 * the back of the chain.
3911 */
3912 zio_link_t *zl = NULL;
3913 zio_t *pio =
3914 zio_walk_parents(dde->dde_io->dde_lead_zio[p], &zl);
3915 ASSERT(pio);
3916 uint8_t parent_dvas = pio->io_prop.zp_copies;
3917
3918 if (parent_dvas >= need_dvas) {
3919 zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
3920 ddt_exit(ddt);
3921 return (zio);
3922 }
3923
3924 /*
3925 * Still not enough, so we will need to issue to get the
3926 * shortfall.
3927 */
3928 need_dvas -= parent_dvas;
3929 }
3930
3931 /*
3932 * We need to write. We will create a new write with the copies
3933 * property adjusted to match the number of DVAs we need to
3934 * grow the DDT entry by to satisfy the request.
3935 */
3936 zio_prop_t czp = *zp;
3937 czp.zp_copies = need_dvas;
3938 zio_t *cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
3939 zio->io_orig_size, zio->io_orig_size, &czp,
3940 zio_ddt_child_write_ready, NULL,
3941 zio_ddt_child_write_done, dde, zio->io_priority,
3942 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
3943
3944 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
3945
3946 /*
3947 * We are the new lead zio, because our parent has the highest
3948 * zp_copies that has been requested for this entry so far.
3949 */
3950 ddt_alloc_entry_io(dde);
3951 if (dde->dde_io->dde_lead_zio[p] == NULL) {
3952 /*
3953 * First time out, take a copy of the stable entry to revert
3954 * to if there's an error (see zio_ddt_child_write_done())
3955 */
3956 ddt_phys_copy(&dde->dde_io->dde_orig_phys, dde->dde_phys, v);
3957 } else {
3958 /*
3959 * Make the existing chain our child, because it cannot
3960 * complete until we have.
3961 */
3962 zio_add_child(cio, dde->dde_io->dde_lead_zio[p]);
3963 }
3964 dde->dde_io->dde_lead_zio[p] = cio;
3965
3966 ddt_exit(ddt);
3967
3968 zio_nowait(cio);
3969
3970 return (zio);
3971 }
3972
3973 static ddt_entry_t *freedde; /* for debugging */
3974
3975 static zio_t *
3976 zio_ddt_free(zio_t *zio)
3977 {
3978 spa_t *spa = zio->io_spa;
3979 blkptr_t *bp = zio->io_bp;
3980 ddt_t *ddt = ddt_select(spa, bp);
3981 ddt_entry_t *dde = NULL;
3982
3983 ASSERT(BP_GET_DEDUP(bp));
3984 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3985
3986 ddt_enter(ddt);
3987 freedde = dde = ddt_lookup(ddt, bp);
3988 if (dde) {
3989 ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
3990 if (v != DDT_PHYS_NONE)
3991 ddt_phys_decref(dde->dde_phys, v);
3992 }
3993 ddt_exit(ddt);
3994
3995 /*
3996 * When no entry was found, it must have been pruned,
3997 * so we can free it now instead of decrementing the
3998 * refcount in the DDT.
3999 */
4000 if (!dde) {
4001 BP_SET_DEDUP(bp, 0);
4002 zio->io_pipeline |= ZIO_STAGE_DVA_FREE;
4003 }
4004
4005 return (zio);
4006 }
4007
4008 /*
4009 * ==========================================================================
4010 * Allocate and free blocks
4011 * ==========================================================================
4012 */
4013
4014 static zio_t *
4015 zio_io_to_allocate(spa_t *spa, int allocator)
4016 {
4017 zio_t *zio;
4018
4019 ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
4020
4021 zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
4022 if (zio == NULL)
4023 return (NULL);
4024
4025 ASSERT(IO_IS_ALLOCATING(zio));
4026 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4027
4028 /*
4029 * Try to place a reservation for this zio. If we're unable to
4030 * reserve then we throttle.
4031 */
4032 ASSERT3U(zio->io_allocator, ==, allocator);
4033 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
4034 zio->io_prop.zp_copies, allocator, zio, 0)) {
4035 return (NULL);
4036 }
4037
4038 avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
4039 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
4040
4041 return (zio);
4042 }
4043
4044 static zio_t *
4045 zio_dva_throttle(zio_t *zio)
4046 {
4047 spa_t *spa = zio->io_spa;
4048 zio_t *nio;
4049 metaslab_class_t *mc;
4050
4051 /* locate an appropriate allocation class */
4052 mc = spa_preferred_class(spa, zio);
4053
4054 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
4055 !mc->mc_alloc_throttle_enabled ||
4056 zio->io_child_type == ZIO_CHILD_GANG ||
4057 zio->io_flags & ZIO_FLAG_NODATA) {
4058 return (zio);
4059 }
4060
4061 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4062 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4063 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
4064 ASSERT3U(zio->io_queued_timestamp, >, 0);
4065 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
4066
4067 int allocator = zio->io_allocator;
4068 zio->io_metaslab_class = mc;
4069 mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
4070 avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
4071 nio = zio_io_to_allocate(spa, allocator);
4072 mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
4073 return (nio);
4074 }
4075
4076 static void
4077 zio_allocate_dispatch(spa_t *spa, int allocator)
4078 {
4079 zio_t *zio;
4080
4081 mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
4082 zio = zio_io_to_allocate(spa, allocator);
4083 mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
4084 if (zio == NULL)
4085 return;
4086
4087 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
4088 ASSERT0(zio->io_error);
4089 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
4090 }
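
/*
 * The throttle handshake, in brief (a sketch of the functions above):
 * zio_dva_throttle() parks the zio on the per-allocator AVL tree,
 * zio_io_to_allocate() admits the head of that tree only if its
 * reservation fits, and zio_allocate_dispatch() re-dispatches the
 * next waiter whenever reservations are returned:
 *
 *	mutex_enter(&spa->spa_allocs[a].spaa_lock);
 *	avl_add(&spa->spa_allocs[a].spaa_tree, zio);
 *	nio = zio_io_to_allocate(spa, a);   (zio, another zio, or NULL)
 *	mutex_exit(&spa->spa_allocs[a].spaa_lock);
 */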
4091
4092 static zio_t *
4093 zio_dva_allocate(zio_t *zio)
4094 {
4095 spa_t *spa = zio->io_spa;
4096 metaslab_class_t *mc;
4097 blkptr_t *bp = zio->io_bp;
4098 int error;
4099 int flags = 0;
4100
4101 if (zio->io_gang_leader == NULL) {
4102 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
4103 zio->io_gang_leader = zio;
4104 }
4105
4106 ASSERT(BP_IS_HOLE(bp));
4107 ASSERT0(BP_GET_NDVAS(bp));
4108 ASSERT3U(zio->io_prop.zp_copies, >, 0);
4109 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
4110 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
4111
4112 if (zio->io_flags & ZIO_FLAG_NODATA)
4113 flags |= METASLAB_DONT_THROTTLE;
4114 if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
4115 flags |= METASLAB_GANG_CHILD;
4116 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
4117 flags |= METASLAB_ASYNC_ALLOC;
4118
4119 /*
4120 * if not already chosen, locate an appropriate allocation class
4121 */
4122 mc = zio->io_metaslab_class;
4123 if (mc == NULL) {
4124 mc = spa_preferred_class(spa, zio);
4125 zio->io_metaslab_class = mc;
4126 }
4127 ZIOSTAT_BUMP(ziostat_total_allocations);
4128
4129 /*
4130 * Try allocating the block in the usual metaslab class.
4131 * If that's full, allocate it in the normal class.
4132 * If that's full, allocate as a gang block,
4133 * and if all are full, the allocation fails (which shouldn't happen).
4134 *
4135 * Note that we do not fall back on embedded slog (ZIL) space, to
4136 * preserve unfragmented slog space, which is critical for decent
4137 * sync write performance. If a log allocation fails, we will fall
4138 * back to spa_sync() which is abysmal for performance.
4139 */
4140 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4141 error = metaslab_alloc(spa, mc, zio->io_size, bp,
4142 zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
4143 &zio->io_alloc_list, zio, zio->io_allocator);
4144
4145 /*
4146 * Fallback to normal class when an alloc class is full
4147 */
4148 if (error == ENOSPC && mc != spa_normal_class(spa)) {
4149 /*
4150 * When the dedup or special class is spilling into the normal
4151 * class, there can still be significant space available due
4152 * to deferred frees that are in-flight. We track the txg when
4153 * this occurred and back off adding new DDT entries for a few
4154 * txgs to allow the free blocks to be processed.
4155 */
4156 if ((mc == spa_dedup_class(spa) || (spa_special_has_ddt(spa) &&
4157 mc == spa_special_class(spa))) &&
4158 spa->spa_dedup_class_full_txg != zio->io_txg) {
4159 spa->spa_dedup_class_full_txg = zio->io_txg;
4160 zfs_dbgmsg("%s[%d]: %s class spilling, req size %d, "
4161 "%llu allocated of %llu",
4162 spa_name(spa), (int)zio->io_txg,
4163 mc == spa_dedup_class(spa) ? "dedup" : "special",
4164 (int)zio->io_size,
4165 (u_longlong_t)metaslab_class_get_alloc(mc),
4166 (u_longlong_t)metaslab_class_get_space(mc));
4167 }
4168
4169 /*
4170 * If throttling, transfer reservation over to normal class.
4171 * The io_allocator slot can remain the same even though we
4172 * are switching classes.
4173 */
4174 if (mc->mc_alloc_throttle_enabled &&
4175 (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
4176 metaslab_class_throttle_unreserve(mc,
4177 zio->io_prop.zp_copies, zio->io_allocator, zio);
4178 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
4179
4180 VERIFY(metaslab_class_throttle_reserve(
4181 spa_normal_class(spa),
4182 zio->io_prop.zp_copies, zio->io_allocator, zio,
4183 flags | METASLAB_MUST_RESERVE));
4184 }
4185 zio->io_metaslab_class = mc = spa_normal_class(spa);
4186 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
4187 zfs_dbgmsg("%s: metaslab allocation failure, "
4188 "trying normal class: zio %px, size %llu, error %d",
4189 spa_name(spa), zio, (u_longlong_t)zio->io_size,
4190 error);
4191 }
4192
4193 ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
4194 error = metaslab_alloc(spa, mc, zio->io_size, bp,
4195 zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
4196 &zio->io_alloc_list, zio, zio->io_allocator);
4197 }
4198
4199 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
4200 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
4201 zfs_dbgmsg("%s: metaslab allocation failure, "
4202 "trying ganging: zio %px, size %llu, error %d",
4203 spa_name(spa), zio, (u_longlong_t)zio->io_size,
4204 error);
4205 }
4206 ZIOSTAT_BUMP(ziostat_gang_writes);
4207 if (flags & METASLAB_GANG_CHILD)
4208 ZIOSTAT_BUMP(ziostat_gang_multilevel);
4209 return (zio_write_gang_block(zio, mc));
4210 }
4211 if (error != 0) {
4212 if (error != ENOSPC ||
4213 (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
4214 zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
4215 "size %llu, error %d",
4216 spa_name(spa), zio, (u_longlong_t)zio->io_size,
4217 error);
4218 }
4219 zio->io_error = error;
4220 }
4221
4222 return (zio);
4223 }
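
/*
 * The fallback chain above, summarized (each step runs only when the
 * previous one returned ENOSPC):
 *
 *	metaslab_alloc(preferred class)    special/dedup/log/normal
 *	metaslab_alloc(normal class)       spill out of the alloc class
 *	zio_write_gang_block(zio, mc)      split into smaller gang children
 *	zio->io_error = error              the allocation genuinely failed
 */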
4224
4225 static zio_t *
4226 zio_dva_free(zio_t *zio)
4227 {
4228 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
4229
4230 return (zio);
4231 }
4232
4233 static zio_t *
4234 zio_dva_claim(zio_t *zio)
4235 {
4236 int error;
4237
4238 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
4239 if (error)
4240 zio->io_error = error;
4241
4242 return (zio);
4243 }
4244
4245 /*
4246 * Undo an allocation. This is used by zio_done() when an I/O fails
4247 * and we want to give back the block we just allocated.
4248 * This handles both normal blocks and gang blocks.
4249 */
4250 static void
4251 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
4252 {
4253 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
4254 ASSERT(zio->io_bp_override == NULL);
4255
4256 if (!BP_IS_HOLE(bp)) {
4257 metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp),
4258 B_TRUE);
4259 }
4260
4261 if (gn != NULL) {
4262 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
4263 zio_dva_unallocate(zio, gn->gn_child[g],
4264 &gn->gn_gbh->zg_blkptr[g]);
4265 }
4266 }
4267 }
4268
4269 /*
4270 * Try to allocate an intent log block. Return 0 on success, errno on failure.
4271 */
4272 int
4273 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
4274 uint64_t size, boolean_t *slog)
4275 {
4276 int error = 1;
4277 zio_alloc_list_t io_alloc_list;
4278
4279 ASSERT(txg > spa_syncing_txg(spa));
4280
4281 metaslab_trace_init(&io_alloc_list);
4282
4283 /*
4284 * Block pointer fields are useful to metaslabs for stats and debugging.
4285 * Fill in the obvious ones before calling into metaslab_alloc().
4286 */
4287 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
4288 BP_SET_PSIZE(new_bp, size);
4289 BP_SET_LEVEL(new_bp, 0);
4290
4291 /*
4292 * When allocating a zil block, we don't have information about
4293 * the final destination of the block except the objset it's part
4294 * of, so we just hash the objset ID to pick the allocator to get
4295 * some parallelism.
4296 */
4297 int flags = METASLAB_ZIL;
4298 int allocator = (uint_t)cityhash1(os->os_dsl_dataset->ds_object)
4299 % spa->spa_alloc_count;
4300 ZIOSTAT_BUMP(ziostat_total_allocations);
4301 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
4302 txg, NULL, flags, &io_alloc_list, NULL, allocator);
4303 *slog = (error == 0);
4304 if (error != 0) {
4305 error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
4306 new_bp, 1, txg, NULL, flags,
4307 &io_alloc_list, NULL, allocator);
4308 }
4309 if (error != 0) {
4310 ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
4311 error = metaslab_alloc(spa, spa_normal_class(spa), size,
4312 new_bp, 1, txg, NULL, flags,
4313 &io_alloc_list, NULL, allocator);
4314 }
4315 metaslab_trace_fini(&io_alloc_list);
4316
4317 if (error == 0) {
4318 BP_SET_LSIZE(new_bp, size);
4319 BP_SET_PSIZE(new_bp, size);
4320 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
4321 BP_SET_CHECKSUM(new_bp,
4322 spa_version(spa) >= SPA_VERSION_SLIM_ZIL
4323 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
4324 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
4325 BP_SET_LEVEL(new_bp, 0);
4326 BP_SET_DEDUP(new_bp, 0);
4327 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
4328
4329 /*
4330 * encrypted blocks will require an IV and salt. We generate
4331 * these now since we will not be rewriting the bp at
4332 * rewrite time.
4333 */
4334 if (os->os_encrypted) {
4335 uint8_t iv[ZIO_DATA_IV_LEN];
4336 uint8_t salt[ZIO_DATA_SALT_LEN];
4337
4338 BP_SET_CRYPT(new_bp, B_TRUE);
4339 VERIFY0(spa_crypt_get_salt(spa,
4340 dmu_objset_id(os), salt));
4341 VERIFY0(zio_crypt_generate_iv(iv));
4342
4343 zio_crypt_encode_params_bp(new_bp, salt, iv);
4344 }
4345 } else {
4346 zfs_dbgmsg("%s: zil block allocation failure: "
4347 "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
4348 error);
4349 }
4350
4351 return (error);
4352 }
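
/*
 * An example of the allocator selection above, with hypothetical
 * numbers: with spa_alloc_count == 4 and ds_object == 57, every ZIL
 * block for that objset lands on allocator cityhash1(57) % 4, so one
 * objset's log blocks share an allocator while blocks from different
 * objsets spread across all four in parallel.
 */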
4353
4354 /*
4355 * ==========================================================================
4356 * Read and write to physical devices
4357 * ==========================================================================
4358 */
4359
4360 /*
4361 * Issue an I/O to the underlying vdev. Typically the issue pipeline
4362 * stops after this stage and will resume upon I/O completion.
4363 * However, there are instances where the vdev layer may need to
4364 * continue the pipeline when an I/O was not issued. Since the I/O
4365 * that was sent to the vdev layer might be different than the one
4366 * currently active in the pipeline (see vdev_queue_io()), we explicitly
4367 * force the underlying vdev layers to call either zio_execute() or
4368 * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
4369 */
4370 static zio_t *
4371 zio_vdev_io_start(zio_t *zio)
4372 {
4373 vdev_t *vd = zio->io_vd;
4374 uint64_t align;
4375 spa_t *spa = zio->io_spa;
4376
4377 zio->io_delay = 0;
4378
4379 ASSERT(zio->io_error == 0);
4380 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
4381
4382 if (vd == NULL) {
4383 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
4384 spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
4385
4386 /*
4387 * The mirror_ops handle multiple DVAs in a single BP.
4388 */
4389 vdev_mirror_ops.vdev_op_io_start(zio);
4390 return (NULL);
4391 }
4392
4393 ASSERT3P(zio->io_logical, !=, zio);
4394 if (zio->io_type == ZIO_TYPE_WRITE) {
4395 ASSERT(spa->spa_trust_config);
4396
4397 /*
4398 * Note: the code can handle other kinds of writes,
4399 * but we don't expect them.
4400 */
4401 if (zio->io_vd->vdev_noalloc) {
4402 ASSERT(zio->io_flags &
4403 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
4404 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
4405 }
4406 }
4407
4408 align = 1ULL << vd->vdev_top->vdev_ashift;
4409
4410 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
4411 P2PHASE(zio->io_size, align) != 0) {
4412 /* Transform logical writes to be a full physical block size. */
4413 uint64_t asize = P2ROUNDUP(zio->io_size, align);
4414 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
4415 ASSERT(vd == vd->vdev_top);
4416 if (zio->io_type == ZIO_TYPE_WRITE) {
4417 abd_copy(abuf, zio->io_abd, zio->io_size);
4418 abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
4419 }
4420 zio_push_transform(zio, abuf, asize, asize, zio_subblock);
4421 }
4422
4423 /*
4424 * If this is not a physical io, make sure that it is properly aligned
4425 * before proceeding.
4426 */
4427 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
4428 ASSERT0(P2PHASE(zio->io_offset, align));
4429 ASSERT0(P2PHASE(zio->io_size, align));
4430 } else {
4431 /*
4432 * For physical writes, we allow 512b aligned writes and assume
4433 * the device will perform a read-modify-write as necessary.
4434 */
4435 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
4436 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
4437 }
4438
4439 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
4440
4441 /*
4442 * If this is a repair I/O, and there's no self-healing involved --
4443 * that is, we're just resilvering what we expect to resilver --
4444 * then don't do the I/O unless zio's txg is actually in vd's DTL.
4445 * This prevents spurious resilvering.
4446 *
4447 * There are a few ways that we can end up creating these spurious
4448 * resilver i/os:
4449 *
4450 * 1. A resilver i/o will be issued if any DVA in the BP has a
4451 * dirty DTL. The mirror code will issue resilver writes to
4452 * each DVA, including the one(s) that are not on vdevs with dirty
4453 * DTLs.
4454 *
4455 * 2. With nested replication, which happens when we have a
4456 * "replacing" or "spare" vdev that's a child of a mirror or raidz.
4457 * For example, given mirror(replacing(A+B), C), it's likely that
4458 * only A is out of date (it's the new device). In this case, we'll
4459 * read from C, then use the data to resilver A+B -- but we don't
4460 * actually want to resilver B, just A. The top-level mirror has no
4461 * way to know this, so instead we just discard unnecessary repairs
4462 * as we work our way down the vdev tree.
4463 *
4464 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
4465 * The same logic applies to any form of nested replication: ditto
4466 * + mirror, RAID-Z + replacing, etc.
4467 *
4468 * However, indirect vdevs point off to other vdevs which may have
4469 * DTL's, so we never bypass them. The child i/os on concrete vdevs
4470 * will be properly bypassed instead.
4471 *
4472 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from
4473 * a dRAID spare vdev. For example, when a dRAID spare is first
4474 * used, its spare blocks need to be written to, but the leaf vdevs
4475 * of such blocks can have an empty DTL_PARTIAL.
4476 *
4477 * There seemed no clean way to allow such writes while bypassing
4478 * spurious ones. At this point, just avoid all bypassing for dRAID
4479 * for correctness.
4480 */
4481 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
4482 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
4483 zio->io_txg != 0 && /* not a delegated i/o */
4484 vd->vdev_ops != &vdev_indirect_ops &&
4485 vd->vdev_top->vdev_ops != &vdev_draid_ops &&
4486 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
4487 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4488 zio_vdev_io_bypass(zio);
4489 return (zio);
4490 }
4491
4492 /*
4493 * Select the next best leaf I/O to process. Distributed spares are
4494 * excluded since they dispatch the I/O directly to a leaf vdev after
4495 * applying the dRAID mapping.
4496 */
4497 if (vd->vdev_ops->vdev_op_leaf &&
4498 vd->vdev_ops != &vdev_draid_spare_ops &&
4499 (zio->io_type == ZIO_TYPE_READ ||
4500 zio->io_type == ZIO_TYPE_WRITE ||
4501 zio->io_type == ZIO_TYPE_TRIM)) {
4502
4503 if ((zio = vdev_queue_io(zio)) == NULL)
4504 return (NULL);
4505
4506 if (!vdev_accessible(vd, zio)) {
4507 zio->io_error = SET_ERROR(ENXIO);
4508 zio_interrupt(zio);
4509 return (NULL);
4510 }
4511 zio->io_delay = gethrtime();
4512
4513 if (zio_handle_device_injection(vd, zio, ENOSYS) != 0) {
4514 /*
4515 * "no-op" injections return success, but do no actual
4516 * work. Just return it.
4517 */
4518 zio_delay_interrupt(zio);
4519 return (NULL);
4520 }
4521 }
4522
4523 vd->vdev_ops->vdev_op_io_start(zio);
4524 return (NULL);
4525 }
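
/*
 * An example of the alignment transform above, with hypothetical
 * sizes: a 6 KiB logical write to a top-level vdev with ashift=13
 * (8 KiB) fails the P2PHASE check, so it is copied into a fresh
 * 8 KiB abd, zero-padded from 6 KiB to 8 KiB, and pushed as a
 * transform (zio_subblock) so the original buffer and size are
 * restored when the I/O completes.
 */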
4526
4527 static zio_t *
4528 zio_vdev_io_done(zio_t *zio)
4529 {
4530 vdev_t *vd = zio->io_vd;
4531 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
4532 boolean_t unexpected_error = B_FALSE;
4533
4534 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4535 return (NULL);
4536 }
4537
4538 ASSERT(zio->io_type == ZIO_TYPE_READ ||
4539 zio->io_type == ZIO_TYPE_WRITE ||
4540 zio->io_type == ZIO_TYPE_FLUSH ||
4541 zio->io_type == ZIO_TYPE_TRIM);
4542
4543 if (zio->io_delay)
4544 zio->io_delay = gethrtime() - zio->io_delay;
4545
4546 if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4547 vd->vdev_ops != &vdev_draid_spare_ops) {
4548 if (zio->io_type != ZIO_TYPE_FLUSH)
4549 vdev_queue_io_done(zio);
4550
4551 if (zio_injection_enabled && zio->io_error == 0)
4552 zio->io_error = zio_handle_device_injections(vd, zio,
4553 EIO, EILSEQ);
4554
4555 if (zio_injection_enabled && zio->io_error == 0)
4556 zio->io_error = zio_handle_label_injection(zio, EIO);
4557
4558 if (zio->io_error && zio->io_type != ZIO_TYPE_FLUSH &&
4559 zio->io_type != ZIO_TYPE_TRIM) {
4560 if (!vdev_accessible(vd, zio)) {
4561 zio->io_error = SET_ERROR(ENXIO);
4562 } else {
4563 unexpected_error = B_TRUE;
4564 }
4565 }
4566 }
4567
4568 ops->vdev_op_io_done(zio);
4569
4570 if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
4571 VERIFY(vdev_probe(vd, zio) == NULL);
4572
4573 return (zio);
4574 }
4575
4576 /*
4577 * This function is used to change the priority of an existing zio that is
4578 * currently in-flight. This is used by the arc to upgrade priority in the
4579 * event that a demand read is made for a block that is currently queued
4580 * as a scrub or async read IO. Otherwise, the high priority read request
4581 * would end up having to wait for the lower priority IO.
4582 */
4583 void
4584 zio_change_priority(zio_t *pio, zio_priority_t priority)
4585 {
4586 zio_t *cio, *cio_next;
4587 zio_link_t *zl = NULL;
4588
4589 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
4590
4591 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
4592 vdev_queue_change_io_priority(pio, priority);
4593 } else {
4594 pio->io_priority = priority;
4595 }
4596
4597 mutex_enter(&pio->io_lock);
4598 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
4599 cio_next = zio_walk_children(pio, &zl);
4600 zio_change_priority(cio, priority);
4601 }
4602 mutex_exit(&pio->io_lock);
4603 }
4604
4605 /*
4606 * For non-raidz ZIOs, we can just copy aside the bad data read from the
4607 * disk, and use that to finish the checksum ereport later.
4608 */
4609 static void
4610 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
4611 const abd_t *good_buf)
4612 {
4613 /* no processing needed */
4614 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
4615 }
4616
4617 void
4618 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
4619 {
4620 void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
4621
4622 abd_copy(abd, zio->io_abd, zio->io_size);
4623
4624 zcr->zcr_cbinfo = zio->io_size;
4625 zcr->zcr_cbdata = abd;
4626 zcr->zcr_finish = zio_vsd_default_cksum_finish;
4627 zcr->zcr_free = zio_abd_free;
4628 }
4629
4630 static zio_t *
4631 zio_vdev_io_assess(zio_t *zio)
4632 {
4633 vdev_t *vd = zio->io_vd;
4634
4635 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4636 return (NULL);
4637 }
4638
4639 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
4640 spa_config_exit(zio->io_spa, SCL_ZIO, zio);
4641
4642 if (zio->io_vsd != NULL) {
4643 zio->io_vsd_ops->vsd_free(zio);
4644 zio->io_vsd = NULL;
4645 }
4646
4647 /*
4648 * If a Direct I/O operation has a checksum verify error then this I/O
4649 * should not attempt to be issued again.
4650 */
4651 if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR) {
4652 if (zio->io_type == ZIO_TYPE_WRITE) {
4653 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_LOGICAL);
4654 ASSERT3U(zio->io_error, ==, EIO);
4655 }
4656 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4657 return (zio);
4658 }
4659
4660 if (zio_injection_enabled && zio->io_error == 0)
4661 zio->io_error = zio_handle_fault_injection(zio, EIO);
4662
4663 /*
4664 * If the I/O failed, determine whether we should attempt to retry it.
4665 *
4666 * On retry, we cut in line in the issue queue, since we don't want
4667 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
4668 */
4669 if (zio->io_error && vd == NULL &&
4670 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
4671 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
4672 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
4673 zio->io_error = 0;
4674 zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
4675 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
4676 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
4677 zio_requeue_io_start_cut_in_line);
4678 return (NULL);
4679 }
4680
4681 /*
4682 * If we got an error on a leaf device, convert it to ENXIO
4683 * if the device is not accessible at all.
4684 */
4685 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4686 !vdev_accessible(vd, zio))
4687 zio->io_error = SET_ERROR(ENXIO);
4688
4689 /*
4690 * If we can't write to an interior vdev (mirror or RAID-Z),
4691 * set vdev_cant_write so that we stop trying to allocate from it.
4692 */
4693 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
4694 vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
4695 vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
4696 "cant_write=TRUE due to write failure with ENXIO",
4697 zio);
4698 vd->vdev_cant_write = B_TRUE;
4699 }
4700
4701 /*
4702 * If a cache flush returns ENOTSUP we know that no future
4703 * attempts will ever succeed. In this case we set a persistent
4704 * boolean flag so that we don't bother with it in the future, and
4705 * then we act like the flush succeeded.
4706 */
4707 if (zio->io_error == ENOTSUP && zio->io_type == ZIO_TYPE_FLUSH &&
4708 vd != NULL) {
4709 vd->vdev_nowritecache = B_TRUE;
4710 zio->io_error = 0;
4711 }
4712
4713 if (zio->io_error)
4714 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4715
4716 return (zio);
4717 }
4718
4719 void
4720 zio_vdev_io_reissue(zio_t *zio)
4721 {
4722 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4723 ASSERT(zio->io_error == 0);
4724
4725 zio->io_stage >>= 1;
4726 }
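
/*
 * A note on the io_stage >>= 1 idiom used by zio_vdev_io_reissue()
 * above and the redone/bypass helpers below: pipeline stages are
 * one-hot bits, and zio_execute() advances to the next set bit
 * strictly above io_stage, so halving io_stage makes the named stage
 * the next one executed. For example (a sketch):
 *
 *	zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
 *	(the next zio_execute() pass re-enters ZIO_STAGE_VDEV_IO_START)
 */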
4727
4728 void
4729 zio_vdev_io_redone(zio_t *zio)
4730 {
4731 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
4732
4733 zio->io_stage >>= 1;
4734 }
4735
4736 void
4737 zio_vdev_io_bypass(zio_t *zio)
4738 {
4739 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4740 ASSERT(zio->io_error == 0);
4741
4742 zio->io_flags |= ZIO_FLAG_IO_BYPASS;
4743 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
4744 }
4745
4746 /*
4747 * ==========================================================================
4748 * Encrypt and store encryption parameters
4749 * ==========================================================================
4750 */
4751
4752
4753 /*
4754 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
4755 * managing the storage of encryption parameters and passing them to the
4756 * lower-level encryption functions.
4757 */
4758 static zio_t *
4759 zio_encrypt(zio_t *zio)
4760 {
4761 zio_prop_t *zp = &zio->io_prop;
4762 spa_t *spa = zio->io_spa;
4763 blkptr_t *bp = zio->io_bp;
4764 uint64_t psize = BP_GET_PSIZE(bp);
4765 uint64_t dsobj = zio->io_bookmark.zb_objset;
4766 dmu_object_type_t ot = BP_GET_TYPE(bp);
4767 void *enc_buf = NULL;
4768 abd_t *eabd = NULL;
4769 uint8_t salt[ZIO_DATA_SALT_LEN];
4770 uint8_t iv[ZIO_DATA_IV_LEN];
4771 uint8_t mac[ZIO_DATA_MAC_LEN];
4772 boolean_t no_crypt = B_FALSE;
4773
4774 /* the root zio already encrypted the data */
4775 if (zio->io_child_type == ZIO_CHILD_GANG)
4776 return (zio);
4777
4778 /* only ZIL blocks are re-encrypted on rewrite */
4779 if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
4780 return (zio);
4781
4782 if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
4783 BP_SET_CRYPT(bp, B_FALSE);
4784 return (zio);
4785 }
4786
4787 /* if we are doing raw encryption set the provided encryption params */
4788 if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
4789 ASSERT0(BP_GET_LEVEL(bp));
4790 BP_SET_CRYPT(bp, B_TRUE);
4791 BP_SET_BYTEORDER(bp, zp->zp_byteorder);
4792 if (ot != DMU_OT_OBJSET)
4793 zio_crypt_encode_mac_bp(bp, zp->zp_mac);
4794
4795 /* dnode blocks must be written out in the provided byteorder */
4796 if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
4797 ot == DMU_OT_DNODE) {
4798 void *bswap_buf = zio_buf_alloc(psize);
4799 abd_t *babd = abd_get_from_buf(bswap_buf, psize);
4800
4801 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4802 abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
4803 dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
4804 psize);
4805
4806 abd_take_ownership_of_buf(babd, B_TRUE);
4807 zio_push_transform(zio, babd, psize, psize, NULL);
4808 }
4809
4810 if (DMU_OT_IS_ENCRYPTED(ot))
4811 zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
4812 return (zio);
4813 }
4814
4815 /* indirect blocks only maintain a cksum of the lower level MACs */
4816 if (BP_GET_LEVEL(bp) > 0) {
4817 BP_SET_CRYPT(bp, B_TRUE);
4818 VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
4819 zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
4820 mac));
4821 zio_crypt_encode_mac_bp(bp, mac);
4822 return (zio);
4823 }
4824
4825 /*
4826 * Objset blocks are a special case since they have 2 256-bit MACs
4827 * embedded within them.
4828 */
4829 if (ot == DMU_OT_OBJSET) {
4830 ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
4831 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4832 BP_SET_CRYPT(bp, B_TRUE);
4833 VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
4834 zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
4835 return (zio);
4836 }
4837
4838 /* unencrypted object types are only authenticated with a MAC */
4839 if (!DMU_OT_IS_ENCRYPTED(ot)) {
4840 BP_SET_CRYPT(bp, B_TRUE);
4841 VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
4842 zio->io_abd, psize, mac));
4843 zio_crypt_encode_mac_bp(bp, mac);
4844 return (zio);
4845 }
4846
4847 /*
4848 * Later passes of sync-to-convergence may decide to rewrite data
4849 * in place to avoid more disk reallocations. This presents a problem
4850 * for encryption because this constitutes rewriting the new data with
4851 * the same encryption key and IV. However, this only applies to blocks
4852 * in the MOS (particularly the spacemaps) and we do not encrypt the
4853 * MOS. We assert that the zio is allocating or an intent log write
4854 * to enforce this.
4855 */
4856 ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
4857 ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
4858 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
4859 ASSERT3U(psize, !=, 0);
4860
4861 enc_buf = zio_buf_alloc(psize);
4862 eabd = abd_get_from_buf(enc_buf, psize);
4863 abd_take_ownership_of_buf(eabd, B_TRUE);
4864
4865 /*
4866 * For an explanation of what encryption parameters are stored
4867 * where, see the block comment in zio_crypt.c.
4868 */
4869 if (ot == DMU_OT_INTENT_LOG) {
4870 zio_crypt_decode_params_bp(bp, salt, iv);
4871 } else {
4872 BP_SET_CRYPT(bp, B_TRUE);
4873 }
4874
4875 /* Perform the encryption. This should not fail */
4876 VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
4877 BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
4878 salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
4879
4880 /* encode encryption metadata into the bp */
4881 if (ot == DMU_OT_INTENT_LOG) {
4882 /*
4883 * ZIL blocks store the MAC in the embedded checksum, so the
4884 * transform must always be applied.
4885 */
4886 zio_crypt_encode_mac_zil(enc_buf, mac);
4887 zio_push_transform(zio, eabd, psize, psize, NULL);
4888 } else {
4889 BP_SET_CRYPT(bp, B_TRUE);
4890 zio_crypt_encode_params_bp(bp, salt, iv);
4891 zio_crypt_encode_mac_bp(bp, mac);
4892
4893 if (no_crypt) {
4894 ASSERT3U(ot, ==, DMU_OT_DNODE);
4895 abd_free(eabd);
4896 } else {
4897 zio_push_transform(zio, eabd, psize, psize, NULL);
4898 }
4899 }
4900
4901 return (zio);
4902 }
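
/*
 * Where the parameters land, per the branches above (see the block
 * comment in zio_crypt.c for the authoritative layout):
 *
 *	level > 0 (indirect)    MAC over the lower-level MACs, in the bp
 *	DMU_OT_OBJSET           two MACs embedded in the objset block
 *	unencrypted types       authentication MAC encoded in the bp
 *	DMU_OT_INTENT_LOG       salt/IV decoded from the bp, MAC stored
 *	                        in the ZIL block's embedded checksum
 *	other encrypted types   salt, IV and MAC all encoded into the bp
 */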
4903
4904 /*
4905 * ==========================================================================
4906 * Generate and verify checksums
4907 * ==========================================================================
4908 */
4909 static zio_t *
4910 zio_checksum_generate(zio_t *zio)
4911 {
4912 blkptr_t *bp = zio->io_bp;
4913 enum zio_checksum checksum;
4914
4915 if (bp == NULL) {
4916 /*
4917 * This is zio_write_phys().
4918 * We're either generating a label checksum, or none at all.
4919 */
4920 checksum = zio->io_prop.zp_checksum;
4921
4922 if (checksum == ZIO_CHECKSUM_OFF)
4923 return (zio);
4924
4925 ASSERT(checksum == ZIO_CHECKSUM_LABEL);
4926 } else {
4927 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
4928 ASSERT(!IO_IS_ALLOCATING(zio));
4929 checksum = ZIO_CHECKSUM_GANG_HEADER;
4930 } else {
4931 checksum = BP_GET_CHECKSUM(bp);
4932 }
4933 }
4934
4935 zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
4936
4937 return (zio);
4938 }
4939
4940 static zio_t *
4941 zio_checksum_verify(zio_t *zio)
4942 {
4943 zio_bad_cksum_t info;
4944 blkptr_t *bp = zio->io_bp;
4945 int error;
4946
4947 ASSERT(zio->io_vd != NULL);
4948
4949 if (bp == NULL) {
4950 /*
4951 * This is zio_read_phys().
4952 * We're either verifying a label checksum, or nothing at all.
4953 */
4954 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
4955 return (zio);
4956
4957 ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
4958 }
4959
4960 ASSERT0(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
4961 IMPLY(zio->io_flags & ZIO_FLAG_DIO_READ,
4962 !(zio->io_flags & ZIO_FLAG_SPECULATIVE));
4963
4964 if ((error = zio_checksum_error(zio, &info)) != 0) {
4965 zio->io_error = error;
4966 if (error == ECKSUM &&
4967 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
4968 if (zio->io_flags & ZIO_FLAG_DIO_READ) {
4969 zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
4970 zio_t *pio = zio_unique_parent(zio);
4971 /*
4972 * Any Direct I/O read that has a checksum
4973 * error must be treated as suspicious, as the
4974 * contents of the buffer could be getting
4975 * manipulated while the I/O is taking place.
4976 *
4977 * The checksum verify error will only be
4978 * reported here for disk and file vdevs, and
4979 * will be reported on those where the failure
4980 * occurred. Other vdev types report the
4981 * verify failure in their own code paths.
4982 */
4983 if (pio->io_child_type == ZIO_CHILD_LOGICAL) {
4984 zio_dio_chksum_verify_error_report(zio);
4985 }
4986 } else {
4987 mutex_enter(&zio->io_vd->vdev_stat_lock);
4988 zio->io_vd->vdev_stat.vs_checksum_errors++;
4989 mutex_exit(&zio->io_vd->vdev_stat_lock);
4990 (void) zfs_ereport_start_checksum(zio->io_spa,
4991 zio->io_vd, &zio->io_bookmark, zio,
4992 zio->io_offset, zio->io_size, &info);
4993 }
4994 }
4995 }
4996
4997 return (zio);
4998 }
4999
5000 static zio_t *
5001 zio_dio_checksum_verify(zio_t *zio)
5002 {
5003 zio_t *pio = zio_unique_parent(zio);
5004 int error;
5005
5006 ASSERT3P(zio->io_vd, !=, NULL);
5007 ASSERT3P(zio->io_bp, !=, NULL);
5008 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
5009 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
5010 ASSERT3B(pio->io_prop.zp_direct_write, ==, B_TRUE);
5011 ASSERT3U(pio->io_child_type, ==, ZIO_CHILD_LOGICAL);
5012
5013 if (zfs_vdev_direct_write_verify == 0 || zio->io_error != 0)
5014 goto out;
5015
5016 if ((error = zio_checksum_error(zio, NULL)) != 0) {
5017 zio->io_error = error;
5018 if (error == ECKSUM) {
5019 zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
5020 zio_dio_chksum_verify_error_report(zio);
5021 }
5022 }
5023
5024 out:
5025 return (zio);
5026 }
5027
5028
5029 /*
5030 * Called by RAID-Z to ensure we don't compute the checksum twice.
5031 */
5032 void
5033 zio_checksum_verified(zio_t *zio)
5034 {
5035 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
5036 }
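
/*
 * Illustrative call pattern (a sketch, not the actual RAID-Z code): once
 * a caller has already validated the data against its checksum, e.g.
 * while reconstructing from parity, it can clear the verify stage so the
 * pipeline does not repeat the work:
 *
 *	if (data_already_verified)	// hypothetical predicate
 *		zio_checksum_verified(zio);
 */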

/*
 * Report Direct I/O checksum verify error and create ZED event.
 */
void
zio_dio_chksum_verify_error_report(zio_t *zio)
{
	ASSERT(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL)
		return;

	mutex_enter(&zio->io_vd->vdev_stat_lock);
	zio->io_vd->vdev_stat.vs_dio_verify_errors++;
	mutex_exit(&zio->io_vd->vdev_stat_lock);
	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * Convert checksum error for writes into EIO.
		 */
		zio->io_error = SET_ERROR(EIO);
		/*
		 * Report dio_verify_wr ZED event.
		 */
		(void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_WR,
		    zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
	} else {
		/*
		 * Report dio_verify_rd ZED event.
		 */
		(void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_RD,
		    zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
	}
}

/*
 * ==========================================================================
 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
 * An error of 0 indicates success. ENXIO indicates whole-device failure,
 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
 * indicate errors that are specific to one I/O, and most likely permanent.
 * Any other error is presumed to be worse because we weren't expecting it.
 * ==========================================================================
 */
int
zio_worst_error(int e1, int e2)
{
	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
	int r1, r2;

	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
		if (e1 == zio_error_rank[r1])
			break;

	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
		if (e2 == zio_error_rank[r2])
			break;

	return (r1 > r2 ? e1 : e2);
}
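
/*
 * For example (derived from the ranking above, for illustration only):
 *
 *	zio_worst_error(ENXIO, ECKSUM);	// ECKSUM: ranks after ENXIO
 *	zio_worst_error(EIO, 0);	// EIO: any error beats success
 *	zio_worst_error(EIO, EINVAL);	// EINVAL: unranked errors rank worst
 */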

/*
 * ==========================================================================
 * I/O completion
 * ==========================================================================
 */
static zio_t *
zio_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	zio_t *pio, *pio_next;
	zio_link_t *zl = NULL;

	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
	    ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
		return (NULL);
	}

	if (zio->io_ready) {
		ASSERT(IO_IS_ALLOCATING(zio));
		ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg ||
		    BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);

		zio->io_ready(zio);
	}

#ifdef ZFS_DEBUG
	if (bp != NULL && bp != &zio->io_bp_copy)
		zio->io_bp_copy = *bp;
#endif

	if (zio->io_error != 0) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
			ASSERT(IO_IS_ALLOCATING(zio));
			ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
			ASSERT(zio->io_metaslab_class != NULL);
			ASSERT(ZIO_HAS_ALLOCATOR(zio));

			/*
			 * We were unable to allocate anything; unreserve and
			 * issue the next I/O to allocate.
			 */
			metaslab_class_throttle_unreserve(
			    zio->io_metaslab_class, zio->io_prop.zp_copies,
			    zio->io_allocator, zio);
			zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
		}
	}

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_READY] = 1;
	pio = zio_walk_parents(zio, &zl);
	mutex_exit(&zio->io_lock);

	/*
	 * As we notify zio's parents, new parents could be added.
	 * New parents go to the head of zio's io_parent_list, however,
	 * so we will (correctly) not notify them. The remainder of zio's
	 * io_parent_list, from 'pio_next' onward, cannot change because
	 * all parents must wait for us to be done before they can be done.
	 */
	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio, &zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
	}

	if (zio->io_flags & ZIO_FLAG_NODATA) {
		if (bp != NULL && BP_IS_GANG(bp)) {
			zio->io_flags &= ~ZIO_FLAG_NODATA;
		} else {
			ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		}
	}

	if (zio_injection_enabled &&
	    zio->io_spa->spa_syncing_txg == zio->io_txg)
		zio_handle_ignored_writes(zio);

	return (zio);
}

/*
 * Update the allocation throttle accounting.
 */
static void
zio_dva_throttle_done(zio_t *zio)
{
	zio_t *lio __maybe_unused = zio->io_logical;
	zio_t *pio = zio_unique_parent(zio);
	vdev_t *vd = zio->io_vd;
	int flags = METASLAB_ASYNC_ALLOC;

	ASSERT3P(zio->io_bp, !=, NULL);
	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
	ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
	ASSERT(vd != NULL);
	ASSERT3P(vd, ==, vd->vdev_top);
	ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
	ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
	ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
	ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));

	/*
	 * Parents of gang children can have two flavors -- ones that
	 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
	 * and ones that allocated the constituent blocks. The allocation
	 * throttle needs to know the allocating parent zio so we must find
	 * it here.
	 */
	if (pio->io_child_type == ZIO_CHILD_GANG) {
		/*
		 * If our parent is a rewrite gang child then our grandparent
		 * would have been the one that performed the allocation.
		 */
		if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
			pio = zio_unique_parent(pio);
		flags |= METASLAB_GANG_CHILD;
	}

	ASSERT(IO_IS_ALLOCATING(pio));
	ASSERT(ZIO_HAS_ALLOCATOR(pio));
	ASSERT3P(zio, !=, zio->io_logical);
	ASSERT(zio->io_logical != NULL);
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
	ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
	ASSERT(zio->io_metaslab_class != NULL);

	mutex_enter(&pio->io_lock);
	metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
	    pio->io_allocator, B_TRUE);
	mutex_exit(&pio->io_lock);

	metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
	    pio->io_allocator, pio);

	/*
	 * Call into the pipeline to see if there is more work that
	 * needs to be done. If there is work to be done it will be
	 * dispatched to another taskq thread.
	 */
	zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}

static zio_t *
zio_done(zio_t *zio)
{
	/*
	 * Always attempt to keep stack usage minimal here since
	 * we can be called recursively up to 19 levels deep.
	 */
	const uint64_t psize = zio->io_size;
	zio_t *pio, *pio_next;
	zio_link_t *zl = NULL;

	/*
	 * If our children haven't all completed,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
		return (NULL);
	}

	/*
	 * If the allocation throttle is enabled, then update the accounting.
	 * We only track child I/Os that are part of an allocating async
	 * write. We must do this since the allocation is performed
	 * by the logical I/O but the actual write is done by child I/Os.
	 */
	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
	    zio->io_child_type == ZIO_CHILD_VDEV) {
		ASSERT(zio->io_metaslab_class != NULL);
		ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
		zio_dva_throttle_done(zio);
	}

	/*
	 * If the allocation throttle is enabled, verify that
	 * we have decremented the refcounts for every I/O that was throttled.
	 */
	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(zio->io_bp != NULL);
		ASSERT(ZIO_HAS_ALLOCATOR(zio));

		metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
		    zio->io_allocator);
		VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
		    mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
	}

	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT(zio->io_children[c][w] == 0);

	if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
		ASSERT(zio->io_bp->blk_pad[0] == 0);
		ASSERT(zio->io_bp->blk_pad[1] == 0);
		ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
		    sizeof (blkptr_t)) == 0 ||
		    (zio->io_bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
		    zio->io_bp_override == NULL &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT3U(zio->io_prop.zp_copies, <=,
			    BP_GET_NDVAS(zio->io_bp));
			ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
			    (BP_COUNT_GANG(zio->io_bp) ==
			    BP_GET_NDVAS(zio->io_bp)));
		}
		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
			VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
	}

	/*
	 * If there were child vdev/gang/ddt errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);

	/*
	 * If the I/O on the transformed data was successful, generate any
	 * checksum reports now while we still have the transformed data.
	 */
	if (zio->io_error == 0) {
		while (zio->io_cksum_report != NULL) {
			zio_cksum_report_t *zcr = zio->io_cksum_report;
			uint64_t align = zcr->zcr_align;
			uint64_t asize = P2ROUNDUP(psize, align);
			abd_t *adata = zio->io_abd;

			if (adata != NULL && asize != psize) {
				adata = abd_alloc(asize, B_TRUE);
				abd_copy(adata, zio->io_abd, psize);
				abd_zero_off(adata, psize, asize - psize);
			}

			zio->io_cksum_report = zcr->zcr_next;
			zcr->zcr_next = NULL;
			zcr->zcr_finish(zcr, adata);
			zfs_ereport_free_checksum(zcr);

			if (adata != NULL && asize != psize)
				abd_free(adata);
		}
	}

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);

	/*
	 * If this I/O is attached to a particular vdev and is slow, taking
	 * longer than zio_slow_io_ms (30 seconds by default) to complete,
	 * post an error describing the I/O delay. We ignore these errors if
	 * the device is currently unavailable.
	 */
	if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
		if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
			/*
			 * We want to only increment our slow IO counters if
			 * the IO is valid (i.e. not if the drive is removed).
			 *
			 * zfs_ereport_post() will also do these checks, but
			 * it can also ratelimit and have other failures, so we
			 * need to increment the slow_io counters independent
			 * of it.
			 */
			if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
			    zio->io_spa, zio->io_vd, zio)) {
				mutex_enter(&zio->io_vd->vdev_stat_lock);
				zio->io_vd->vdev_stat.vs_slow_ios++;
				mutex_exit(&zio->io_vd->vdev_stat_lock);

				(void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
				    zio->io_spa, zio->io_vd, &zio->io_bookmark,
				    zio, 0);
			}
		}
	}

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level. We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
		    !vdev_is_dead(zio->io_vd) &&
		    !(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)) {
			int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
			    zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
			if (ret != EALREADY) {
				mutex_enter(&zio->io_vd->vdev_stat_lock);
				if (zio->io_type == ZIO_TYPE_READ)
					zio->io_vd->vdev_stat.vs_read_errors++;
				else if (zio->io_type == ZIO_TYPE_WRITE)
					zio->io_vd->vdev_stat.vs_write_errors++;
				mutex_exit(&zio->io_vd->vdev_stat_lock);
			}
		}

		if ((zio->io_error == EIO || !(zio->io_flags &
		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
		    !(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR) &&
		    zio == zio->io_logical) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(zio->io_spa, &zio->io_bookmark,
			    BP_GET_LOGICAL_BIRTH(zio->io_bp));
			(void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
			    zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
		}
	}

	if (zio->io_error && zio == zio->io_logical) {
		/*
		 * Determine whether zio should be reexecuted. This will
		 * propagate all the way to the root via zio_notify_parent().
		 */
		ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		if (IO_IS_ALLOCATING(zio) &&
		    !(zio->io_flags & ZIO_FLAG_CANFAIL) &&
		    !(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)) {
			if (zio->io_error != ENOSPC)
				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
			else
				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
		}

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
		    zio->io_error == ENXIO &&
		    spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
		    spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		/*
		 * Here is a possibly good place to attempt to do
		 * either combinatorial reconstruction or error correction
		 * based on checksums. It also might be a good place
		 * to send out preliminary ereports before we suspend
		 * processing.
		 */
	}

	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);

	if ((zio->io_error || zio->io_reexecute) &&
	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
		zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);

	zio_gang_tree_free(&zio->io_gang_tree);

	/*
	 * Godfather I/Os should never suspend.
	 */
	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
		zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;

	if (zio->io_reexecute) {
		/*
		 * A Direct I/O operation that has a checksum verify error
		 * should not attempt to reexecute. Instead, the error should
		 * just be propagated back.
		 */
		ASSERT(!(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR));

		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down. When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same. This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them. It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended). This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		zl = NULL;
		for (pio = zio_walk_parents(zio, &zl); pio != NULL;
		    pio = pio_next) {
			zio_link_t *remove_zl = zl;
			pio_next = zio_walk_parents(zio, &zl);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
				zio_remove_child(pio, zio, remove_zl);
				/*
				 * This is a rare code path, so we don't
				 * bother with "next_to_execute".
				 */
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
				    NULL);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent. Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			/*
			 * This is a rare code path, so we don't bother with
			 * "next_to_execute".
			 */
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
			spa_taskq_dispatch(zio->io_spa,
			    ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
			    zio_reexecute, zio, B_FALSE);
		}
		return (NULL);
	}

	ASSERT(list_is_empty(&zio->io_child_list));
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	/*
	 * We are done executing this zio. We may want to execute a parent
	 * next. See the comment in zio_notify_parent().
	 */
	zio_t *next_to_execute = NULL;
	zl = NULL;
	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
		zio_link_t *remove_zl = zl;
		pio_next = zio_walk_parents(zio, &zl);
		zio_remove_child(pio, zio, remove_zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (next_to_execute);
}

/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_write_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_compress,
	zio_encrypt,
	zio_checksum_generate,
	zio_nop_write,
	zio_brt_free,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_throttle,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_dio_checksum_verify,
	zio_done
};
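
/*
 * Note: the entries above are assumed to be in the same order as the stage
 * bits in enum zio_stage (zio_impl.h), since the pipeline dispatches a
 * stage's callback by the stage's bit position. Reordering this table
 * without a matching enum change would dispatch the wrong callbacks.
 */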
/*
 * Compare two zbookmark_phys_t's to see which we would reach first in a
 * pre-order traversal of the object tree.
 *
 * This is simple in every case aside from the meta-dnode object. For all other
 * objects, we traverse them in order (object 1 before object 2, and so on).
 * However, all of these objects are traversed while traversing object 0, since
 * the data it points to is the list of objects. Thus, we need to convert to a
 * canonical representation so we can compare meta-dnode bookmarks to
 * non-meta-dnode bookmarks.
 *
 * We do this by calculating "equivalents" for each field of the zbookmark.
 * zbookmarks outside of the meta-dnode use their own object and level, and
 * calculate the level 0 equivalent (the first L0 blkid that is contained in the
 * blocks this bookmark refers to) by multiplying their blkid by their span
 * (the number of L0 blocks contained within one block at their level).
 * zbookmarks inside the meta-dnode calculate their object equivalent
 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
 * level + (1 << 31) (any value larger than a level could ever be) for their
 * level. This causes them to always compare before a bookmark in their object
 * equivalent, compare appropriately to bookmarks in other objects, and to
 * compare appropriately to other bookmarks in the meta-dnode.
 */
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
    const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
	/*
	 * These variables represent the "equivalent" values for the zbookmark,
	 * after converting zbookmarks inside the meta dnode to their
	 * normal-object equivalents.
	 */
	uint64_t zb1obj, zb2obj;
	uint64_t zb1L0, zb2L0;
	uint64_t zb1level, zb2level;

	if (zb1->zb_object == zb2->zb_object &&
	    zb1->zb_level == zb2->zb_level &&
	    zb1->zb_blkid == zb2->zb_blkid)
		return (0);

	IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
	IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);

	/*
	 * BP_SPANB calculates the span in blocks.
	 */
	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);

	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb1L0 = 0;
		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
	} else {
		zb1obj = zb1->zb_object;
		zb1level = zb1->zb_level;
	}

	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb2L0 = 0;
		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
	} else {
		zb2obj = zb2->zb_object;
		zb2level = zb2->zb_level;
	}

	/* Now that we have a canonical representation, do the comparison. */
	if (zb1obj != zb2obj)
		return (zb1obj < zb2obj ? -1 : 1);
	else if (zb1L0 != zb2L0)
		return (zb1L0 < zb2L0 ? -1 : 1);
	else if (zb1level != zb2level)
		return (zb1level > zb2level ? -1 : 1);
	/*
	 * This can (theoretically) happen if the bookmarks have the same
	 * object and level, but different blkids, if the block sizes are
	 * not the same. There is presently no way to change the indirect
	 * block sizes.
	 */
	return (0);
}
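
/*
 * Worked example (illustrative numbers only): with 128K indirect blocks
 * (ibs = 17), BP_SPANB(17, 1) = 1 << (17 - SPA_BLKPTRSHIFT) = 1024, so a
 * level-1 bookmark with blkid 2 has L0 equivalent 2 * 1024 = 2048 and is
 * ordered against any level-0 bookmark in the same object by that value.
 * On a tie, the higher level sorts first, matching pre-order traversal
 * (an indirect block is visited before the blocks it points to).
 */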

/*
 * This function checks the following: given that last_block is the place
 * that our traversal stopped last time, does that guarantee that we've
 * visited every node under subtree_root? The raw output of
 * zbookmark_compare() cannot answer this by itself, so we pass in a
 * modified version of subtree_root: by incrementing its block id, and then
 * checking whether last_block is at or after that point in the traversal,
 * we can tell whether or not having visited last_block implies that all of
 * subtree_root's children have been visited.
 */
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	zbookmark_phys_t mod_zb = *subtree_root;
	mod_zb.zb_blkid++;
	ASSERT0(last_block->zb_level);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	/*
	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
	 * data block size in sectors, because that variable is only used if
	 * the bookmark refers to a block in the meta-dnode. Since we don't
	 * know without examining it what object it refers to, and there's no
	 * harm in passing in this value in other cases, we always pass it in.
	 *
	 * We pass in 0 for the indirect block size shift because zb2 must be
	 * level 0. The indirect block size is only used to calculate the span
	 * of the bookmark, but since the bookmark must be level 0, the span is
	 * always 1, so the math works out.
	 *
	 * If you make changes to how the zbookmark_compare code works, be sure
	 * to make sure that this code still works afterwards.
	 */
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
	    last_block) <= 0);
}
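
/*
 * For example (a sketch with illustrative numbers): if subtree_root is
 * {level = 1, blkid = 3} under 128K indirect blocks, it spans L0 blocks
 * 3072..4095. We bump the blkid to 4 (L0 equivalent 4096); if the level-0
 * last_block has blkid >= 4096, the traversal has already moved past
 * every block under subtree_root, so the subtree is complete.
 */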

/*
 * This function is similar to zbookmark_subtree_completed(), but returns true
 * if subtree_root is equal to or ahead of last_block, i.e. still to be done.
 */
boolean_t
zbookmark_subtree_tbd(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	ASSERT0(last_block->zb_level);
	if (dnp == NULL)
		return (B_FALSE);
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
	    last_block) >= 0);
}

EXPORT_SYMBOL(zio_type_name);
EXPORT_SYMBOL(zio_buf_alloc);
EXPORT_SYMBOL(zio_data_buf_alloc);
EXPORT_SYMBOL(zio_buf_free);
EXPORT_SYMBOL(zio_data_buf_free);

ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
	"Max I/O completion time (milliseconds) before marking it as slow");

ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
	"Prioritize requeued I/O");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
	"Defer frees starting in this pass");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
	"Don't compress starting in this pass");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
	"Rewrite new bps starting in this pass");

ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
	"Throttle block allocations in the ZIO pipeline");

ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
	"Log all slow ZIOs, not just those with vdevs");