// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, 2023, 2024, 2025, Klara, Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, Datto, Inc.
 * Copyright (c) 2021, 2024 by George Melikov. All rights reserved.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *const zio_type_name[ZIO_TYPES] = {
	/*
	 * Note: Linux kernel thread name length is limited, so these
	 * names will differ from upstream OpenZFS.
	 */
	"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_flush", "z_trim"
};

int zio_dva_throttle_enabled = B_TRUE;
static int zio_deadman_log_all = B_FALSE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
static kmem_cache_t *zio_cache;
static kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif

/* Mark IOs as "slow" if they take longer than 30 seconds */
static uint_t zio_slow_io_ms = (30 * MILLISEC);

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
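/*
 * Worked example (illustrative): block pointers are 128 bytes, so
 * SPA_BLKPTRSHIFT is 7 and a 128K indirect block (indblkshift = 17) holds
 * 2^10 = 1024 block pointers. BP_SPANB(17, 1) is therefore 1024 leaf
 * blocks, and BP_SPANB(17, 2) is 1024 * 1024.
 */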
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 *
 * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
 * compression (including of metadata). In practice, we don't have this
 * many sync passes, so this has no effect.
 *
 * The original intent was that disabling compression would help the sync
 * passes to converge. However, in practice disabling compression increases
 * the average number of sync passes, because when we turn compression off,
 * many blocks' sizes change and thus we have to re-allocate (not overwrite)
 * them. It also increases the number of 128KB allocations (e.g. for indirect
 * blocks and spacemaps) because these will not be compressed. The 128K
 * allocations are especially detrimental to performance on highly fragmented
 * systems, which may have very few free segments of this size, and may need
 * to load new metaslabs to satisfy 128K allocations.
 */

/* defer frees starting in this pass */
uint_t zfs_sync_pass_deferred_free = 2;

/* don't compress starting in this pass */
static uint_t zfs_sync_pass_dont_compress = 8;

/* rewrite new bps starting in this pass */
static uint_t zfs_sync_pass_rewrite = 2;
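
/*
 * Illustrative sketch of how such a gate is consulted (simplified; the
 * real check lives in the write-issue path, e.g. zio_write_compress()):
 *
 *	if (spa_sync_pass(spa) >= zfs_sync_pass_dont_compress)
 *		compress = ZIO_COMPRESS_OFF;
 */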

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

/*
 * Enable smaller cores by excluding metadata
 * allocations as well.
 */
int zio_exclude_metadata = 0;
static int zio_requeue_io_start_cut_in_line = 1;

#ifdef ZFS_DEBUG
static const int zio_buf_debug_limit = 16384;
#else
static const int zio_buf_debug_limit = 0;
#endif

typedef struct zio_stats {
	kstat_named_t ziostat_total_allocations;
	kstat_named_t ziostat_alloc_class_fallbacks;
	kstat_named_t ziostat_gang_writes;
	kstat_named_t ziostat_gang_multilevel;
} zio_stats_t;

static zio_stats_t zio_stats = {
	{ "total_allocations",	KSTAT_DATA_UINT64 },
	{ "alloc_class_fallbacks",	KSTAT_DATA_UINT64 },
	{ "gang_writes",	KSTAT_DATA_UINT64 },
	{ "gang_multilevel",	KSTAT_DATA_UINT64 },
};

struct {
	wmsum_t ziostat_total_allocations;
	wmsum_t ziostat_alloc_class_fallbacks;
	wmsum_t ziostat_gang_writes;
	wmsum_t ziostat_gang_multilevel;
} ziostat_sums;

#define	ZIOSTAT_BUMP(stat)	wmsum_add(&ziostat_sums.stat, 1);
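
/*
 * Illustrative (hypothetical) call site: the allocation path bumps a
 * counter with ZIOSTAT_BUMP(ziostat_total_allocations); the per-CPU
 * wmsum counters are folded into the kstat lazily by zio_kstats_update()
 * below, only when the kstat is read.
 */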

static kstat_t *zio_ksp;

static inline void __zio_execute(zio_t *zio);

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

static int
zio_kstats_update(kstat_t *ksp, int rw)
{
	zio_stats_t *zs = ksp->ks_data;
	if (rw == KSTAT_WRITE)
		return (EACCES);

	zs->ziostat_total_allocations.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_total_allocations);
	zs->ziostat_alloc_class_fallbacks.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_alloc_class_fallbacks);
	zs->ziostat_gang_writes.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_gang_writes);
	zs->ziostat_gang_multilevel.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_gang_multilevel);
	return (0);
}

void
zio_init(void)
{
	size_t c;

	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	wmsum_init(&ziostat_sums.ziostat_total_allocations, 0);
	wmsum_init(&ziostat_sums.ziostat_alloc_class_fallbacks, 0);
	wmsum_init(&ziostat_sums.ziostat_gang_writes, 0);
	wmsum_init(&ziostat_sums.ziostat_gang_multilevel, 0);
	zio_ksp = kstat_create("zfs", 0, "zio_stats",
	    "misc", KSTAT_TYPE_NAMED, sizeof (zio_stats) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (zio_ksp != NULL) {
		zio_ksp->ks_data = &zio_stats;
		zio_ksp->ks_update = zio_kstats_update;
		kstat_install(zio_ksp);
	}

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t align, cflags, data_cflags;
		char name[32];

		/*
		 * Create cache for each half-power of 2 size, starting from
		 * SPA_MINBLOCKSIZE. It should give us memory space efficiency
		 * of ~7/8, sufficient for transient allocations mostly using
		 * these caches.
		 */
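		/*
		 * For example, with SPA_MINBLOCKSIZE of 512 this creates
		 * caches for 512, 1024, 1536, 2048, 3072, 4096, 6144,
		 * 8192, ... - i.e. every power of two plus the 1.5x
		 * midpoint between successive powers of two.
		 */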
		size_t p2 = size;
		while (!ISP2(p2))
			p2 &= p2 - 1;
		if (!IS_P2ALIGNED(size, p2 / 2))
			continue;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif

		if (IS_P2ALIGNED(size, PAGESIZE))
			align = PAGESIZE;
		else
			align = 1 << (highbit64(size ^ (size - 1)) - 1);

		cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
		    KMC_NODEBUG : 0;
		data_cflags = KMC_NODEBUG;
		if (abd_size_alloc_linear(size)) {
			cflags |= KMC_RECLAIMABLE;
			data_cflags |= KMC_RECLAIMABLE;
		}
		if (cflags == data_cflags) {
			/*
			 * Resulting kmem caches would be identical.
			 * Save memory by creating only one.
			 */
			(void) snprintf(name, sizeof (name),
			    "zio_buf_comb_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size, align,
			    NULL, NULL, NULL, NULL, NULL, cflags);
			zio_data_buf_cache[c] = zio_buf_cache[c];
			continue;
		}
		(void) snprintf(name, sizeof (name), "zio_buf_%lu",
		    (ulong_t)size);
		zio_buf_cache[c] = kmem_cache_create(name, size, align,
		    NULL, NULL, NULL, NULL, NULL, cflags);

		(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
		    (ulong_t)size);
		zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
		    NULL, NULL, NULL, NULL, NULL, data_cflags);
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}
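
	/*
	 * Continuing the example above: no caches exist for 4608, 5120 or
	 * 5632 bytes, so after this backfill pass those slots all alias
	 * the 6144-byte cache, and any allocation size maps to the
	 * smallest cache that can hold it.
	 */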

	zio_inject_init();

	lz4_init();
}

void
zio_fini(void)
{
	size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;

#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	for (size_t i = 0; i < n; i++) {
		if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
			(void) printf("zio_fini: [%d] %llu != %llu\n",
			    (int)((i + 1) << SPA_MINBLOCKSHIFT),
			    (long long unsigned)zio_buf_cache_allocs[i],
			    (long long unsigned)zio_buf_cache_frees[i]);
	}
#endif

	/*
	 * The same kmem cache can show up multiple times in both zio_buf_cache
	 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
	 * sort it out.
	 */
	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_buf_cache[j])
				zio_buf_cache[j] = NULL;
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_data_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		VERIFY0P(zio_buf_cache[i]);
		VERIFY0P(zio_data_buf_cache[i]);
	}

	if (zio_ksp != NULL) {
		kstat_delete(zio_ksp);
		zio_ksp = NULL;
	}

	wmsum_fini(&ziostat_sums.ziostat_total_allocations);
	wmsum_fini(&ziostat_sums.ziostat_alloc_class_fallbacks);
	wmsum_fini(&ziostat_sums.ziostat_gang_writes);
	wmsum_fini(&ziostat_sums.ziostat_gang_multilevel);

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	lz4_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

#if defined(ZFS_DEBUG) && defined(_KERNEL)
#define	ZFS_ZIO_BUF_CANARY 1
#endif

#ifdef ZFS_ZIO_BUF_CANARY
static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;

/*
 * Use empty space after the buffer to detect overflows.
 *
 * Since zio_init() creates kmem caches only for a certain set of buffer
 * sizes, allocations of different sizes may have some unused space after
 * the data. Filling part of that space with a known pattern on allocation
 * and checking it on free should allow us to detect some buffer overflows.
 */
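/*
 * Illustrative example (sizes assume SPA_MINBLOCKSIZE of 512): a
 * zio_buf_alloc(5120) request is satisfied from the shared 6144-byte
 * cache, so the unused words immediately after the 5120 data bytes are
 * stamped with zio_buf_canary here and re-verified on free.
 */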

static void
zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
	ulong_t *canary = p + off / sizeof (ulong_t);
	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
	    cache[c] == cache[c + 1])
		asize = (c + 2) << SPA_MINBLOCKSHIFT;
	for (; off < asize; canary++, off += sizeof (ulong_t))
		*canary = zio_buf_canary;
}

static void
zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
	ulong_t *canary = p + off / sizeof (ulong_t);
	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
	    cache[c] == cache[c + 1])
		asize = (c + 2) << SPA_MINBLOCKSHIFT;
	for (; off < asize; canary++, off += sizeof (ulong_t)) {
		if (unlikely(*canary != zio_buf_canary)) {
			PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
			    p, size, (canary - p) * sizeof (ulong_t),
			    *canary, zio_buf_canary);
		}
	}
}
#endif

/*
 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif

	void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_put_canary(p, size, zio_buf_cache, c);
#endif
	return (p);
}

/*
 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
 * crashdump if the kernel panics. This exists to limit the amount of ZFS
 * data that shows up in a kernel crashdump, reducing the amount of kernel
 * heap dumped to disk when the kernel panics.
 */
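/*
 * Illustrative sketch (hypothetical caller): buffers must be freed with
 * the same size they were allocated with, since the size is what selects
 * the backing kmem cache:
 *
 *	void *db = zio_data_buf_alloc(lsize);
 *	...
 *	zio_data_buf_free(db, lsize);
 */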
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_put_canary(p, size, zio_data_buf_cache, c);
#endif
	return (p);
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif

#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_check_canary(buf, size, zio_buf_cache, c);
#endif
	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
#endif
	kmem_cache_free(zio_data_buf_cache[c], buf);
}

static void
zio_abd_free(void *abd, size_t size)
{
	(void) size;
	abd_free((abd_t *)abd);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}
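
/*
 * Sketch of a typical pairing (simplified; see the read pipeline for the
 * real call sites): a read of a compressed block allocates a psize-sized
 * buffer for the raw data and pushes it with zio_decompress below as the
 * callback, so that zio_pop_transforms() later inflates into the caller's
 * original lsize buffer:
 *
 *	abd_t *cdata = abd_alloc_sametype(zio->io_abd, psize);
 *	zio_push_transform(zio, cdata, psize, psize, zio_decompress);
 */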

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks, decompression, and decryption
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, data, zio->io_size, size,
		    &zio->io_prop.zp_complevel);

		if (zio_injection_enabled && ret == 0)
			ret = zio_handle_fault_injection(zio, EINVAL);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
	int ret;
	void *tmp;
	blkptr_t *bp = zio->io_bp;
	spa_t *spa = zio->io_spa;
	uint64_t dsobj = zio->io_bookmark.zb_objset;
	uint64_t lsize = BP_GET_LSIZE(bp);
	dmu_object_type_t ot = BP_GET_TYPE(bp);
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	ASSERT(BP_USES_CRYPT(bp));
	ASSERT3U(size, !=, 0);

	if (zio->io_error != 0)
		return;

	/*
	 * Verify the cksum of MACs stored in an indirect bp. It will always
	 * be possible to verify this since it does not require an encryption
	 * key.
	 */
	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
		zio_crypt_decode_mac_bp(bp, mac);

		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
			/*
			 * We haven't decompressed the data yet, but
			 * zio_crypt_do_indirect_mac_checksum() requires
			 * decompressed data to be able to parse out the MACs
			 * from the indirect block. We decompress it now and
			 * throw away the result after we are finished.
			 */
			abd_t *abd = abd_alloc_linear(lsize, B_TRUE);
			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
			    zio->io_abd, abd, zio->io_size, lsize,
			    &zio->io_prop.zp_complevel);
			if (ret != 0) {
				abd_free(abd);
				ret = SET_ERROR(EIO);
				goto error;
			}
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    abd, lsize, BP_SHOULD_BYTESWAP(bp), mac);
			abd_free(abd);
		} else {
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
		}
		abd_copy(data, zio->io_abd, size);

		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
			ret = zio_handle_decrypt_injection(spa,
			    &zio->io_bookmark, ot, ECKSUM);
		}
		if (ret != 0)
			goto error;

		return;
	}

	/*
	 * If this is an authenticated block, just check the MAC. It would be
	 * nice to separate this out into its own flag, but when this was done,
	 * we had run out of bits in what is now zio_flag_t. Future cleanup
	 * could make this a flag bit.
	 */
	if (BP_IS_AUTHENTICATED(bp)) {
		if (ot == DMU_OT_OBJSET) {
			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
		} else {
			zio_crypt_decode_mac_bp(bp, mac);
			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
			    zio->io_abd, size, mac);
			if (zio_injection_enabled && ret == 0) {
				ret = zio_handle_decrypt_injection(spa,
				    &zio->io_bookmark, ot, ECKSUM);
			}
		}
		abd_copy(data, zio->io_abd, size);

		if (ret != 0)
			goto error;

		return;
	}

	zio_crypt_decode_params_bp(bp, salt, iv);

	if (ot == DMU_OT_INTENT_LOG) {
		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
		zio_crypt_decode_mac_zil(tmp, mac);
		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
	} else {
		zio_crypt_decode_mac_bp(bp, mac);
	}

	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
	    zio->io_abd, &no_crypt);
	if (no_crypt)
		abd_copy(data, zio->io_abd, size);

	if (ret != 0)
		goto error;

	return;

error:
	/* assert that the key was found unless this was speculative */
	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));

	/*
	 * If there was a decryption / authentication error return EIO as
	 * the io_error. If this was not a speculative zio, create an ereport.
	 */
	if (ret == ECKSUM) {
		zio->io_error = SET_ERROR(EIO);
		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
			spa_log_error(spa, &zio->io_bookmark,
			    BP_GET_PHYSICAL_BIRTH(zio->io_bp));
			(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
			    spa, NULL, &zio->io_bookmark, zio, 0);
		}
	} else {
		zio->io_error = ret;
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	ASSERT(MUTEX_HELD(&pio->io_lock));

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}

static void
zio_add_child_impl(zio_t *pio, zio_t *cio, boolean_t first)
{
	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	/* Parent should not have READY stage if child doesn't have it. */
	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
	    (cio->io_child_type != ZIO_CHILD_VDEV),
	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);

	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&pio->io_lock);

	if (first)
		ASSERT(list_is_empty(&cio->io_parent_list));
	else
		mutex_enter(&cio->io_lock);

	ASSERT0(pio->io_state[ZIO_WAIT_DONE]);

	uint64_t *countp = pio->io_children[cio->io_child_type];
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		countp[w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	if (!first)
		mutex_exit(&cio->io_lock);

	mutex_exit(&pio->io_lock);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_add_child_impl(pio, cio, B_FALSE);
}

static void
zio_add_child_first(zio_t *pio, zio_t *cio)
{
	zio_add_child_impl(pio, cio, B_TRUE);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT0P(zio->io_stall);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
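			/*
			 * Stages are one-hot bits, so backing io_stage up
			 * by one bit lets zio_execute() advance back into
			 * the current stage once the stalled child count
			 * drains to zero and the zio is re-dispatched.
			 */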
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}

__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
    zio_t **next_to_executep)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_post |= zio->io_post;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);

		/*
		 * If we can tell the caller to execute this parent next, do
		 * so. We do this if the parent's zio type matches the child's
		 * type, or if it's a zio_null() with no done callback, and so
		 * has no actual work to do. Otherwise dispatch the parent zio
		 * in its own taskq.
		 *
		 * Having the caller execute the parent when possible reduces
		 * locking on the zio taskq's, reduces context switch
		 * overhead, and has no recursion penalty. Note that one
		 * read from disk typically causes at least 3 zio's: a
		 * zio_null(), the logical zio_read(), and then a physical
		 * zio. When the physical ZIO completes, we are able to call
		 * zio_done() on all 3 of these zio's from one invocation of
		 * zio_execute() by returning the parent back to
		 * zio_execute(). Since the parent isn't executed until this
		 * thread returns back to zio_execute(), the caller should do
		 * so promptly.
		 *
		 * In other cases, dispatching the parent prevents
		 * overflowing the stack when we have deeply nested
		 * parent-child relationships, as we do with the "mega zio"
		 * of writes for spa_sync(), and the chain of ZIL blocks.
		 */
		if (next_to_executep != NULL && *next_to_executep == NULL &&
		    (pio->io_type == zio->io_type ||
		    (pio->io_type == ZIO_TYPE_NULL && !pio->io_done))) {
			*next_to_executep = pio;
		} else {
			zio_taskq_dispatch(pio, type, B_FALSE);
		}
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;
	const zbookmark_phys_t *zb1 = &z1->io_bookmark;
	const zbookmark_phys_t *zb2 = &z2->io_bookmark;

	int cmp = TREE_CMP(zb1->zb_objset, zb2->zb_objset);
	if (cmp != 0)
		return (cmp);

	cmp = TREE_CMP(zb1->zb_object, zb2->zb_object);
	if (cmp != 0)
		return (cmp);

	cmp = TREE_CMP(zb1->zb_level, zb2->zb_level);
	if (cmp != 0)
		return (cmp);

	cmp = TREE_CMP(zb1->zb_blkid, zb2->zb_blkid);
	if (cmp != 0)
		return (cmp);

	return (TREE_PCMP(z1, z2));
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    zio_flag_t flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage,
    enum zio_stage pipeline)
{
	zio_t *zio;

	IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
	ASSERT0(P2PHASE(psize, SPA_MINBLOCKSIZE));
	ASSERT0(P2PHASE(offset, SPA_MINBLOCKSIZE));

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	memset(zio, 0, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT) {
			zio->io_bp_copy = *bp;
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		} else {
			zio->io_bp = (blkptr_t *)bp;
		}
		zio->io_bp_orig = *bp;
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
		if (flags & ZIO_FLAG_PREALLOCATED) {
			BP_ZERO_DVAS(zio->io_bp);
			BP_SET_BIRTH(zio->io_bp, 0, 0);
		}
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;
	zio->io_allocator = ZIO_ALLOCATOR_NONE;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
	    (pipeline & ZIO_STAGE_READY) == 0;
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		zio->io_metaslab_class = pio->io_metaslab_class;
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child_first(pio, zio);
	}

	taskq_init_ent(&zio->io_tqent);

	return (zio);
}

void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

/*
 * A ZIO intended to sit between others. It provides synchronization at
 * the READY and DONE pipeline stages and calls the respective callbacks.
 */
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, zio_flag_t flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

/*
 * A ZIO intended to be the root of a tree. Unlike a null ZIO, it does not
 * have a READY pipeline stage (it is ready on creation), so it should not
 * be used as a child of any ZIO that may need to wait for its
 * grandchildren's READY stage (i.e. any other ZIO type).
 */
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;

	zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);

	return (zio);
}
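
/*
 * Typical usage sketch (hypothetical caller): create a root, hang
 * children off it with zio_nowait(), then wait for the whole tree:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, abd, size, done, priv,
 *	    ZIO_PRIORITY_SYNC_READ, 0, zb));
 *	error = zio_wait(rio);
 */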

static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
    enum blk_verify_flag blk_verify, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("bad blkptr at %px: "
	    "DVA[0]=%#llx/%#llx "
	    "DVA[1]=%#llx/%#llx "
	    "DVA[2]=%#llx/%#llx "
	    "prop=%#llx "
	    "prop2=%#llx "
	    "pad=%#llx "
	    "phys_birth=%#llx "
	    "birth=%#llx "
	    "fill=%#llx "
	    "cksum=%#llx/%#llx/%#llx/%#llx",
	    bp,
	    (long long)bp->blk_dva[0].dva_word[0],
	    (long long)bp->blk_dva[0].dva_word[1],
	    (long long)bp->blk_dva[1].dva_word[0],
	    (long long)bp->blk_dva[1].dva_word[1],
	    (long long)bp->blk_dva[2].dva_word[0],
	    (long long)bp->blk_dva[2].dva_word[1],
	    (long long)bp->blk_prop,
	    (long long)bp->blk_prop2,
	    (long long)bp->blk_pad,
	    (long long)BP_GET_RAW_PHYSICAL_BIRTH(bp),
	    (long long)BP_GET_LOGICAL_BIRTH(bp),
	    (long long)bp->blk_fill,
	    (long long)bp->blk_cksum.zc_word[0],
	    (long long)bp->blk_cksum.zc_word[1],
	    (long long)bp->blk_cksum.zc_word[2],
	    (long long)bp->blk_cksum.zc_word[3]);
	switch (blk_verify) {
	case BLK_VERIFY_HALT:
		zfs_panic_recover("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_LOG:
		zfs_dbgmsg("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_ONLY:
		break;
	}

	return (1);
}

/*
 * Verify that the block pointer fields contain reasonable values. This
 * means it only contains known object types, checksum/compression
 * identifiers, block sizes within the maximum allowed limits, valid DVAs,
 * etc.
 *
 * If everything checks out, 0 is returned. The blk_verify argument
 * controls the behavior when an invalid field is detected.
 *
 * Values for blk_verify_flag:
 *	BLK_VERIFY_ONLY: evaluate the block
 *	BLK_VERIFY_LOG: evaluate the block and log problems
 *	BLK_VERIFY_HALT: call zfs_panic_recover on error
 *
 * Values for blk_config_flag:
 *	BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
 *	BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
 *	    obtained for reader
 *	BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
 *	    performance
 */
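/*
 * For example, zio_free() below validates its bp with
 * zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT):
 * SCL_VDEV is taken as reader for the per-DVA checks, and a damaged
 * block pointer triggers zfs_panic_recover().
 */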
int
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
    enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
{
	int errors = 0;

	if (unlikely(!DMU_OT_IS_VALID(BP_GET_TYPE(bp)))) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (unlikely(BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (unlikely(BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_IS_EMBEDDED(bp)) {
		if (unlikely(BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
		if (unlikely(BPE_GET_PSIZE(bp) > BPE_PAYLOAD_SIZE)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px has invalid PSIZE %llu",
			    bp, (longlong_t)BPE_GET_PSIZE(bp));
		}
		return (errors ? ECKSUM : 0);
	} else if (BP_IS_HOLE(bp)) {
		/*
		 * Holes are allowed (expected, even) to have no DVAs, no
		 * checksum, and no psize.
		 */
		return (errors ? ECKSUM : 0);
	} else if (unlikely(!DVA_IS_VALID(&bp->blk_dva[0]))) {
		/* Non-hole, non-embedded BPs _must_ have at least one DVA */
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has no valid DVAs", bp);
	}
	if (unlikely(BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (unlikely(BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted. This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (unlikely(!spa->spa_trust_config))
		return (errors ? ECKSUM : 0);

	switch (blk_config) {
	case BLK_CONFIG_HELD:
		ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
		break;
	case BLK_CONFIG_NEEDED:
		spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
		break;
	case BLK_CONFIG_NEEDED_TRY:
		if (!spa_config_tryenter(spa, SCL_VDEV, bp, RW_READER))
			return (EBUSY);
		break;
	case BLK_CONFIG_SKIP:
		return (errors ? ECKSUM : 0);
	default:
		panic("invalid blk_config %u", blk_config);
	}

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the logical birth
	 * and physical birth are not too large. However,
	 * spa_freeze() allows the birth time of log blocks (and
	 * dmu_sync()-ed blocks that are in the log) to be arbitrarily
	 * large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		const dva_t *dva = &bp->blk_dva[i];
		uint64_t vdevid = DVA_GET_VDEV(dva);

		if (unlikely(vdevid >= spa->spa_root_vdev->vdev_children)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (unlikely(vd == NULL)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (unlikely(vd->vdev_ops == &vdev_hole_ops)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has hole VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(dva);
		uint64_t asize = DVA_GET_ASIZE(dva);
		if (DVA_GET_GANG(dva))
			asize = vdev_gang_header_asize(vd);
		if (unlikely(offset + asize > vd->vdev_asize)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
	if (blk_config == BLK_CONFIG_NEEDED || blk_config ==
	    BLK_CONFIG_NEEDED_TRY)
		spa_config_exit(spa, SCL_VDEV, bp);

	return (errors ? ECKSUM : 0);
}

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	(void) bp;
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops) {
		return (B_FALSE);
	}

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (DVA_GET_GANG(dva))
		asize = vdev_gang_header_asize(vd);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_GET_PHYSICAL_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;
	enum zio_stage pipeline = zp->zp_direct_write == B_TRUE ?
	    ZIO_DIRECT_WRITE_PIPELINE : (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE;

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, pipeline);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP. But we may need the data to
	 * verify a dedup hit (if requested). In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim). Encrypted
	 * dedup blocks need data as well so we also disable dedup in this
	 * case.
	 */
	if (data == NULL &&
	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, int gang_copies,
    boolean_t nopwrite, boolean_t brtwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
	ASSERT(!brtwrite || !nopwrite);

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync(), keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_brtwrite = brtwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_prop.zp_gang_copies = gang_copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);

	/*
	 * The check for EMBEDDED is a performance optimization. We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;

	/*
	 * Frees that are for the currently-syncing txg, that are not going
	 * to be deferred, and that will not need to do a read (i.e. not
	 * GANG or DEDUP) can be processed immediately. Otherwise, put them
	 * on the in-memory list for later processing.
	 *
	 * Note that we only defer frees after zfs_sync_pass_deferred_free
	 * when the log space map feature is disabled. [see relevant comment
	 * in spa_sync_iterate_to_convergence()]
	 */
	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
	    brt_maybe_exists(spa, bp)) {
		metaslab_check_free(spa, bp);
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0P(zio_free_sync(NULL, spa, txg, bp, 0));
	}
}

/*
 * To improve performance, this function may return NULL if we were able
 * to do the free immediately. This avoids the cost of creating a zio
 * (and linking it to the parent, etc).
 */
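/*
 * Callers must therefore be prepared for a NULL return; e.g. zio_free()
 * above takes the synchronous path with
 * VERIFY0P(zio_free_sync(NULL, spa, txg, bp, 0)), asserting that the
 * free completed without creating a zio.
 */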
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_flag_t flags)
{
	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);

	if (BP_IS_EMBEDDED(bp))
		return (NULL);

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);
	dsl_scan_freed(spa, bp);

	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    brt_maybe_exists(spa, bp)) {
		/*
		 * GANG, DEDUP and BRT blocks can induce a read (for the gang
		 * block header, the DDT or the BRT), so issue them
		 * asynchronously so that this thread is not tied up.
		 */
		enum zio_stage stage =
		    ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;

		return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
		    BP_GET_PSIZE(bp), NULL, NULL,
		    ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
		    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
	} else {
		metaslab_free(spa, bp, txg, B_FALSE);
		return (NULL);
	}
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;

	(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
	    BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block. Claims are needed
	 * to support immediate writes in the intent log. The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed. Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(8) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, enum trim_flag trim_flags)
{
	zio_t *zio;

	ASSERT0(vd->vdev_children);
	ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	ASSERT3U(size, !=, 0);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
	    private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
	zio->io_trim_flags = trim_flags;

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT0(vd->vdev_children);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT0(vd->vdev_children);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		abd_t *wbuf = abd_alloc_sametype(data, size);
		abd_copy(wbuf, data, size);

		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    zio_flag_t flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	/*
	 * vdev child I/Os do not propagate their error to the parent.
	 * Therefore, for correct operation the caller *must* check for
	 * and handle the error in the child i/o's done callback.
	 * The only exceptions are i/os that we don't care about
	 * (OPTIONAL or REPAIR).
	 */
	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
	    done != NULL);

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not. This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
		/*
		 * We never allow the mirror VDEV to attempt reading from any
		 * additional data copies after the first Direct I/O checksum
		 * verify failure. This is to avoid bad data being written out
		 * through the mirror during self healing. See comment in
		 * vdev_mirror_io_done() for more details.
		 */
		ASSERT0(pio->io_post & ZIO_POST_DIO_CHKSUM_ERR);
1641 } else if (type == ZIO_TYPE_WRITE &&
1642 pio->io_prop.zp_direct_write == B_TRUE) {
1643 /*
1644 * By default we will only verify checksums for Direct I/O
1645 * writes on Linux. FreeBSD is able to place user pages under
1646 * write protection before issuing them to the ZIO pipeline.
1647 *
1648 * Checksum validation errors will only be reported through
1649 * the top-level VDEV, which is set by this child ZIO.
1650 */
1651 ASSERT3P(bp, !=, NULL);
1652 ASSERT3U(pio->io_child_type, ==, ZIO_CHILD_LOGICAL);
1653 pipeline |= ZIO_STAGE_DIO_CHECKSUM_VERIFY;
1654 }
1655
1656 if (vd->vdev_ops->vdev_op_leaf) {
1657 ASSERT0(vd->vdev_children);
1658 offset += VDEV_LABEL_START_SIZE;
1659 }
1660
1661 flags |= ZIO_VDEV_CHILD_FLAGS(pio);
1662
1663 /*
1664 * If we've decided to do a repair, the write is not speculative --
1665 * even if the original read was. Rebuild is an exception since we
1666 * cannot always ensure its data integrity.
1667 */
1668 if ((flags & ZIO_FLAG_IO_REPAIR) &&
1669 pio->io_priority != ZIO_PRIORITY_REBUILD)
1670 flags &= ~ZIO_FLAG_SPECULATIVE;
1671
1672 /*
1673 * If we're creating a child I/O that is not associated with a
1674 * top-level vdev, then the child zio is not an allocating I/O.
1675 * If this is a retried I/O then we ignore it since we will
1676 * have already processed the original allocating I/O.
1677 */
1678 if (flags & ZIO_FLAG_ALLOC_THROTTLED &&
1679 (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY)) &&
1680 type == ZIO_TYPE_WRITE) {
1681 ASSERT(pio->io_metaslab_class != NULL);
1682 ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
1683 ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
1684 ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
1685 ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
1686 pio->io_child_type == ZIO_CHILD_GANG);
1687
1688 flags &= ~ZIO_FLAG_ALLOC_THROTTLED;
1689 }
1690
1691 zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
1692 done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
1693 ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
1694 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
1695
1696 return (zio);
1697 }
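/*
 * Illustrative sketch ("my_child_done", "my_handle_failure", and
 * "my_state" are hypothetical): since the error does not propagate, a
 * caller provides a done callback and inspects io_error itself:
 *
 *	static void
 *	my_child_done(zio_t *zio)
 *	{
 *		if (zio->io_error != 0)
 *			my_handle_failure(zio->io_private, zio->io_error);
 *	}
 *
 *	zio_nowait(zio_vdev_child_io(pio, pio->io_bp, child_vd, offset,
 *	    abd, size, ZIO_TYPE_READ, pio->io_priority, 0,
 *	    my_child_done, my_state));
 */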
1698
1699 zio_t *
1700 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
1701 zio_type_t type, zio_priority_t priority, zio_flag_t flags,
1702 zio_done_func_t *done, void *private)
1703 {
1704 zio_t *zio;
1705
1706 ASSERT(vd->vdev_ops->vdev_op_leaf);
1707
1708 zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
1709 data, size, size, done, private, type, priority,
1710 flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
1711 vd, offset, NULL,
1712 ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
1713
1714 return (zio);
1715 }
1716
1717
1718 /*
1719 * Send a flush command to the given vdev. Unlike most zio creation functions,
1720 * the flush zios are issued immediately. You can wait on pio to block until
1721 * the flushes complete.
1722 */
1723 void
1724 zio_flush(zio_t *pio, vdev_t *vd)
1725 {
1726 const zio_flag_t flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
1727 ZIO_FLAG_DONT_RETRY;
1728
1729 if (vd->vdev_nowritecache)
1730 return;
1731
1732 if (vd->vdev_children == 0) {
1733 zio_nowait(zio_create(pio, vd->vdev_spa, 0, NULL, NULL, 0, 0,
1734 NULL, NULL, ZIO_TYPE_FLUSH, ZIO_PRIORITY_NOW, flags, vd, 0,
1735 NULL, ZIO_STAGE_OPEN, ZIO_FLUSH_PIPELINE));
1736 } else {
1737 for (uint64_t c = 0; c < vd->vdev_children; c++)
1738 zio_flush(pio, vd->vdev_child[c]);
1739 }
1740 }
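/*
 * Illustrative usage (a sketch; "spa" and "rvd" stand in for the pool
 * and its root vdev): because the flush zios are issued immediately,
 * callers typically collect them under a root zio and wait once:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, 0);
 *	zio_flush(rio, rvd);
 *	(void) zio_wait(rio);
 */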
1741
1742 void
1743 zio_shrink(zio_t *zio, uint64_t size)
1744 {
1745 ASSERT0P(zio->io_executor);
1746 ASSERT3U(zio->io_orig_size, ==, zio->io_size);
1747 ASSERT3U(size, <=, zio->io_size);
1748
1749 /*
1750 * We don't shrink for raidz because of problems with the
1751 * reconstruction when reading back less than the block size.
1752 * Note, BP_IS_RAIDZ() assumes no compression.
1753 */
1754 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1755 if (!BP_IS_RAIDZ(zio->io_bp)) {
1756 /* we are not doing a raw write */
1757 ASSERT3U(zio->io_size, ==, zio->io_lsize);
1758 zio->io_orig_size = zio->io_size = zio->io_lsize = size;
1759 }
1760 }
1761
1762 /*
1763 * Round the provided allocation size up to a value that can be allocated
1764 * by at least some vdev(s) in the pool with minimal or no additional
1765 * padding, and without extra space usage on others.
1766 */
1767 static uint64_t
1768 zio_roundup_alloc_size(spa_t *spa, uint64_t size)
1769 {
1770 if (size > spa->spa_min_alloc)
1771 return (roundup(size, spa->spa_gcd_alloc));
1772 return (spa->spa_min_alloc);
1773 }
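/*
 * Worked example (hypothetical pool where every vdev has ashift=12,
 * so spa_min_alloc == spa_gcd_alloc == 4096): a 6000-byte request
 * rounds up to roundup(6000, 4096) == 8192, while any request of
 * 4096 bytes or less simply returns spa_min_alloc == 4096. With
 * mixed ashifts, the rounding is based on the GCD of the vdevs'
 * allocation sizes instead.
 */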
1774
1775 size_t
1776 zio_get_compression_max_size(enum zio_compress compress, uint64_t gcd_alloc,
1777 uint64_t min_alloc, size_t s_len)
1778 {
1779 size_t d_len;
1780
1781 /* minimum 12.5% must be saved (legacy value, may be changed later) */
1782 d_len = s_len - (s_len >> 3);
1783
1784 /* ZLE can't use exactly d_len bytes; it needs more, so ignore it */
1785 if (compress == ZIO_COMPRESS_ZLE)
1786 return (d_len);
1787
1788 d_len = d_len - d_len % gcd_alloc;
1789
1790 if (d_len < min_alloc)
1791 return (BPE_PAYLOAD_SIZE);
1792 return (d_len);
1793 }
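/*
 * Worked example (assuming gcd_alloc == min_alloc == 4096): for a
 * 128K block, d_len = 131072 - 16384 = 114688, already a multiple of
 * 4096, so compression must fit the data in at most 114688 bytes to
 * be worthwhile. For a 4K block, d_len = 3584 rounds down to 0, which
 * is below min_alloc, so only an embedded-data BP of up to
 * BPE_PAYLOAD_SIZE bytes can save space.
 */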
1794
1795 /*
1796 * ==========================================================================
1797 * Prepare to read and write logical blocks
1798 * ==========================================================================
1799 */
1800
1801 static zio_t *
1802 zio_read_bp_init(zio_t *zio)
1803 {
1804 blkptr_t *bp = zio->io_bp;
1805 uint64_t psize =
1806 BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
1807
1808 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1809
1810 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
1811 zio->io_child_type == ZIO_CHILD_LOGICAL &&
1812 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1813 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1814 psize, psize, zio_decompress);
1815 }
1816
1817 if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
1818 BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
1819 zio->io_child_type == ZIO_CHILD_LOGICAL) {
1820 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1821 psize, psize, zio_decrypt);
1822 }
1823
1824 if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
1825 int psize = BPE_GET_PSIZE(bp);
1826 void *data = abd_borrow_buf(zio->io_abd, psize);
1827
1828 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1829 decode_embedded_bp_compressed(bp, data);
1830 abd_return_buf_copy(zio->io_abd, data, psize);
1831 } else {
1832 ASSERT(!BP_IS_EMBEDDED(bp));
1833 }
1834
1835 if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
1836 zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
1837
1838 return (zio);
1839 }
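/*
 * Note on ordering: for a logical read of a block that is both
 * compressed and encrypted, the code above pushes zio_decompress
 * first and zio_decrypt second. Transforms are popped in LIFO order
 * at I/O completion, so the data is decrypted before it is
 * decompressed, mirroring the write path (compress, then encrypt).
 */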
1840
1841 static zio_t *
1842 zio_write_bp_init(zio_t *zio)
1843 {
1844 if (!IO_IS_ALLOCATING(zio))
1845 return (zio);
1846
1847 ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1848
1849 if (zio->io_bp_override) {
1850 blkptr_t *bp = zio->io_bp;
1851 zio_prop_t *zp = &zio->io_prop;
1852
1853 ASSERT(BP_GET_BIRTH(bp) != zio->io_txg);
1854
1855 *bp = *zio->io_bp_override;
1856 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1857
1858 if (zp->zp_brtwrite)
1859 return (zio);
1860
1861 ASSERT(!BP_GET_DEDUP(zio->io_bp_override));
1862
1863 if (BP_IS_EMBEDDED(bp))
1864 return (zio);
1865
1866 /*
1867 * If we've been overridden and nopwrite is set then
1868 * set the flag accordingly to indicate that a nopwrite
1869 * has already occurred.
1870 */
1871 if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
1872 ASSERT(!zp->zp_dedup);
1873 ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
1874 zio->io_flags |= ZIO_FLAG_NOPWRITE;
1875 return (zio);
1876 }
1877
1878 ASSERT(!zp->zp_nopwrite);
1879
1880 if (BP_IS_HOLE(bp) || !zp->zp_dedup)
1881 return (zio);
1882
1883 ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
1884 ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
1885
1886 if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
1887 !zp->zp_encrypt) {
1888 BP_SET_DEDUP(bp, 1);
1889 zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
1890 return (zio);
1891 }
1892
1893 /*
1894 * We were unable to handle this as an override bp, treat
1895 * it as a regular write I/O.
1896 */
1897 zio->io_bp_override = NULL;
1898 *bp = zio->io_bp_orig;
1899 zio->io_pipeline = zio->io_orig_pipeline;
1900 }
1901
1902 return (zio);
1903 }
1904
1905 static zio_t *
1906 zio_write_compress(zio_t *zio)
1907 {
1908 spa_t *spa = zio->io_spa;
1909 zio_prop_t *zp = &zio->io_prop;
1910 enum zio_compress compress = zp->zp_compress;
1911 blkptr_t *bp = zio->io_bp;
1912 uint64_t lsize = zio->io_lsize;
1913 uint64_t psize = zio->io_size;
1914 uint32_t pass = 1;
1915
1916 /*
1917 * If our children haven't all reached the ready stage,
1918 * wait for them and then repeat this pipeline stage.
1919 */
1920 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
1921 ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
1922 return (NULL);
1923 }
1924
1925 if (!IO_IS_ALLOCATING(zio))
1926 return (zio);
1927
1928 if (zio->io_children_ready != NULL) {
1929 /*
1930 * Now that all our children are ready, run the callback
1931 * associated with this zio in case it wants to modify the
1932 * data to be written.
1933 */
1934 ASSERT3U(zp->zp_level, >, 0);
1935 zio->io_children_ready(zio);
1936 }
1937
1938 ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1939 ASSERT0P(zio->io_bp_override);
1940
1941 if (!BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) == zio->io_txg) {
1942 /*
1943 * We're rewriting an existing block, which means we're
1944 * working on behalf of spa_sync(). For spa_sync() to
1945 * converge, it must eventually be the case that we don't
1946 * have to allocate new blocks. But compression changes
1947 * the blocksize, which forces a reallocate, and makes
1948 * convergence take longer. Therefore, after the first
1949 * few passes, stop compressing to ensure convergence.
1950 */
1951 pass = spa_sync_pass(spa);
1952
1953 ASSERT(zio->io_txg == spa_syncing_txg(spa));
1954 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1955 ASSERT(!BP_GET_DEDUP(bp));
1956
1957 if (pass >= zfs_sync_pass_dont_compress)
1958 compress = ZIO_COMPRESS_OFF;
1959
1960 /* Make sure someone doesn't change their mind on overwrites */
1961 ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) ||
1962 MIN(zp->zp_copies, spa_max_replication(spa))
1963 == BP_GET_NDVAS(bp));
1964 }
1965
1966 /* If it's a compressed write that is not raw, compress the buffer. */
1967 if (compress != ZIO_COMPRESS_OFF &&
1968 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1969 abd_t *cabd = NULL;
1970 if (abd_cmp_zero(zio->io_abd, lsize) == 0)
1971 psize = 0;
1972 else if (compress == ZIO_COMPRESS_EMPTY)
1973 psize = lsize;
1974 else
1975 psize = zio_compress_data(compress, zio->io_abd, &cabd,
1976 lsize,
1977 zio_get_compression_max_size(compress,
1978 spa->spa_gcd_alloc, spa->spa_min_alloc, lsize),
1979 zp->zp_complevel);
1980 if (psize == 0) {
1981 compress = ZIO_COMPRESS_OFF;
1982 } else if (psize >= lsize) {
1983 compress = ZIO_COMPRESS_OFF;
1984 if (cabd != NULL)
1985 abd_free(cabd);
1986 } else if (psize <= BPE_PAYLOAD_SIZE && !zp->zp_encrypt &&
1987 zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
1988 spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
1989 void *cbuf = abd_borrow_buf_copy(cabd, lsize);
1990 encode_embedded_bp_compressed(bp,
1991 cbuf, compress, lsize, psize);
1992 BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
1993 BP_SET_TYPE(bp, zio->io_prop.zp_type);
1994 BP_SET_LEVEL(bp, zio->io_prop.zp_level);
1995 abd_return_buf(cabd, cbuf, lsize);
1996 abd_free(cabd);
1997 BP_SET_LOGICAL_BIRTH(bp, zio->io_txg);
1998 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1999 ASSERT(spa_feature_is_active(spa,
2000 SPA_FEATURE_EMBEDDED_DATA));
2001 return (zio);
2002 } else {
2003 /*
2004 * Round the compressed size up to the minimum allocation
2005 * size of the smallest-ashift device, and zero the
2006 * tail. This ensures that the compressed size of the
2007 * BP (and thus the compressratio property) is correct,
2008 * in that we charge for the padding used to fill out
2009 * the last sector.
2010 */
2011 size_t rounded = (size_t)zio_roundup_alloc_size(spa,
2012 psize);
2013 if (rounded >= lsize) {
2014 compress = ZIO_COMPRESS_OFF;
2015 abd_free(cabd);
2016 psize = lsize;
2017 } else {
2018 abd_zero_off(cabd, psize, rounded - psize);
2019 psize = rounded;
2020 zio_push_transform(zio, cabd,
2021 psize, lsize, NULL);
2022 }
2023 }
2024
2025 /*
2026 * We were unable to handle this as an override bp, treat
2027 * it as a regular write I/O.
2028 */
2029 zio->io_bp_override = NULL;
2030 *bp = zio->io_bp_orig;
2031 zio->io_pipeline = zio->io_orig_pipeline;
2032
2033 } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
2034 zp->zp_type == DMU_OT_DNODE) {
2035 /*
2036 * The DMU actually relies on the zio layer's compression
2037 * to free metadnode blocks that have had all contained
2038 * dnodes freed. As a result, even when doing a raw
2039 * receive, we must check whether the block can be compressed
2040 * to a hole.
2041 */
2042 if (abd_cmp_zero(zio->io_abd, lsize) == 0) {
2043 psize = 0;
2044 compress = ZIO_COMPRESS_OFF;
2045 } else {
2046 psize = lsize;
2047 }
2048 } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
2049 !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
2050 /*
2051 * If we are raw receiving an encrypted dataset we should not
2052 * take this codepath because it will change the on-disk block
2053 * and decryption will fail.
2054 */
2055 size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize),
2056 lsize);
2057
2058 if (rounded != psize) {
2059 abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
2060 abd_zero_off(cdata, psize, rounded - psize);
2061 abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
2062 psize = rounded;
2063 zio_push_transform(zio, cdata,
2064 psize, rounded, NULL);
2065 }
2066 } else {
2067 ASSERT3U(psize, !=, 0);
2068 }
2069
2070 /*
2071 * The final pass of spa_sync() must be all rewrites, but the first
2072 * few passes offer a trade-off: allocating blocks defers convergence,
2073 * but newly allocated blocks are sequential, so they can be written
2074 * to disk faster. Therefore, we allow the first few passes of
2075 * spa_sync() to allocate new blocks, but force rewrites after that.
2076 * There should only be a handful of blocks after pass 1 in any case.
2077 */
2078 if (!BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) == zio->io_txg &&
2079 BP_GET_PSIZE(bp) == psize &&
2080 pass >= zfs_sync_pass_rewrite) {
2081 VERIFY3U(psize, !=, 0);
2082 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
2083
2084 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
2085 zio->io_flags |= ZIO_FLAG_IO_REWRITE;
2086 } else {
2087 BP_ZERO(bp);
2088 zio->io_pipeline = ZIO_WRITE_PIPELINE;
2089 }
2090
2091 if (psize == 0) {
2092 if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 &&
2093 spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
2094 BP_SET_LSIZE(bp, lsize);
2095 BP_SET_TYPE(bp, zp->zp_type);
2096 BP_SET_LEVEL(bp, zp->zp_level);
2097 BP_SET_BIRTH(bp, zio->io_txg, 0);
2098 }
2099 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2100 } else {
2101 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
2102 BP_SET_LSIZE(bp, lsize);
2103 BP_SET_TYPE(bp, zp->zp_type);
2104 BP_SET_LEVEL(bp, zp->zp_level);
2105 BP_SET_PSIZE(bp, psize);
2106 BP_SET_COMPRESS(bp, compress);
2107 BP_SET_CHECKSUM(bp, zp->zp_checksum);
2108 BP_SET_DEDUP(bp, zp->zp_dedup);
2109 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
2110 if (zp->zp_dedup) {
2111 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2112 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
2113 ASSERT(!zp->zp_encrypt ||
2114 DMU_OT_IS_ENCRYPTED(zp->zp_type));
2115 zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
2116 }
2117 if (zp->zp_nopwrite) {
2118 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2119 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
2120 zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
2121 }
2122 }
2123 return (zio);
2124 }
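/*
 * Worked example for the sizing logic above (hypothetical 128K block
 * on a pool with a 4K minimum allocation): the compressor is given a
 * budget of 114688 bytes (the mandatory 12.5% saving). If it produces
 * psize = 60000, that is rounded up to 15 * 4096 = 61440, the
 * 1440-byte tail is zeroed, and the BP records PSIZE = 61440. If the
 * data cannot be squeezed into the budget, the write simply proceeds
 * uncompressed with psize = lsize.
 */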
2125
2126 static zio_t *
2127 zio_free_bp_init(zio_t *zio)
2128 {
2129 blkptr_t *bp = zio->io_bp;
2130
2131 if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
2132 if (BP_GET_DEDUP(bp))
2133 zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
2134 }
2135
2136 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
2137
2138 return (zio);
2139 }
2140
2141 /*
2142 * ==========================================================================
2143 * Execute the I/O pipeline
2144 * ==========================================================================
2145 */
2146
2147 static void
2148 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
2149 {
2150 spa_t *spa = zio->io_spa;
2151 zio_type_t t = zio->io_type;
2152
2153 /*
2154 * If we're a config writer or a probe, the normal issue and
2155 * interrupt threads may all be blocked waiting for the config lock.
2156 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
2157 */
2158 if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
2159 t = ZIO_TYPE_NULL;
2160
2161 /*
2162 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
2163 */
2164 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
2165 t = ZIO_TYPE_NULL;
2166
2167 /*
2168 * If this is a high priority I/O, then use the high priority taskq if
2169 * available or cut the line otherwise.
2170 */
2171 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) {
2172 if (spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
2173 q++;
2174 else
2175 cutinline = B_TRUE;
2176 }
2177
2178 ASSERT3U(q, <, ZIO_TASKQ_TYPES);
2179
2180 spa_taskq_dispatch(spa, t, q, zio_execute, zio, cutinline);
2181 }
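/*
 * Example of the priority bump above: a ZIO_PRIORITY_SYNC_WRITE
 * dispatched with q == ZIO_TASKQ_ISSUE moves to ZIO_TASKQ_ISSUE_HIGH
 * when that taskq has threads configured; otherwise it stays on the
 * normal issue taskq but is queued at the front (cutinline == B_TRUE).
 */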
2182
2183 static boolean_t
2184 zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
2185 {
2186 spa_t *spa = zio->io_spa;
2187
2188 taskq_t *tq = taskq_of_curthread();
2189
2190 for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
2191 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
2192 uint_t i;
2193 for (i = 0; i < tqs->stqs_count; i++) {
2194 if (tqs->stqs_taskq[i] == tq)
2195 return (B_TRUE);
2196 }
2197 }
2198
2199 return (B_FALSE);
2200 }
2201
2202 static zio_t *
2203 zio_issue_async(zio_t *zio)
2204 {
2205 ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio));
2206 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
2207 return (NULL);
2208 }
2209
2210 void
2211 zio_interrupt(void *zio)
2212 {
2213 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
2214 }
2215
2216 void
2217 zio_delay_interrupt(zio_t *zio)
2218 {
2219 /*
2220 * The timeout_generic() function isn't defined in userspace, so
2221 * rather than trying to implement the function, the zio delay
2222 * functionality has been disabled for userspace builds.
2223 */
2224
2225 #ifdef _KERNEL
2226 /*
2227 * If io_target_timestamp is zero, then no delay has been registered
2228 * for this IO, so we jump to the end of this function and "skip" the
2229 * delay, issuing it directly to the zio layer.
2230 */
2231 if (zio->io_target_timestamp != 0) {
2232 hrtime_t now = gethrtime();
2233
2234 if (now >= zio->io_target_timestamp) {
2235 /*
2236 * This IO has already taken longer than the target
2237 * delay to complete, so we don't want to delay it
2238 * any longer; we "miss" the delay and issue it
2239 * directly to the zio layer. This is likely due to
2240 * the target latency being set to a value less than
2241 * the underlying hardware can satisfy (e.g. delay
2242 * set to 1ms, but the disks take 10ms to complete an
2243 * IO request).
2244 */
2245
2246 DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
2247 hrtime_t, now);
2248
2249 zio_interrupt(zio);
2250 } else {
2251 taskqid_t tid;
2252 hrtime_t diff = zio->io_target_timestamp - now;
2253 int ticks = MAX(1, NSEC_TO_TICK(diff));
2254 clock_t expire_at_tick = ddi_get_lbolt() + ticks;
2255
2256 DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
2257 hrtime_t, now, hrtime_t, diff);
2258
2259 tid = taskq_dispatch_delay(system_taskq, zio_interrupt,
2260 zio, TQ_NOSLEEP, expire_at_tick);
2261 if (tid == TASKQID_INVALID) {
2262 /*
2263 * Couldn't allocate a task. Just finish the
2264 * zio without a delay.
2265 */
2266 zio_interrupt(zio);
2267 }
2268 }
2269 return;
2270 }
2271 #endif
2272 DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
2273 zio_interrupt(zio);
2274 }
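/*
 * Worked example (assuming a 1000 Hz tick): a zio that is 10 ms short
 * of io_target_timestamp has diff = 10,000,000 ns, which
 * NSEC_TO_TICK() converts to 10 ticks; the MAX(1, ...) guarantees
 * that sub-tick delays still sleep for at least one tick instead of
 * being dispatched immediately.
 */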
2275
2276 static void
2277 zio_deadman_impl(zio_t *pio, int ziodepth)
2278 {
2279 zio_t *cio, *cio_next;
2280 zio_link_t *zl = NULL;
2281 vdev_t *vd = pio->io_vd;
2282 uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
2283
2284 if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
2285 vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
2286 zbookmark_phys_t *zb = &pio->io_bookmark;
2287 uint64_t delta = gethrtime() - pio->io_timestamp;
2288
2289 zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
2290 "delta=%llu queued=%llu io=%llu "
2291 "path=%s "
2292 "last=%llu type=%d "
2293 "priority=%d flags=0x%llx stage=0x%x "
2294 "pipeline=0x%x pipeline-trace=0x%x "
2295 "objset=%llu object=%llu "
2296 "level=%llu blkid=%llu "
2297 "offset=%llu size=%llu "
2298 "error=%d",
2299 ziodepth, pio, pio->io_timestamp,
2300 (u_longlong_t)delta, pio->io_delta, pio->io_delay,
2301 vd ? vd->vdev_path : "NULL",
2302 vq ? vq->vq_io_complete_ts : 0, pio->io_type,
2303 pio->io_priority, (u_longlong_t)pio->io_flags,
2304 pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
2305 (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
2306 (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
2307 (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
2308 pio->io_error);
2309 (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
2310 pio->io_spa, vd, zb, pio, 0);
2311 }
2312
2313 if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
2314 list_is_empty(&pio->io_child_list) &&
2315 failmode == ZIO_FAILURE_MODE_CONTINUE &&
2316 taskq_empty_ent(&pio->io_tqent) &&
2317 pio->io_queue_state == ZIO_QS_ACTIVE) {
2318 pio->io_error = EINTR;
2319 zio_interrupt(pio);
2320 }
2321
2322 mutex_enter(&pio->io_lock);
2323 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2324 cio_next = zio_walk_children(pio, &zl);
2325 zio_deadman_impl(cio, ziodepth + 1);
2326 }
2327 mutex_exit(&pio->io_lock);
2328 }
2329
2330 /*
2331 * Log the critical information describing this zio and all of its children
2332 * using the zfs_dbgmsg() interface, then post a deadman event for the ZED.
2333 */
2334 void
2335 zio_deadman(zio_t *pio, const char *tag)
2336 {
2337 spa_t *spa = pio->io_spa;
2338 char *name = spa_name(spa);
2339
2340 if (!zfs_deadman_enabled || spa_suspended(spa))
2341 return;
2342
2343 zio_deadman_impl(pio, 0);
2344
2345 switch (spa_get_deadman_failmode(spa)) {
2346 case ZIO_FAILURE_MODE_WAIT:
2347 zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
2348 break;
2349
2350 case ZIO_FAILURE_MODE_CONTINUE:
2351 zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
2352 break;
2353
2354 case ZIO_FAILURE_MODE_PANIC:
2355 fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
2356 break;
2357 }
2358 }
2359
2360 /*
2361 * Execute the I/O pipeline until one of the following occurs:
2362 * (1) the I/O completes; (2) the pipeline stalls waiting for
2363 * dependent child I/Os; (3) the I/O issues, so we're waiting
2364 * for an I/O completion interrupt; (4) the I/O is delegated by
2365 * vdev-level caching or aggregation; (5) the I/O is deferred
2366 * due to vdev-level queueing; (6) the I/O is handed off to
2367 * another thread. In all cases, the pipeline stops whenever
2368 * there's no CPU work; it never burns a thread in cv_wait_io().
2369 *
2370 * There's no locking on io_stage because there's no legitimate way
2371 * for multiple threads to be attempting to process the same I/O.
2372 */
2373 static zio_pipe_stage_t *zio_pipeline[];
2374
2375 /*
2376 * zio_execute() is a wrapper around the static function
2377 * __zio_execute() so that we can force __zio_execute() to be
2378 * inlined. This reduces stack overhead which is important
2379 * because __zio_execute() is called recursively in several zio
2380 * code paths. zio_execute() itself cannot be inlined because
2381 * it is externally visible.
2382 */
2383 void
2384 zio_execute(void *zio)
2385 {
2386 fstrans_cookie_t cookie;
2387
2388 cookie = spl_fstrans_mark();
2389 __zio_execute(zio);
2390 spl_fstrans_unmark(cookie);
2391 }
2392
2393 /*
2394 * Used to determine whether the stack in the current context is large
2395 * enough to allow zio_execute() to be called recursively. A minimum
2396 * stack size of 16K is required to avoid needing to re-dispatch the zio.
2397 */
2398 static boolean_t
2399 zio_execute_stack_check(zio_t *zio)
2400 {
2401 #if !defined(HAVE_LARGE_STACKS)
2402 dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
2403
2404 /* Executing in txg_sync_thread() context. */
2405 if (dp && curthread == dp->dp_tx.tx_sync_thread)
2406 return (B_TRUE);
2407
2408 /* Pool initialization outside of zio_taskq context. */
2409 if (dp && spa_is_initializing(dp->dp_spa) &&
2410 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
2411 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
2412 return (B_TRUE);
2413 #else
2414 (void) zio;
2415 #endif /* HAVE_LARGE_STACKS */
2416
2417 return (B_FALSE);
2418 }
2419
2420 __attribute__((always_inline))
2421 static inline void
2422 __zio_execute(zio_t *zio)
2423 {
2424 ASSERT3U(zio->io_queued_timestamp, >, 0);
2425
2426 while (zio->io_stage < ZIO_STAGE_DONE) {
2427 enum zio_stage pipeline = zio->io_pipeline;
2428 enum zio_stage stage = zio->io_stage;
2429
2430 zio->io_executor = curthread;
2431
2432 ASSERT(!MUTEX_HELD(&zio->io_lock));
2433 ASSERT(ISP2(stage));
2434 ASSERT0P(zio->io_stall);
2435
2436 do {
2437 stage <<= 1;
2438 } while ((stage & pipeline) == 0);
2439
2440 ASSERT(stage <= ZIO_STAGE_DONE);
2441
2442 /*
2443 * If we are in interrupt context and this pipeline stage
2444 * will grab a config lock that is held across I/O,
2445 * or may wait for an I/O that needs an interrupt thread
2446 * to complete, issue async to avoid deadlock.
2447 *
2448 * For VDEV_IO_START, we cut in line so that the io will
2449 * be sent to disk promptly.
2450 */
2451 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
2452 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
2453 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2454 zio_requeue_io_start_cut_in_line : B_FALSE;
2455 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2456 return;
2457 }
2458
2459 /*
2460 * If the current context doesn't have large enough stacks
2461 * the zio must be issued asynchronously to prevent overflow.
2462 */
2463 if (zio_execute_stack_check(zio)) {
2464 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2465 zio_requeue_io_start_cut_in_line : B_FALSE;
2466 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2467 return;
2468 }
2469
2470 zio->io_stage = stage;
2471 zio->io_pipeline_trace |= zio->io_stage;
2472
2473 /*
2474 * The zio pipeline stage returns the next zio to execute
2475 * (typically the same as this one), or NULL if we should
2476 * stop.
2477 */
2478 zio = zio_pipeline[highbit64(stage) - 1](zio);
2479
2480 if (zio == NULL)
2481 return;
2482 }
2483 }
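/*
 * Worked example of the stage-advance loop above: every stage is a
 * distinct power-of-two bit and io_pipeline is a mask of enabled
 * stages. If stage == 0x2 and the pipeline contains the bits
 * {0x2, 0x10, 0x80}, the loop shifts 0x2 -> 0x4 -> 0x8 -> 0x10 and
 * stops at 0x10, the next enabled stage, whose handler is
 * zio_pipeline[highbit64(0x10) - 1] == zio_pipeline[4].
 */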
2484
2485
2486 /*
2487 * ==========================================================================
2488 * Initiate I/O, either sync or async
2489 * ==========================================================================
2490 */
2491 int
2492 zio_wait(zio_t *zio)
2493 {
2494 /*
2495 * Some routines, like zio_free_sync(), may return a NULL zio
2496 * to avoid the performance overhead of creating and then destroying
2497 * an unneeded zio. For the callers' simplicity, we accept a NULL
2498 * zio and ignore it.
2499 */
2500 if (zio == NULL)
2501 return (0);
2502
2503 long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
2504 int error;
2505
2506 ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
2507 ASSERT0P(zio->io_executor);
2508
2509 zio->io_waiter = curthread;
2510 ASSERT0(zio->io_queued_timestamp);
2511 zio->io_queued_timestamp = gethrtime();
2512
2513 if (zio->io_type == ZIO_TYPE_WRITE) {
2514 spa_select_allocator(zio);
2515 }
2516 __zio_execute(zio);
2517
2518 mutex_enter(&zio->io_lock);
2519 while (zio->io_executor != NULL) {
2520 error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
2521 ddi_get_lbolt() + timeout);
2522
2523 if (zfs_deadman_enabled && error == -1 &&
2524 gethrtime() - zio->io_queued_timestamp >
2525 spa_deadman_ziotime(zio->io_spa)) {
2526 mutex_exit(&zio->io_lock);
2527 timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
2528 zio_deadman(zio, FTAG);
2529 mutex_enter(&zio->io_lock);
2530 }
2531 }
2532 mutex_exit(&zio->io_lock);
2533
2534 error = zio->io_error;
2535 zio_destroy(zio);
2536
2537 return (error);
2538 }
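/*
 * Illustrative usage ("zb" is a hypothetical bookmark): synchronous
 * callers typically wrap creation and waiting in a single expression:
 *
 *	error = zio_wait(zio_read(NULL, spa, bp, abd, size, NULL, NULL,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 */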
2539
2540 void
2541 zio_nowait(zio_t *zio)
2542 {
2543 /*
2544 * See comment in zio_wait().
2545 */
2546 if (zio == NULL)
2547 return;
2548
2549 ASSERT0P(zio->io_executor);
2550
2551 if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
2552 list_is_empty(&zio->io_parent_list)) {
2553 zio_t *pio;
2554
2555 /*
2556 * This is a logical async I/O with no parent to wait for it.
2557 * We add it to the spa_async_root_zio "Godfather" I/O which
2558 * will ensure it completes prior to unloading the pool.
2559 */
2560 spa_t *spa = zio->io_spa;
2561 pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
2562
2563 zio_add_child(pio, zio);
2564 }
2565
2566 ASSERT0(zio->io_queued_timestamp);
2567 zio->io_queued_timestamp = gethrtime();
2568 if (zio->io_type == ZIO_TYPE_WRITE) {
2569 spa_select_allocator(zio);
2570 }
2571 __zio_execute(zio);
2572 }
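/*
 * Illustrative usage ("my_done" and "zb" are hypothetical): async
 * children either hang off an explicit parent that is later waited
 * on, or, as described above, are adopted by the per-CPU godfather
 * zio when they are parentless logical I/Os:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, 0);
 *	zio_nowait(zio_read(rio, spa, bp, abd, size, my_done, NULL,
 *	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *	(void) zio_wait(rio);
 */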
2573
2574 /*
2575 * ==========================================================================
2576 * Reexecute, cancel, or suspend/resume failed I/O
2577 * ==========================================================================
2578 */
2579
2580 static void
2581 zio_reexecute(void *arg)
2582 {
2583 zio_t *pio = arg;
2584 zio_t *cio, *cio_next, *gio;
2585
2586 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
2587 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
2588 ASSERT0P(pio->io_gang_leader);
2589 ASSERT0P(pio->io_gang_tree);
2590
2591 mutex_enter(&pio->io_lock);
2592 pio->io_flags = pio->io_orig_flags;
2593 pio->io_stage = pio->io_orig_stage;
2594 pio->io_pipeline = pio->io_orig_pipeline;
2595 pio->io_post = 0;
2596 pio->io_flags |= ZIO_FLAG_REEXECUTED;
2597 pio->io_pipeline_trace = 0;
2598 pio->io_error = 0;
2599 pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) ||
2600 (pio->io_pipeline & ZIO_STAGE_READY) == 0;
2601 pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE);
2602
2603 /*
2604 * It's possible for a failed ZIO to be a descendant of more than one
2605 * ZIO tree. When reexecuting it, we have to be sure to add its wait
2606 * states to all parent wait counts.
2607 *
2608 * Those parents, in turn, may have other children that are currently
2609 * active, usually because they've already been reexecuted after
2610 * resuming. Those children may be executing and may call
2611 * zio_notify_parent() at the same time as we're updating our parent's
2612 * counts. To avoid races while updating the counts, we take
2613 * gio->io_lock before each update.
2614 */
2615 zio_link_t *zl = NULL;
2616 while ((gio = zio_walk_parents(pio, &zl)) != NULL) {
2617 mutex_enter(&gio->io_lock);
2618 for (int w = 0; w < ZIO_WAIT_TYPES; w++) {
2619 gio->io_children[pio->io_child_type][w] +=
2620 !pio->io_state[w];
2621 }
2622 mutex_exit(&gio->io_lock);
2623 }
2624
2625 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
2626 pio->io_child_error[c] = 0;
2627
2628 if (IO_IS_ALLOCATING(pio))
2629 BP_ZERO(pio->io_bp);
2630
2631 /*
2632 * As we reexecute pio's children, new children could be created.
2633 * New children go to the head of pio's io_child_list, however,
2634 * so we will (correctly) not reexecute them. The key is that
2635 * the remainder of pio's io_child_list, from 'cio_next' onward,
2636 * cannot be affected by any side effects of reexecuting 'cio'.
2637 */
2638 zl = NULL;
2639 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2640 cio_next = zio_walk_children(pio, &zl);
2641 mutex_exit(&pio->io_lock);
2642 zio_reexecute(cio);
2643 mutex_enter(&pio->io_lock);
2644 }
2645 mutex_exit(&pio->io_lock);
2646
2647 /*
2648 * Now that all children have been reexecuted, execute the parent.
2649 * We don't reexecute "The Godfather" I/O here as it's the
2650 * responsibility of the caller to wait on it.
2651 */
2652 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
2653 pio->io_queued_timestamp = gethrtime();
2654 __zio_execute(pio);
2655 }
2656 }
2657
2658 void
2659 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
2660 {
2661 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
2662 fm_panic("Pool '%s' has encountered an uncorrectable I/O "
2663 "failure and the failure mode property for this pool "
2664 "is set to panic.", spa_name(spa));
2665
2666 if (reason != ZIO_SUSPEND_MMP) {
2667 cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable "
2668 "I/O failure and has been suspended.", spa_name(spa));
2669 }
2670
2671 (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
2672 NULL, NULL, 0);
2673
2674 mutex_enter(&spa->spa_suspend_lock);
2675
2676 if (spa->spa_suspend_zio_root == NULL)
2677 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
2678 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2679 ZIO_FLAG_GODFATHER);
2680
2681 spa->spa_suspended = reason;
2682
2683 if (zio != NULL) {
2684 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
2685 ASSERT(zio != spa->spa_suspend_zio_root);
2686 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2687 ASSERT0P(zio_unique_parent(zio));
2688 ASSERT(zio->io_stage == ZIO_STAGE_DONE);
2689 zio_add_child(spa->spa_suspend_zio_root, zio);
2690 }
2691
2692 mutex_exit(&spa->spa_suspend_lock);
2693
2694 txg_wait_kick(spa->spa_dsl_pool);
2695 }
2696
2697 int
2698 zio_resume(spa_t *spa)
2699 {
2700 zio_t *pio;
2701
2702 /*
2703 * Reexecute all previously suspended i/o.
2704 */
2705 mutex_enter(&spa->spa_suspend_lock);
2706 if (spa->spa_suspended != ZIO_SUSPEND_NONE)
2707 cmn_err(CE_WARN, "Pool '%s' was suspended and is being "
2708 "resumed. Failed I/O will be retried.",
2709 spa_name(spa));
2710 spa->spa_suspended = ZIO_SUSPEND_NONE;
2711 cv_broadcast(&spa->spa_suspend_cv);
2712 pio = spa->spa_suspend_zio_root;
2713 spa->spa_suspend_zio_root = NULL;
2714 mutex_exit(&spa->spa_suspend_lock);
2715
2716 if (pio == NULL)
2717 return (0);
2718
2719 zio_reexecute(pio);
2720 return (zio_wait(pio));
2721 }
2722
2723 void
2724 zio_resume_wait(spa_t *spa)
2725 {
2726 mutex_enter(&spa->spa_suspend_lock);
2727 while (spa_suspended(spa))
2728 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
2729 mutex_exit(&spa->spa_suspend_lock);
2730 }
2731
2732 /*
2733 * ==========================================================================
2734 * Gang blocks.
2735 *
2736 * A gang block is a collection of small blocks that looks to the DMU
2737 * like one large block. When zio_dva_allocate() cannot find a block
2738 * of the requested size, due to either severe fragmentation or the pool
2739 * being nearly full, it calls zio_write_gang_block() to construct the
2740 * block from smaller fragments.
2741 *
2742 * A gang block consists of a gang header and up to gbh_nblkptrs(size)
2743 * gang members. The gang header is like an indirect block: it's an array
2744 * of block pointers, though the header has a small tail (a zio_eck_t)
2745 * that stores an embedded checksum. It is allocated using only a single
2746 * sector as the requested size, and hence is allocatable regardless of
2747 * fragmentation. Its size is determined by the smallest allocatable
2748 * asize of the vdevs it was allocated on. The gang header's bps point
2749 * to its gang members, which hold the data.
2750 *
2751 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
2752 * as the verifier to ensure uniqueness of the SHA256 checksum.
2753 * Critically, the gang block bp's blk_cksum is the checksum of the data,
2754 * not the gang header. This ensures that data block signatures (needed for
2755 * deduplication) are independent of how the block is physically stored.
2756 *
2757 * Gang blocks can be nested: a gang member may itself be a gang block.
2758 * Thus every gang block is a tree in which root and all interior nodes are
2759 * gang headers, and the leaves are normal blocks that contain user data.
2760 * The root of the gang tree is called the gang leader.
2761 *
2762 * To perform any operation (read, rewrite, free, claim) on a gang block,
2763 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
2764 * in the io_gang_tree field of the original logical i/o by recursively
2765 * reading the gang leader and all gang headers below it. This yields
2766 * an in-core tree containing the contents of every gang header and the
2767 * bps for every constituent of the gang block.
2768 *
2769 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
2770 * and invokes a callback on each bp. To free a gang block, zio_gang_issue()
2771 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
2772 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
2773 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
2774 * headers, since we already have those in io_gang_tree. zio_rewrite_gang()
2775 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
2776 * of the gang header plus zio_checksum_compute() of the data to update the
2777 * gang header's blk_cksum as described above.
2778 *
2779 * The two-phase assemble/issue model solves the problem of partial failure --
2780 * what if you'd freed part of a gang block but then couldn't read the
2781 * gang header for another part? Assembling the entire gang tree first
2782 * ensures that all the necessary gang header I/O has succeeded before
2783 * starting the actual work of free, claim, or write. Once the gang tree
2784 * is assembled, free and claim are in-memory operations that cannot fail.
2785 *
2786 * In the event that a gang write fails, zio_dva_unallocate() walks the
2787 * gang tree to immediately free (i.e. insert back into the space map)
2788 * everything we've allocated. This ensures that we don't get ENOSPC
2789 * errors during repeated suspend/resume cycles due to a flaky device.
2790 *
2791 * Gang rewrites only happen during sync-to-convergence. If we can't assemble
2792 * the gang tree, we won't modify the block, so we can safely defer the free
2793 * (knowing that the block is still intact). If we *can* assemble the gang
2794 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
2795 * each constituent bp and we can allocate a new block on the next sync pass.
2796 *
2797 * In all cases, the gang tree allows complete recovery from partial failure.
2798 * ==========================================================================
2799 */
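/*
 * A small illustration of the tree described above: a gang leader
 * whose header holds three bps, one of which is itself a gang header:
 *
 *	leader bp (BP_IS_GANG)
 *	  gang header (one sector)
 *	    bp[0] -> data leaf
 *	    bp[1] -> data leaf
 *	    bp[2] -> nested gang header
 *	               bp[0] -> data leaf
 *	               bp[1] -> data leaf
 */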
2800
2801 static void
2802 zio_gang_issue_func_done(zio_t *zio)
2803 {
2804 abd_free(zio->io_abd);
2805 }
2806
2807 static zio_t *
2808 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2809 uint64_t offset)
2810 {
2811 if (gn != NULL)
2812 return (pio);
2813
2814 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
2815 BP_GET_PSIZE(bp), zio_gang_issue_func_done,
2816 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2817 &pio->io_bookmark));
2818 }
2819
2820 static zio_t *
2821 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2822 uint64_t offset)
2823 {
2824 zio_t *zio;
2825
2826 if (gn != NULL) {
2827 abd_t *gbh_abd =
2828 abd_get_from_buf(gn->gn_gbh, gn->gn_gangblocksize);
2829 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2830 gbh_abd, gn->gn_gangblocksize, zio_gang_issue_func_done,
2831 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2832 &pio->io_bookmark);
2833 /*
2834 * As we rewrite each gang header, the pipeline will compute
2835 * a new gang block header checksum for it; but no one will
2836 * compute a new data checksum, so we do that here. The one
2837 * exception is the gang leader: the pipeline already computed
2838 * its data checksum because that stage precedes gang assembly.
2839 * (Presently, nothing actually uses interior data checksums;
2840 * this is just good hygiene.)
2841 */
2842 if (gn != pio->io_gang_leader->io_gang_tree) {
2843 abd_t *buf = abd_get_offset(data, offset);
2844
2845 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
2846 buf, BP_GET_PSIZE(bp));
2847
2848 abd_free(buf);
2849 }
2850 /*
2851 * If we are here to damage data for testing purposes,
2852 * leave the GBH alone so that we can detect the damage.
2853 */
2854 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
2855 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
2856 } else {
2857 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2858 abd_get_offset(data, offset), BP_GET_PSIZE(bp),
2859 zio_gang_issue_func_done, NULL, pio->io_priority,
2860 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2861 }
2862
2863 return (zio);
2864 }
2865
2866 static zio_t *
2867 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2868 uint64_t offset)
2869 {
2870 (void) gn, (void) data, (void) offset;
2871
2872 zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
2873 ZIO_GANG_CHILD_FLAGS(pio));
2874 if (zio == NULL) {
2875 zio = zio_null(pio, pio->io_spa,
2876 NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
2877 }
2878 return (zio);
2879 }
2880
2881 static zio_t *
2882 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2883 uint64_t offset)
2884 {
2885 (void) gn, (void) data, (void) offset;
2886 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
2887 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
2888 }
2889
2890 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
2891 NULL,
2892 zio_read_gang,
2893 zio_rewrite_gang,
2894 zio_free_gang,
2895 zio_claim_gang,
2896 NULL
2897 };
2898
2899 static void zio_gang_tree_assemble_done(zio_t *zio);
2900
2901 static zio_gang_node_t *
2902 zio_gang_node_alloc(zio_gang_node_t **gnpp, uint64_t gangblocksize)
2903 {
2904 zio_gang_node_t *gn;
2905
2906 ASSERT0P(*gnpp);
2907
2908 gn = kmem_zalloc(sizeof (*gn) +
2909 (gbh_nblkptrs(gangblocksize) * sizeof (gn)), KM_SLEEP);
2910 gn->gn_gangblocksize = gn->gn_allocsize = gangblocksize;
2911 gn->gn_gbh = zio_buf_alloc(gangblocksize);
2912 *gnpp = gn;
2913
2914 return (gn);
2915 }
2916
2917 static void
2918 zio_gang_node_free(zio_gang_node_t **gnpp)
2919 {
2920 zio_gang_node_t *gn = *gnpp;
2921
2922 for (int g = 0; g < gbh_nblkptrs(gn->gn_allocsize); g++)
2923 ASSERT0P(gn->gn_child[g]);
2924
2925 zio_buf_free(gn->gn_gbh, gn->gn_allocsize);
2926 kmem_free(gn, sizeof (*gn) +
2927 (gbh_nblkptrs(gn->gn_allocsize) * sizeof (gn)));
2928 *gnpp = NULL;
2929 }
2930
2931 static void
2932 zio_gang_tree_free(zio_gang_node_t **gnpp)
2933 {
2934 zio_gang_node_t *gn = *gnpp;
2935
2936 if (gn == NULL)
2937 return;
2938
2939 for (int g = 0; g < gbh_nblkptrs(gn->gn_allocsize); g++)
2940 zio_gang_tree_free(&gn->gn_child[g]);
2941
2942 zio_gang_node_free(gnpp);
2943 }
2944
2945 static void
2946 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
2947 {
2948 uint64_t gangblocksize = UINT64_MAX;
2949 if (spa_feature_is_active(gio->io_spa,
2950 SPA_FEATURE_DYNAMIC_GANG_HEADER)) {
2951 spa_config_enter(gio->io_spa, SCL_VDEV, FTAG, RW_READER);
2952 for (int dva = 0; dva < BP_GET_NDVAS(bp); dva++) {
2953 vdev_t *vd = vdev_lookup_top(gio->io_spa,
2954 DVA_GET_VDEV(&bp->blk_dva[dva]));
2955 uint64_t psize = vdev_gang_header_psize(vd);
2956 gangblocksize = MIN(gangblocksize, psize);
2957 }
2958 spa_config_exit(gio->io_spa, SCL_VDEV, FTAG);
2959 } else {
2960 gangblocksize = SPA_OLD_GANGBLOCKSIZE;
2961 }
2962 ASSERT3U(gangblocksize, !=, UINT64_MAX);
2963 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp, gangblocksize);
2964 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, gangblocksize);
2965
2966 ASSERT(gio->io_gang_leader == gio);
2967 ASSERT(BP_IS_GANG(bp));
2968
2969 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, gangblocksize,
2970 zio_gang_tree_assemble_done, gn, gio->io_priority,
2971 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
2972 }
2973
2974 static void
2975 zio_gang_tree_assemble_done(zio_t *zio)
2976 {
2977 zio_t *gio = zio->io_gang_leader;
2978 zio_gang_node_t *gn = zio->io_private;
2979 blkptr_t *bp = zio->io_bp;
2980
2981 ASSERT(gio == zio_unique_parent(zio));
2982 ASSERT(list_is_empty(&zio->io_child_list));
2983
2984 if (zio->io_error)
2985 return;
2986
2987 /* this ABD was created from a linear buf in zio_gang_tree_assemble */
2988 if (BP_SHOULD_BYTESWAP(bp))
2989 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
2990
2991 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
2992 /*
2993 * If this was an old-style gangblock, the gangblocksize should have
2994 * been updated in zio_checksum_error to reflect that.
2995 */
2996 ASSERT3U(gbh_eck(gn->gn_gbh, gn->gn_gangblocksize)->zec_magic,
2997 ==, ZEC_MAGIC);
2998
2999 abd_free(zio->io_abd);
3000
3001 for (int g = 0; g < gbh_nblkptrs(gn->gn_gangblocksize); g++) {
3002 blkptr_t *gbp = gbh_bp(gn->gn_gbh, g);
3003 if (!BP_IS_GANG(gbp))
3004 continue;
3005 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
3006 }
3007 }
3008
3009 static void
3010 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
3011 uint64_t offset)
3012 {
3013 zio_t *gio = pio->io_gang_leader;
3014 zio_t *zio;
3015
3016 ASSERT(BP_IS_GANG(bp) == !!gn);
3017 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
3018 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
3019
3020 /*
3021 * If you're a gang header, your data is in gn->gn_gbh.
3022 * If you're a gang member, your data is in 'data' and gn == NULL.
3023 */
3024 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
3025
3026 if (gn != NULL) {
3027 ASSERT3U(gbh_eck(gn->gn_gbh,
3028 gn->gn_gangblocksize)->zec_magic, ==, ZEC_MAGIC);
3029
3030 for (int g = 0; g < gbh_nblkptrs(gn->gn_gangblocksize); g++) {
3031 blkptr_t *gbp = gbh_bp(gn->gn_gbh, g);
3032 if (BP_IS_HOLE(gbp))
3033 continue;
3034 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
3035 offset);
3036 offset += BP_GET_PSIZE(gbp);
3037 }
3038 }
3039
3040 if (gn == gio->io_gang_tree)
3041 ASSERT3U(gio->io_size, ==, offset);
3042
3043 if (zio != pio)
3044 zio_nowait(zio);
3045 }
3046
3047 static zio_t *
3048 zio_gang_assemble(zio_t *zio)
3049 {
3050 blkptr_t *bp = zio->io_bp;
3051
3052 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
3053 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3054
3055 zio->io_gang_leader = zio;
3056
3057 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
3058
3059 return (zio);
3060 }
3061
3062 static zio_t *
3063 zio_gang_issue(zio_t *zio)
3064 {
3065 blkptr_t *bp = zio->io_bp;
3066
3067 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
3068 return (NULL);
3069 }
3070
3071 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
3072 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3073
3074 if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
3075 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
3076 0);
3077 else
3078 zio_gang_tree_free(&zio->io_gang_tree);
3079
3080 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3081
3082 return (zio);
3083 }
3084
3085 static void
3086 zio_inherit_allocator(zio_t *pio, zio_t *cio)
3087 {
3088 cio->io_allocator = pio->io_allocator;
3089 }
3090
3091 static void
3092 zio_write_gang_member_ready(zio_t *zio)
3093 {
3094 zio_t *pio = zio_unique_parent(zio);
3095 dva_t *cdva = zio->io_bp->blk_dva;
3096 dva_t *pdva = pio->io_bp->blk_dva;
3097 uint64_t asize;
3098 zio_t *gio __maybe_unused = zio->io_gang_leader;
3099
3100 if (BP_IS_HOLE(zio->io_bp))
3101 return;
3102
3103 /*
3104 * If we're invoked directly from zio_write_gang_block(),
3105 * the bp_orig will be set.
3106 */
3107 ASSERT(BP_IS_HOLE(&zio->io_bp_orig) ||
3108 zio->io_flags & ZIO_FLAG_PREALLOCATED);
3109
3110 ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
3111 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
3112 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
3113 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
3114 VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
3115
3116 mutex_enter(&pio->io_lock);
3117 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
3118 ASSERT(DVA_GET_GANG(&pdva[d]));
3119 asize = DVA_GET_ASIZE(&pdva[d]);
3120 asize += DVA_GET_ASIZE(&cdva[d]);
3121 DVA_SET_ASIZE(&pdva[d], asize);
3122 }
3123 mutex_exit(&pio->io_lock);
3124 }
3125
3126 static void
3127 zio_write_gang_done(zio_t *zio)
3128 {
3129 /*
3130 * The io_abd field will be NULL for a zio with no data. The io_flags
3131 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
3132 * check for it here as it is cleared in zio_ready.
3133 */
3134 if (zio->io_abd != NULL)
3135 abd_free(zio->io_abd);
3136 }
3137
3138 static void
3139 zio_update_feature(void *arg, dmu_tx_t *tx)
3140 {
3141 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
3142 spa_feature_incr(spa, (spa_feature_t)(uintptr_t)arg, tx);
3143 }
3144
3145 static zio_t *
3146 zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
3147 {
3148 spa_t *spa = pio->io_spa;
3149 blkptr_t *bp = pio->io_bp;
3150 zio_t *gio = pio->io_gang_leader;
3151 zio_t *zio;
3152 zio_gang_node_t *gn, **gnpp;
3153 zio_gbh_phys_t *gbh;
3154 abd_t *gbh_abd;
3155 uint64_t txg = pio->io_txg;
3156 uint64_t resid = pio->io_size;
3157 zio_prop_t zp;
3158 int error;
3159 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
3160
3161 /*
3162 * Store multiple copies of the GBH, so that we can still traverse
3163 * all the data (e.g. to free or scrub) even if a block is damaged.
3164 * This value respects the redundant_metadata property.
3165 */
3166 int gbh_copies = gio->io_prop.zp_gang_copies;
3167 if (gbh_copies == 0) {
3168 /*
3169 * This should only happen in the case where we're filling in
3170 * DDT entries for a parent that wants more copies than the DDT
3171 * has. In that case, we cannot gang without creating a mixed
3172 * blkptr, which is illegal.
3173 */
3174 ASSERT3U(gio->io_child_type, ==, ZIO_CHILD_DDT);
3175 pio->io_error = EAGAIN;
3176 return (pio);
3177 }
3178 ASSERT3S(gbh_copies, >, 0);
3179 ASSERT3S(gbh_copies, <=, SPA_DVAS_PER_BP);
3180
3181 ASSERT(ZIO_HAS_ALLOCATOR(pio));
3182 int flags = METASLAB_GANG_HEADER;
3183 if (pio->io_flags & ZIO_FLAG_ALLOC_THROTTLED) {
3184 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3185 ASSERT(has_data);
3186
3187 flags |= METASLAB_ASYNC_ALLOC;
3188 }
3189
3190 uint64_t gangblocksize = SPA_OLD_GANGBLOCKSIZE;
3191 uint64_t candidate = gangblocksize;
3192 error = metaslab_alloc_range(spa, mc, gangblocksize, gangblocksize,
3193 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
3194 &pio->io_alloc_list, pio->io_allocator, pio, &candidate);
3195 if (error) {
3196 pio->io_error = error;
3197 return (pio);
3198 }
3199 if (spa_feature_is_active(spa, SPA_FEATURE_DYNAMIC_GANG_HEADER))
3200 gangblocksize = candidate;
3201
3202 if (pio == gio) {
3203 gnpp = &gio->io_gang_tree;
3204 } else {
3205 gnpp = pio->io_private;
3206 ASSERT(pio->io_ready == zio_write_gang_member_ready);
3207 }
3208
3209 gn = zio_gang_node_alloc(gnpp, gangblocksize);
3210 gbh = gn->gn_gbh;
3211 memset(gbh, 0, gangblocksize);
3212 gbh_abd = abd_get_from_buf(gbh, gangblocksize);
3213
3214 /*
3215 * Create the gang header.
3216 */
3217 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, gangblocksize,
3218 zio_write_gang_done, NULL, pio->io_priority,
3219 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
3220
3221 zio_inherit_allocator(pio, zio);
3222 if (pio->io_flags & ZIO_FLAG_ALLOC_THROTTLED) {
3223 boolean_t more;
3224 VERIFY(metaslab_class_throttle_reserve(mc, zio->io_allocator,
3225 gbh_copies, zio->io_size, B_TRUE, &more));
3226 zio->io_flags |= ZIO_FLAG_ALLOC_THROTTLED;
3227 }
3228
3229 /*
3230 * Create and nowait the gang children. First, we try to do
3231 * opportunistic allocations. If that fails to generate enough
3232 * space, we fall back to normal zio_write calls for nested gang blocks.
3233 */
3234 int g;
3235 boolean_t any_failed = B_FALSE;
3236 for (g = 0; resid != 0; g++) {
3237 flags &= METASLAB_ASYNC_ALLOC;
3238 flags |= METASLAB_GANG_CHILD;
3239 zp.zp_checksum = gio->io_prop.zp_checksum;
3240 zp.zp_compress = ZIO_COMPRESS_OFF;
3241 zp.zp_complevel = gio->io_prop.zp_complevel;
3242 zp.zp_type = zp.zp_storage_type = DMU_OT_NONE;
3243 zp.zp_level = 0;
3244 zp.zp_copies = gio->io_prop.zp_copies;
3245 zp.zp_gang_copies = gio->io_prop.zp_gang_copies;
3246 zp.zp_dedup = B_FALSE;
3247 zp.zp_dedup_verify = B_FALSE;
3248 zp.zp_nopwrite = B_FALSE;
3249 zp.zp_encrypt = gio->io_prop.zp_encrypt;
3250 zp.zp_byteorder = gio->io_prop.zp_byteorder;
3251 zp.zp_direct_write = B_FALSE;
3252 memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
3253 memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
3254 memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
3255
3256 uint64_t min_size = zio_roundup_alloc_size(spa,
3257 resid / (gbh_nblkptrs(gangblocksize) - g));
3258 min_size = MIN(min_size, resid);
3259 bp = &((blkptr_t *)gbh)[g];
3260
3261 zio_alloc_list_t cio_list;
3262 metaslab_trace_init(&cio_list);
3263 uint64_t allocated_size = UINT64_MAX;
3264 error = metaslab_alloc_range(spa, mc, min_size, resid,
3265 bp, gio->io_prop.zp_copies, txg, NULL,
3266 flags, &cio_list, zio->io_allocator, NULL, &allocated_size);
3267
3268 boolean_t allocated = error == 0;
3269 any_failed |= !allocated;
3270
3271 uint64_t psize = allocated ? MIN(resid, allocated_size) :
3272 min_size;
3273 ASSERT3U(psize, >=, min_size);
3274
3275 zio_t *cio = zio_write(zio, spa, txg, bp, has_data ?
3276 abd_get_offset(pio->io_abd, pio->io_size - resid) : NULL,
3277 psize, psize, &zp, zio_write_gang_member_ready, NULL,
3278 zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
3279 ZIO_GANG_CHILD_FLAGS(pio) |
3280 (allocated ? ZIO_FLAG_PREALLOCATED : 0), &pio->io_bookmark);
3281
3282 resid -= psize;
3283 zio_inherit_allocator(zio, cio);
3284 if (allocated) {
3285 metaslab_trace_move(&cio_list, &cio->io_alloc_list);
3286 metaslab_group_alloc_increment_all(spa,
3287 &cio->io_bp_orig, zio->io_allocator, flags, psize,
3288 cio);
3289 }
3290 /*
3291 * We do not reserve for the child writes, since we already
3292	 * reserved for the parent. Unreserve, though, will be called
3293	 * for individual children. We can do this because the sum of
3294	 * the children's physical sizes equals the parent's physical
3295	 * size; it would not hold for potentially larger allocations.
3296 */
3297
3298 zio_nowait(cio);
3299 }
3300
3301 /*
3302 * If we used more gang children than the old limit, we must already be
3303 * using the new headers. No need to update anything, just move on.
3304 *
3305 * Otherwise, we might be in a case where we need to turn on the new
3306	 * feature, so we check that. We enable the new feature if we didn't
3307	 * manage to fit everything into the 3 gang children the old header
3308	 * allows, but a larger header could have let us write more.
3309 */
3310 if (g > gbh_nblkptrs(SPA_OLD_GANGBLOCKSIZE)) {
3311 ASSERT(spa_feature_is_active(spa,
3312 SPA_FEATURE_DYNAMIC_GANG_HEADER));
3313 } else if (any_failed && candidate > SPA_OLD_GANGBLOCKSIZE &&
3314 spa_feature_is_enabled(spa, SPA_FEATURE_DYNAMIC_GANG_HEADER) &&
3315 !spa_feature_is_active(spa, SPA_FEATURE_DYNAMIC_GANG_HEADER)) {
3316 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool,
3317 MAX(txg, spa_syncing_txg(spa) + 1));
3318 dsl_sync_task_nowait(spa->spa_dsl_pool,
3319 zio_update_feature,
3320 (void *)SPA_FEATURE_DYNAMIC_GANG_HEADER, tx);
3321 dmu_tx_commit(tx);
3322 }
3323
3324 /*
3325 * Set pio's pipeline to just wait for zio to finish.
3326 */
3327 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3328
3329 zio_nowait(zio);
3330
3331 return (pio);
3332 }
3333
3334 /*
3335 * The zio_nop_write stage in the pipeline determines if allocating a
3336 * new bp is necessary. The nopwrite feature can handle writes in
3337 * either syncing or open context (i.e. zil writes) and as a result is
3338 * mutually exclusive with dedup.
3339 *
3340 * By leveraging a cryptographically secure checksum, such as SHA256, we
3341 * can compare the checksums of the new data and the old to determine if
3342 * allocating a new block is required. Note that our requirements for
3343 * cryptographic strength are fairly weak: there can't be any accidental
3344 * hash collisions, but we don't need to be secure against intentional
3345 * (malicious) collisions. To trigger a nopwrite, you have to be able
3346 * to write the file to begin with, and triggering an incorrect (hash
3347 * collision) nopwrite is no worse than simply writing to the file.
3348 * That said, there are no known attacks against the checksum algorithms
3349 * used for nopwrite, assuming that the salt and the checksums
3350 * themselves remain secret.
3351 */
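/*
 * Illustrative example (a sketch, not code from this file): nopwrite can
 * only engage when the dataset's checksum carries ZCHECKSUM_FLAG_NOPWRITE
 * and compression is enabled, e.g.:
 *
 *	zfs set checksum=sha256 compression=lz4 pool/fs
 *
 * With that in place, rewriting a byte-identical block matches on checksum
 * below and reuses the existing bp, with no new allocation and no device
 * I/O.
 */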
3352 static zio_t *
3353 zio_nop_write(zio_t *zio)
3354 {
3355 blkptr_t *bp = zio->io_bp;
3356 blkptr_t *bp_orig = &zio->io_bp_orig;
3357 zio_prop_t *zp = &zio->io_prop;
3358
3359 ASSERT(BP_IS_HOLE(bp));
3360 ASSERT0(BP_GET_LEVEL(bp));
3361 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
3362 ASSERT(zp->zp_nopwrite);
3363 ASSERT(!zp->zp_dedup);
3364 ASSERT0P(zio->io_bp_override);
3365 ASSERT(IO_IS_ALLOCATING(zio));
3366
3367 /*
3368 * Check to see if the original bp and the new bp have matching
3369 * characteristics (i.e. same checksum, compression algorithms, etc).
3370 * If they don't then just continue with the pipeline which will
3371 * allocate a new bp.
3372 */
3373 if (BP_IS_HOLE(bp_orig) ||
3374 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
3375 ZCHECKSUM_FLAG_NOPWRITE) ||
3376 BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
3377 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
3378 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
3379 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
3380 zp->zp_copies != BP_GET_NDVAS(bp_orig))
3381 return (zio);
3382
3383 /*
3384 * If the checksums match then reset the pipeline so that we
3385 * avoid allocating a new bp and issuing any I/O.
3386 */
3387 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
3388 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
3389 ZCHECKSUM_FLAG_NOPWRITE);
3390 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
3391 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
3392 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
3393 ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
3394
3395 /*
3396 * If we're overwriting a block that is currently on an
3397 * indirect vdev, then ignore the nopwrite request and
3398 * allow a new block to be allocated on a concrete vdev.
3399 */
3400 spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
3401 for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
3402 vdev_t *tvd = vdev_lookup_top(zio->io_spa,
3403 DVA_GET_VDEV(&bp_orig->blk_dva[d]));
3404 if (tvd->vdev_ops == &vdev_indirect_ops) {
3405 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3406 return (zio);
3407 }
3408 }
3409 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3410
3411 *bp = *bp_orig;
3412 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3413 zio->io_flags |= ZIO_FLAG_NOPWRITE;
3414 }
3415
3416 return (zio);
3417 }
3418
3419 /*
3420 * ==========================================================================
3421 * Block Reference Table
3422 * ==========================================================================
3423 */
3424 static zio_t *
3425 zio_brt_free(zio_t *zio)
3426 {
3427 blkptr_t *bp;
3428
3429 bp = zio->io_bp;
3430
3431 if (BP_GET_LEVEL(bp) > 0 ||
3432 BP_IS_METADATA(bp) ||
3433 !brt_maybe_exists(zio->io_spa, bp)) {
3434 return (zio);
3435 }
3436
3437 if (!brt_entry_decref(zio->io_spa, bp)) {
3438 /*
3439 * This isn't the last reference, so we cannot free
3440 * the data yet.
3441 */
3442 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3443 }
3444
3445 return (zio);
3446 }
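/*
 * A sketch of the refcount flow: a block referenced by clones carries BRT
 * references. Freeing one of those references only decrements the refcount
 * above and short-circuits the pipeline; the free that drops the last
 * reference falls through with the full pipeline intact, so the block's
 * space is actually reclaimed by the DVA-free stage.
 */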
3447
3448 /*
3449 * ==========================================================================
3450 * Dedup
3451 * ==========================================================================
3452 */
3453 static void
3454 zio_ddt_child_read_done(zio_t *zio)
3455 {
3456 blkptr_t *bp = zio->io_bp;
3457 ddt_t *ddt;
3458 ddt_entry_t *dde = zio->io_private;
3459 zio_t *pio = zio_unique_parent(zio);
3460
3461 mutex_enter(&pio->io_lock);
3462 ddt = ddt_select(zio->io_spa, bp);
3463
3464 if (zio->io_error == 0) {
3465 ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
3466 /* this phys variant doesn't need repair */
3467 ddt_phys_clear(dde->dde_phys, v);
3468 }
3469
3470 if (zio->io_error == 0 && dde->dde_io->dde_repair_abd == NULL)
3471 dde->dde_io->dde_repair_abd = zio->io_abd;
3472 else
3473 abd_free(zio->io_abd);
3474 mutex_exit(&pio->io_lock);
3475 }
3476
3477 static zio_t *
3478 zio_ddt_read_start(zio_t *zio)
3479 {
3480 blkptr_t *bp = zio->io_bp;
3481
3482 ASSERT(BP_GET_DEDUP(bp));
3483 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3484 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3485
3486 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3487 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3488 ddt_entry_t *dde = ddt_repair_start(ddt, bp);
3489 ddt_phys_variant_t v_self = ddt_phys_select(ddt, dde, bp);
3490 ddt_univ_phys_t *ddp = dde->dde_phys;
3491 blkptr_t blk;
3492
3493 ASSERT0P(zio->io_vsd);
3494 zio->io_vsd = dde;
3495
3496 if (v_self == DDT_PHYS_NONE)
3497 return (zio);
3498
3499 /* issue I/O for the other copies */
3500 for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3501 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3502
3503 if (ddt_phys_birth(ddp, v) == 0 || v == v_self)
3504 continue;
3505
3506 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key,
3507 ddp, v, &blk);
3508 zio_nowait(zio_read(zio, zio->io_spa, &blk,
3509 abd_alloc_for_io(zio->io_size, B_TRUE),
3510 zio->io_size, zio_ddt_child_read_done, dde,
3511 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
3512 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
3513 }
3514 return (zio);
3515 }
3516
3517 zio_nowait(zio_read(zio, zio->io_spa, bp,
3518 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
3519 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
3520
3521 return (zio);
3522 }
3523
3524 static zio_t *
3525 zio_ddt_read_done(zio_t *zio)
3526 {
3527 blkptr_t *bp = zio->io_bp;
3528
3529 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
3530 return (NULL);
3531 }
3532
3533 ASSERT(BP_GET_DEDUP(bp));
3534 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3535 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3536
3537 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3538 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3539 ddt_entry_t *dde = zio->io_vsd;
3540 if (ddt == NULL) {
3541 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
3542 return (zio);
3543 }
3544 if (dde == NULL) {
3545 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
3546 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
3547 return (NULL);
3548 }
3549 if (dde->dde_io->dde_repair_abd != NULL) {
3550 abd_copy(zio->io_abd, dde->dde_io->dde_repair_abd,
3551 zio->io_size);
3552 zio->io_child_error[ZIO_CHILD_DDT] = 0;
3553 }
3554 ddt_repair_done(ddt, dde);
3555 zio->io_vsd = NULL;
3556 }
3557
3558 ASSERT0P(zio->io_vsd);
3559
3560 return (zio);
3561 }
3562
3563 static boolean_t
3564 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
3565 {
3566 spa_t *spa = zio->io_spa;
3567 boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
3568
3569 ASSERT(!(zio->io_bp_override && do_raw));
3570
3571 /*
3572 * Note: we compare the original data, not the transformed data,
3573 * because when zio->io_bp is an override bp, we will not have
3574 * pushed the I/O transforms. That's an important optimization
3575 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
3576 * However, we should never get a raw, override zio so in these
3577 * cases we can compare the io_abd directly. This is useful because
3578 * it allows us to do dedup verification even if we don't have access
3579 * to the original data (for instance, if the encryption keys aren't
3580 * loaded).
3581 */
3582
3583 for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3584 if (DDT_PHYS_IS_DITTO(ddt, p))
3585 continue;
3586
3587 if (dde->dde_io == NULL)
3588 continue;
3589
3590 zio_t *lio = dde->dde_io->dde_lead_zio[p];
3591 if (lio == NULL)
3592 continue;
3593
3594 if (do_raw)
3595 return (lio->io_size != zio->io_size ||
3596 abd_cmp(zio->io_abd, lio->io_abd) != 0);
3597
3598 return (lio->io_orig_size != zio->io_orig_size ||
3599 abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
3600 }
3601
3602 for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3603 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3604 uint64_t phys_birth = ddt_phys_birth(dde->dde_phys, v);
3605
3606 if (phys_birth != 0 && do_raw) {
3607 blkptr_t blk = *zio->io_bp;
3608 uint64_t psize;
3609 abd_t *tmpabd;
3610 int error;
3611
3612 ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth);
3613 psize = BP_GET_PSIZE(&blk);
3614
3615 if (psize != zio->io_size)
3616 return (B_TRUE);
3617
3618 ddt_exit(ddt);
3619
3620 tmpabd = abd_alloc_for_io(psize, B_TRUE);
3621
3622 error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
3623 psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
3624 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3625 ZIO_FLAG_RAW, &zio->io_bookmark));
3626
3627 if (error == 0) {
3628 if (abd_cmp(tmpabd, zio->io_abd) != 0)
3629 error = SET_ERROR(ENOENT);
3630 }
3631
3632 abd_free(tmpabd);
3633 ddt_enter(ddt);
3634 return (error != 0);
3635 } else if (phys_birth != 0) {
3636 arc_buf_t *abuf = NULL;
3637 arc_flags_t aflags = ARC_FLAG_WAIT;
3638 blkptr_t blk = *zio->io_bp;
3639 int error;
3640
3641 ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth);
3642
3643 if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
3644 return (B_TRUE);
3645
3646 ddt_exit(ddt);
3647
3648 error = arc_read(NULL, spa, &blk,
3649 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
3650 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3651 &aflags, &zio->io_bookmark);
3652
3653 if (error == 0) {
3654 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
3655 zio->io_orig_size) != 0)
3656 error = SET_ERROR(ENOENT);
3657 arc_buf_destroy(abuf, &abuf);
3658 }
3659
3660 ddt_enter(ddt);
3661 return (error != 0);
3662 }
3663 }
3664
3665 return (B_FALSE);
3666 }
3667
3668 static void
3669 zio_ddt_child_write_done(zio_t *zio)
3670 {
3671 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3672 ddt_entry_t *dde = zio->io_private;
3673
3674 zio_link_t *zl = NULL;
3675 ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL);
3676
3677 int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies);
3678 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3679 ddt_univ_phys_t *ddp = dde->dde_phys;
3680
3681 mutex_enter(&dde->dde_io->dde_io_lock);
3682
3683 /* we're the lead, so once we're done there's no one else outstanding */
3684 if (dde->dde_io->dde_lead_zio[p] == zio)
3685 dde->dde_io->dde_lead_zio[p] = NULL;
3686
3687 ddt_univ_phys_t *orig = &dde->dde_io->dde_orig_phys;
3688
3689 if (zio->io_error != 0) {
3690 /*
3691 * The write failed, so we're about to abort the entire IO
3692 * chain. We need to revert the entry back to what it was at
3693 * the last time it was successfully extended.
3694 */
3695 ddt_phys_unextend(ddp, orig, v);
3696 ddt_phys_clear(orig, v);
3697
3698 mutex_exit(&dde->dde_io->dde_io_lock);
3699
3700 /*
3701 * Undo the optimistic refcount increments that were done in
3702 * zio_ddt_write() for all non-DDT-child parents. Since errors
3703 * are rare, taking the global lock here is acceptable.
3704 */
3705 ddt_enter(ddt);
3706 zio_t *pio;
3707 zl = NULL;
3708 while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
3709 if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
3710 ddt_phys_decref(ddp, v);
3711 }
3712 ddt_exit(ddt);
3713 return;
3714 }
3715
3716 /*
3717 * We've successfully added new DVAs to the entry. Clear the saved
3718 * state or, if there's still outstanding IO, remember it so we can
3719 * revert to a known good state if that IO fails.
3720 */
3721 if (dde->dde_io->dde_lead_zio[p] == NULL)
3722 ddt_phys_clear(orig, v);
3723 else
3724 ddt_phys_copy(orig, ddp, v);
3725
3726 mutex_exit(&dde->dde_io->dde_io_lock);
3727 }
3728
3729 static void
3730 zio_ddt_child_write_ready(zio_t *zio)
3731 {
3732 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3733 ddt_entry_t *dde = zio->io_private;
3734
3735 zio_link_t *zl = NULL;
3736 ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL);
3737
3738 int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies);
3739 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3740
3741 if (ddt_phys_is_gang(dde->dde_phys, v)) {
3742 for (int i = 0; i < BP_GET_NDVAS(zio->io_bp); i++) {
3743 dva_t *d = &zio->io_bp->blk_dva[i];
3744 metaslab_group_alloc_decrement(zio->io_spa,
3745 DVA_GET_VDEV(d), zio->io_allocator,
3746 METASLAB_ASYNC_ALLOC, zio->io_size, zio);
3747 }
3748 zio->io_error = EAGAIN;
3749 }
3750
3751 if (zio->io_error != 0)
3752 return;
3753
3754 mutex_enter(&dde->dde_io->dde_io_lock);
3755
3756 ddt_phys_extend(dde->dde_phys, v, zio->io_bp);
3757
3758 zio_t *pio;
3759 zl = NULL;
3760 while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
3761 if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
3762 ddt_bp_fill(dde->dde_phys, v, pio->io_bp, zio->io_txg);
3763 }
3764
3765 mutex_exit(&dde->dde_io->dde_io_lock);
3766 }
3767
3768 static zio_t *
3769 zio_ddt_write(zio_t *zio)
3770 {
3771 spa_t *spa = zio->io_spa;
3772 blkptr_t *bp = zio->io_bp;
3773 uint64_t txg = zio->io_txg;
3774 zio_prop_t *zp = &zio->io_prop;
3775 ddt_t *ddt = ddt_select(spa, bp);
3776 ddt_entry_t *dde;
3777
3778 ASSERT(BP_GET_DEDUP(bp));
3779 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
3780 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
3781 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
3782 /*
3783 * Deduplication will not take place for Direct I/O writes. The
3784	 * ddt_tree will be emptied in syncing context. Direct I/O writes take
3785	 * place in open context, so a Direct I/O write must not attempt to
3786	 * modify the ddt_tree while issuing the write.
3787 */
3788 ASSERT3B(zio->io_prop.zp_direct_write, ==, B_FALSE);
3789
3790 ddt_enter(ddt);
3791 /*
3792	 * Search the DDT for a matching entry. We skip DVA verification:
3793	 * DVAs can come only from an override BP, and once we get here the
3794	 * override BP can't carry the "D" flag of a pruned DDT entry.
3795 */
3796 IMPLY(zio->io_bp_override, !BP_GET_DEDUP(zio->io_bp_override));
3797 dde = ddt_lookup(ddt, bp, B_FALSE);
3798 if (dde == NULL) {
3799 /* DDT size is over its quota so no new entries */
3800 ddt_exit(ddt);
3801 zp->zp_dedup = B_FALSE;
3802 BP_SET_DEDUP(bp, B_FALSE);
3803 if (zio->io_bp_override == NULL)
3804 zio->io_pipeline = ZIO_WRITE_PIPELINE;
3805 return (zio);
3806 }
3807
3808 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
3809 /*
3810 * If we're using a weak checksum, upgrade to a strong checksum
3811 * and try again. If we're already using a strong checksum,
3812 * we can't resolve it, so just convert to an ordinary write.
3813 * (And automatically e-mail a paper to Nature?)
3814 */
3815 ddt_exit(ddt);
3816 if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
3817 ZCHECKSUM_FLAG_DEDUP)) {
3818 zp->zp_checksum = spa_dedup_checksum(spa);
3819 zio_pop_transforms(zio);
3820 zio->io_stage = ZIO_STAGE_OPEN;
3821 BP_ZERO(bp);
3822 } else {
3823 zp->zp_dedup = B_FALSE;
3824 BP_SET_DEDUP(bp, B_FALSE);
3825 }
3826 ASSERT(!BP_GET_DEDUP(bp));
3827 zio->io_pipeline = ZIO_WRITE_PIPELINE;
3828 return (zio);
3829 }
3830
3831 int p = DDT_PHYS_FOR_COPIES(ddt, zp->zp_copies);
3832 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3833 ddt_univ_phys_t *ddp = dde->dde_phys;
3834
3835 /*
3836 * In the common cases, at this point we have a regular BP with no
3837 * allocated DVAs, and the corresponding DDT entry for its checksum.
3838 * Our goal is to fill the BP with enough DVAs to satisfy its copies=
3839 * requirement.
3840 *
3841 * One of three things needs to happen to fulfill this:
3842 *
3843 * - if the DDT entry has enough DVAs to satisfy the BP, we just copy
3844 * them out of the entry and return;
3845 *
3846	 * - if the DDT entry has no DVAs (i.e. it's brand new), then we have to
3847 * issue the write as normal so that DVAs can be allocated and the
3848 * data land on disk. We then copy the DVAs into the DDT entry on
3849 * return.
3850 *
3851 * - if the DDT entry has some DVAs, but too few, we have to issue the
3852	 * write, adjusted to allocate fewer copies. When it returns, we
3853 * add the new DVAs to the DDT entry, and update the BP to have the
3854 * full amount it originally requested.
3855 *
3856 * In all cases, if there's already a writing IO in flight, we need to
3857 * defer the action until after the write is done. If our action is to
3858 * write, we need to adjust our request for additional DVAs to match
3859 * what will be in the DDT entry after it completes. In this way every
3860	 * IO can be guaranteed to receive enough DVAs simply by joining the
3861 * end of the chain and letting the sequence play out.
3862 */
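	/*
	 * A concrete example of the third case above: a copies=3 write
	 * arrives for an entry that already holds one DVA, with no IO in
	 * flight. We issue the child write with zp_copies reduced to 2;
	 * at READY the two new DVAs are appended to the entry and the
	 * parent BP is filled back out to the full three DVAs.
	 */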
3863
3864 /*
3865	 * Number of DVAs in the DDT entry. For encrypted BPs, the third DVA
3866	 * slot holds encryption parameters rather than data, so it isn't counted.
3867 */
3868 int have_dvas = ddt_phys_dva_count(ddp, v, BP_IS_ENCRYPTED(bp));
3869 IMPLY(have_dvas == 0, ddt_phys_birth(ddp, v) == 0);
3870 boolean_t is_ganged = ddt_phys_is_gang(ddp, v);
3871
3872 /* Number of DVAs requested by the IO. */
3873 uint8_t need_dvas = zp->zp_copies;
3874 /* Number of DVAs in outstanding writes for this dde. */
3875 uint8_t parent_dvas = 0;
3876
3877 /*
3878 * What we do next depends on whether or not there's IO outstanding
3879 * that will update this entry. If dde_io exists, we need to hold
3880 * its lock to safely check and use dde_lead_zio.
3881 */
3882 ddt_entry_io_t *dde_io = dde->dde_io;
3883 if (dde_io != NULL)
3884 mutex_enter(&dde_io->dde_io_lock);
3885
3886 if (dde_io == NULL || dde_io->dde_lead_zio[p] == NULL) {
3887 /*
3888 * No IO outstanding, so we only need to worry about ourselves.
3889 */
3890
3891 /*
3892 * Override BPs bring their own DVAs and their own problems.
3893 */
3894 if (zio->io_bp_override) {
3895 /*
3896 * For a brand-new entry, all the work has been done
3897 * for us, and we can just fill it out from the provided
3898 * block and leave.
3899 */
3900 if (have_dvas == 0) {
3901 if (dde_io != NULL)
3902 mutex_exit(&dde_io->dde_io_lock);
3903 ASSERT(BP_GET_BIRTH(bp) == txg);
3904 ASSERT(BP_EQUAL(bp, zio->io_bp_override));
3905 ddt_phys_extend(ddp, v, bp);
3906 ddt_phys_addref(ddp, v);
3907 ddt_exit(ddt);
3908 return (zio);
3909 }
3910
3911 /*
3912			 * If the entry already exists, then we want to treat
3913			 * this like a regular write. To do that we just wipe
3914			 * the override BP's DVAs out and proceed.
3915 *
3916 * Even if there are some DVAs in the entry, we still
3917 * have to clear them out. We can't use them to fill
3918 * out the dedup entry, as they are all referenced
3919 * together by a bp already on disk, and will be freed
3920 * as a group.
3921 */
3922 BP_ZERO_DVAS(bp);
3923 BP_SET_BIRTH(bp, 0, 0);
3924 }
3925
3926 /*
3927 * If there are enough DVAs in the entry to service our request,
3928 * then we can just use them as-is.
3929 */
3930 if (have_dvas >= need_dvas) {
3931 if (dde_io != NULL)
3932 mutex_exit(&dde_io->dde_io_lock);
3933
3934 /*
3935 * For rewrite operations, try preserving the original
3936 * logical birth time. If the result matches the
3937 * original BP, this becomes a NOP.
3938 */
3939 if (zp->zp_rewrite) {
3940 uint64_t orig_logical_birth =
3941 BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig);
3942 ddt_bp_fill(ddp, v, bp, orig_logical_birth);
3943 if (BP_EQUAL(bp, &zio->io_bp_orig)) {
3944 /* We can skip accounting. */
3945 ddt_exit(ddt);
3946 zio->io_flags |= ZIO_FLAG_NOPWRITE;
3947 return (zio);
3948 }
3949 }
3950
3951 ddt_bp_fill(ddp, v, bp, txg);
3952 ddt_phys_addref(ddp, v);
3953 ddt_exit(ddt);
3954 return (zio);
3955 }
3956
3957 /*
3958 * Otherwise, we have to issue IO to fill the entry up to the
3959 * amount we need.
3960 */
3961 need_dvas -= have_dvas;
3962 } else {
3963 /*
3964 * There's a write in-flight. If there's already enough DVAs on
3965 * the entry, then either there were already enough to start
3966 * with, or the in-flight IO is between READY and DONE, and so
3967 * has extended the entry with new DVAs. Either way, we don't
3968 * need to do anything, we can just slot in behind it.
3969 */
3970
3971 if (zio->io_bp_override) {
3972 /*
3973 * If there's a write out, then we're soon going to
3974 * have our own copies of this block, so clear out the
3975 * override block and treat it as a regular dedup
3976 * write. See comment above.
3977 */
3978 BP_ZERO_DVAS(bp);
3979 BP_SET_BIRTH(bp, 0, 0);
3980 }
3981
3982 if (have_dvas >= need_dvas) {
3983 /*
3984 * A minor point: there might already be enough
3985 * committed DVAs in the entry to service our request,
3986 * but we don't know which are completed and which are
3987 * allocated but not yet written. In this case, should
3988 * the IO for the new DVAs fail, we will be on the end
3989			 * of the IO chain and will also receive an error, even
3990 * though our request could have been serviced.
3991 *
3992 * This is an extremely rare case, as it requires the
3993 * original block to be copied with a request for a
3994 * larger number of DVAs, then copied again requesting
3995 * the same (or already fulfilled) number of DVAs while
3996 * the first request is active, and then that first
3997			 * request errors. In turn, the logic required to
3998 * catch and handle it is complex. For now, I'm just
3999 * not going to bother with it.
4000 */
4001
4002 /*
4003 * We always fill the bp here as we may have arrived
4004 * after the in-flight write has passed READY, and so
4005			 * missed the BP fill that happens at that point.
4006 */
4007 ddt_bp_fill(ddp, v, bp, txg);
4008 piggyback:
4009 zio_add_child(zio, dde_io->dde_lead_zio[p]);
4010
4011 /*
4012 * Optimistically increment refcount for this parent.
4013 * If the write fails, zio_ddt_child_write_done() will
4014 * decrement for all non-DDT-child parents.
4015 */
4016 ddt_phys_addref(ddp, v);
4017 mutex_exit(&dde_io->dde_io_lock);
4018 ddt_exit(ddt);
4019 return (zio);
4020 }
4021
4022 /*
4023 * There's not enough in the entry yet, so we need to look at
4024 * the write in-flight and see how many DVAs it will have once
4025 * it completes.
4026 *
4027 * The in-flight write has potentially had its copies request
4028 * reduced (if we're filling out an existing entry), so we need
4029 * to reach in and get the original write to find out what it is
4030 * expecting.
4031 *
4032 * Note that the parent of the lead zio will always have the
4033 * highest zp_copies of any zio in the chain, because ones that
4034 * can be serviced without additional IO are always added to
4035 * the back of the chain.
4036 */
4037 zio_link_t *zl = NULL;
4038 zio_t *pio =
4039 zio_walk_parents(dde->dde_io->dde_lead_zio[p], &zl);
4040 ASSERT(pio);
4041 parent_dvas = pio->io_prop.zp_copies;
4042
4043 if (parent_dvas >= need_dvas)
4044 goto piggyback;
4045
4046 /*
4047 * Still not enough, so we will need to issue to get the
4048 * shortfall.
4049 */
4050 need_dvas -= parent_dvas;
4051 }
4052
4053 if (is_ganged) {
4054 if (dde_io != NULL)
4055 mutex_exit(&dde_io->dde_io_lock);
4056 ddt_exit(ddt);
4057 zp->zp_dedup = B_FALSE;
4058 BP_SET_DEDUP(bp, B_FALSE);
4059 zio->io_pipeline = ZIO_WRITE_PIPELINE;
4060 return (zio);
4061 }
4062
4063 /*
4064 * We need to write. We will create a new write with the copies
4065 * property adjusted to match the number of DVAs we need to grow
4066 * the DDT entry by to satisfy the request.
4067 */
4068 zio_prop_t czp;
4069 if (have_dvas > 0 || parent_dvas > 0) {
4070 czp = *zp;
4071 czp.zp_copies = need_dvas;
4072 czp.zp_gang_copies = 0;
4073 zp = &czp;
4074 } else {
4075 ASSERT3U(zp->zp_copies, ==, need_dvas);
4076 }
4077
4078 zio_t *cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
4079 zio->io_orig_size, zio->io_orig_size, zp,
4080 zio_ddt_child_write_ready, NULL,
4081 zio_ddt_child_write_done, dde, zio->io_priority,
4082 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
4083 zio_inherit_allocator(zio, cio);
4084
4085 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
4086
4087 /*
4088 * We are the new lead zio, because our parent has the highest
4089 * zp_copies that has been requested for this entry so far.
4090 */
4091 if (dde_io == NULL) {
4092 /*
4093 * New dde_io. No lock needed since no other thread can have
4094 * a reference yet.
4095 */
4096 ddt_alloc_entry_io(dde);
4097 dde_io = dde->dde_io;
4098 /*
4099 * First time out, take a copy of the stable entry to revert
4100 * to if there's an error (see zio_ddt_child_write_done())
4101 */
4102 ddt_phys_copy(&dde_io->dde_orig_phys, dde->dde_phys, v);
4103 dde_io->dde_lead_zio[p] = cio;
4104 } else {
4105 if (dde_io->dde_lead_zio[p] == NULL) {
4106 /*
4107 * First time out, take a copy of the stable entry
4108 * to revert to if there's an error (see
4109 * zio_ddt_child_write_done())
4110 */
4111 ddt_phys_copy(&dde_io->dde_orig_phys, dde->dde_phys,
4112 v);
4113 } else {
4114 /*
4115 * Make the existing chain our child, because it
4116 * cannot complete until we have.
4117 */
4118 zio_add_child(cio, dde_io->dde_lead_zio[p]);
4119 }
4120 dde_io->dde_lead_zio[p] = cio;
4121 mutex_exit(&dde_io->dde_io_lock);
4122 }
4123
4124 /*
4125 * Optimistically increment the refcount for this dedup write.
4126 * If the write fails, zio_ddt_child_write_done() will decrement
4127 * for all non-DDT-child parents.
4128 */
4129 ddt_phys_addref(ddp, v);
4130
4131 ddt_exit(ddt);
4132
4133 zio_nowait(cio);
4134
4135 return (zio);
4136 }
4137
4138 static ddt_entry_t *freedde; /* for debugging */
4139
4140 static zio_t *
4141 zio_ddt_free(zio_t *zio)
4142 {
4143 spa_t *spa = zio->io_spa;
4144 blkptr_t *bp = zio->io_bp;
4145 ddt_t *ddt = ddt_select(spa, bp);
4146 ddt_entry_t *dde = NULL;
4147
4148 ASSERT(BP_GET_DEDUP(bp));
4149 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
4150
4151 ddt_enter(ddt);
4152 freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
4153 if (dde) {
4154 ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
4155 if (v != DDT_PHYS_NONE)
4156 ddt_phys_decref(dde->dde_phys, v);
4157 else
4158 /*
4159 * If the entry was found but the phys was not, then
4160 * this block must have been pruned from the dedup
4161 * table, and the entry refers to a later version of
4162 * this data. Therefore, the caller is trying to delete
4163 * the only stored instance of this block, and so we
4164 * need to do a normal (not dedup) free. Clear dde so
4165 * we fall into the block below.
4166 */
4167 dde = NULL;
4168 }
4169 ddt_exit(ddt);
4170
4171 /*
4172 * When no entry was found, it must have been pruned,
4173 * so we can free it now instead of decrementing the
4174 * refcount in the DDT.
4175 */
4176 if (!dde) {
4177 BP_SET_DEDUP(bp, 0);
4178 zio->io_pipeline |= ZIO_STAGE_DVA_FREE;
4179 }
4180
4181 return (zio);
4182 }
4183
4184 /*
4185 * ==========================================================================
4186 * Allocate and free blocks
4187 * ==========================================================================
4188 */
4189
4190 static zio_t *
4191 zio_io_to_allocate(metaslab_class_allocator_t *mca, boolean_t *more)
4192 {
4193 zio_t *zio;
4194
4195 ASSERT(MUTEX_HELD(&mca->mca_lock));
4196
4197 zio = avl_first(&mca->mca_tree);
4198 if (zio == NULL) {
4199 *more = B_FALSE;
4200 return (NULL);
4201 }
4202
4203 ASSERT(IO_IS_ALLOCATING(zio));
4204 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4205
4206 /*
4207 * Try to place a reservation for this zio. If we're unable to
4208 * reserve then we throttle.
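	 * A failed reservation leaves the zio queued in mca_tree; it will
	 * be retried from zio_allocate_dispatch() when another zio
	 * releases allocation capacity.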
4209 */
4210 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
4211 zio->io_allocator, zio->io_prop.zp_copies, zio->io_size,
4212 B_FALSE, more)) {
4213 return (NULL);
4214 }
4215 zio->io_flags |= ZIO_FLAG_ALLOC_THROTTLED;
4216
4217 avl_remove(&mca->mca_tree, zio);
4218 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
4219
4220 if (avl_is_empty(&mca->mca_tree))
4221 *more = B_FALSE;
4222 return (zio);
4223 }
4224
4225 static zio_t *
4226 zio_dva_throttle(zio_t *zio)
4227 {
4228 spa_t *spa = zio->io_spa;
4229 zio_t *nio;
4230 metaslab_class_t *mc;
4231 boolean_t more;
4232
4233 /*
4234 * If not already chosen, choose an appropriate allocation class.
4235 */
4236 mc = zio->io_metaslab_class;
4237 if (mc == NULL)
4238 mc = spa_preferred_class(spa, zio);
4239
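	/*
	 * Only throttle asynchronous, data-bearing, non-gang writes in a
	 * class with throttling enabled; everything else is issued
	 * immediately.
	 */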
4240 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
4241 !mc->mc_alloc_throttle_enabled ||
4242 zio->io_child_type == ZIO_CHILD_GANG ||
4243 zio->io_flags & ZIO_FLAG_NODATA) {
4244 return (zio);
4245 }
4246
4247 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4248 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4249 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
4250 ASSERT3U(zio->io_queued_timestamp, >, 0);
4251 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
4252
4253 zio->io_metaslab_class = mc;
4254 metaslab_class_allocator_t *mca = &mc->mc_allocator[zio->io_allocator];
4255 mutex_enter(&mca->mca_lock);
4256 avl_add(&mca->mca_tree, zio);
4257 nio = zio_io_to_allocate(mca, &more);
4258 mutex_exit(&mca->mca_lock);
4259 return (nio);
4260 }
4261
4262 static void
4263 zio_allocate_dispatch(metaslab_class_t *mc, int allocator)
4264 {
4265 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
4266 zio_t *zio;
4267 boolean_t more;
4268
4269 do {
4270 mutex_enter(&mca->mca_lock);
4271 zio = zio_io_to_allocate(mca, &more);
4272 mutex_exit(&mca->mca_lock);
4273 if (zio == NULL)
4274 return;
4275
4276 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
4277 ASSERT0(zio->io_error);
4278 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
4279 } while (more);
4280 }
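/*
 * zio_dva_throttle() queues writes and zio_allocate_dispatch() drains
 * them: whenever a reservation is released (for example in the class
 * fallback path of zio_dva_allocate() below), the releaser dispatches as
 * many queued zios as now fit under the throttle.
 */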
4281
4282 static zio_t *
4283 zio_dva_allocate(zio_t *zio)
4284 {
4285 spa_t *spa = zio->io_spa;
4286 metaslab_class_t *mc, *newmc;
4287 blkptr_t *bp = zio->io_bp;
4288 int error;
4289 int flags = 0;
4290
4291 if (zio->io_gang_leader == NULL) {
4292 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
4293 zio->io_gang_leader = zio;
4294 }
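	/*
	 * Gang children whose space was opportunistically allocated by
	 * zio_write_gang_block() arrive with DVAs already staged in
	 * io_bp_orig; copy those (and the birth times) into the live BP
	 * instead of allocating again.
	 */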
4295 if (zio->io_flags & ZIO_FLAG_PREALLOCATED) {
4296 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_GANG);
4297 memcpy(zio->io_bp->blk_dva, zio->io_bp_orig.blk_dva,
4298 3 * sizeof (dva_t));
4299 BP_SET_LOGICAL_BIRTH(zio->io_bp,
4300 BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig));
4301 BP_SET_PHYSICAL_BIRTH(zio->io_bp,
4302 BP_GET_RAW_PHYSICAL_BIRTH(&zio->io_bp_orig));
4303 return (zio);
4304 }
4305
4306 ASSERT(BP_IS_HOLE(bp));
4307 ASSERT0(BP_GET_NDVAS(bp));
4308 ASSERT3U(zio->io_prop.zp_copies, >, 0);
4309
4310 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
4311 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
4312
4313 if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
4314 flags |= METASLAB_GANG_CHILD;
4315 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
4316 flags |= METASLAB_ASYNC_ALLOC;
4317
4318 /*
4319 * If not already chosen, choose an appropriate allocation class.
4320 */
4321 mc = zio->io_metaslab_class;
4322 if (mc == NULL) {
4323 mc = spa_preferred_class(spa, zio);
4324 zio->io_metaslab_class = mc;
4325 }
4326 ZIOSTAT_BUMP(ziostat_total_allocations);
4327
4328 again:
4329 /*
4330 * Try allocating the block in the usual metaslab class.
4331 * If that's full, allocate it in some other class(es).
4332 * If that's full, allocate as a gang block,
4333 * and if all are full, the allocation fails (which shouldn't happen).
4334 *
4335 * Note that we do not fall back on embedded slog (ZIL) space, to
4336 * preserve unfragmented slog space, which is critical for decent
4337 * sync write performance. If a log allocation fails, we will fall
4338 * back to spa_sync() which is abysmal for performance.
4339 */
4340 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4341 error = metaslab_alloc(spa, mc, zio->io_size, bp,
4342 zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
4343 &zio->io_alloc_list, zio->io_allocator, zio);
4344
4345 /*
4346 * When the dedup or special class is spilling into the normal class,
4347 * there can still be significant space available due to deferred
4348 * frees that are in-flight. We track the txg when this occurred and
4349 * back off adding new DDT entries for a few txgs to allow the free
4350 * blocks to be processed.
4351 */
4352 if (error == ENOSPC && spa->spa_dedup_class_full_txg != zio->io_txg &&
4353 (mc == spa_dedup_class(spa) || (mc == spa_special_class(spa) &&
4354 !spa_has_dedup(spa) && spa_special_has_ddt(spa)))) {
4355 spa->spa_dedup_class_full_txg = zio->io_txg;
4356 zfs_dbgmsg("%s[%llu]: %s class spilling, req size %llu, "
4357 "%llu allocated of %llu",
4358 spa_name(spa), (u_longlong_t)zio->io_txg,
4359 metaslab_class_get_name(mc),
4360 (u_longlong_t)zio->io_size,
4361 (u_longlong_t)metaslab_class_get_alloc(mc),
4362 (u_longlong_t)metaslab_class_get_space(mc));
4363 }
4364
4365 /*
4366 * Fall back to some other class when this one is full.
4367 */
4368 if (error == ENOSPC && (newmc = spa_preferred_class(spa, zio)) != mc) {
4369 /*
4370 * If we are holding old class reservation, drop it.
4371 * Dispatch the next ZIO(s) there if some are waiting.
4372 */
4373 if (zio->io_flags & ZIO_FLAG_ALLOC_THROTTLED) {
4374 if (metaslab_class_throttle_unreserve(mc,
4375 zio->io_allocator, zio->io_prop.zp_copies,
4376 zio->io_size)) {
4377 zio_allocate_dispatch(zio->io_metaslab_class,
4378 zio->io_allocator);
4379 }
4380 zio->io_flags &= ~ZIO_FLAG_ALLOC_THROTTLED;
4381 }
4382
4383 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
4384 zfs_dbgmsg("%s: metaslab allocation failure in %s "
4385 "class, trying fallback to %s class: zio %px, "
4386 "size %llu, error %d", spa_name(spa),
4387 metaslab_class_get_name(mc),
4388 metaslab_class_get_name(newmc),
4389 zio, (u_longlong_t)zio->io_size, error);
4390 }
4391 zio->io_metaslab_class = mc = newmc;
4392 ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
4393
4394 /*
4395 * If the new class uses throttling, return to that pipeline
4396 * stage. Otherwise just do another allocation attempt.
4397 */
4398 if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
4399 mc->mc_alloc_throttle_enabled &&
4400 zio->io_child_type != ZIO_CHILD_GANG &&
4401 !(zio->io_flags & ZIO_FLAG_NODATA)) {
4402 zio->io_stage = ZIO_STAGE_DVA_THROTTLE >> 1;
4403 return (zio);
4404 }
4405 goto again;
4406 }
4407
4408 if (error == ENOSPC && zio->io_size > spa->spa_min_alloc) {
4409 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
4410 zfs_dbgmsg("%s: metaslab allocation failure, "
4411 "trying ganging: zio %px, size %llu, error %d",
4412 spa_name(spa), zio, (u_longlong_t)zio->io_size,
4413 error);
4414 }
4415 ZIOSTAT_BUMP(ziostat_gang_writes);
4416 if (flags & METASLAB_GANG_CHILD)
4417 ZIOSTAT_BUMP(ziostat_gang_multilevel);
4418 return (zio_write_gang_block(zio, mc));
4419 }
4420 if (error != 0) {
4421 if (error != ENOSPC ||
4422 (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
4423 zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
4424 "size %llu, error %d",
4425 spa_name(spa), zio, (u_longlong_t)zio->io_size,
4426 error);
4427 }
4428 zio->io_error = error;
4429 } else if (zio->io_prop.zp_rewrite) {
4430 /*
4431 * For rewrite operations, preserve the logical birth time
4432 * but set the physical birth time to the current txg.
4433 */
4434 uint64_t logical_birth = BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig);
4435 ASSERT3U(logical_birth, <=, zio->io_txg);
4436 BP_SET_BIRTH(zio->io_bp, logical_birth, zio->io_txg);
4437 BP_SET_REWRITE(zio->io_bp, 1);
4438 }
4439
4440 return (zio);
4441 }
4442
4443 static zio_t *
4444 zio_dva_free(zio_t *zio)
4445 {
4446 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
4447
4448 return (zio);
4449 }
4450
4451 static zio_t *
4452 zio_dva_claim(zio_t *zio)
4453 {
4454 int error;
4455
4456 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
4457 if (error)
4458 zio->io_error = error;
4459
4460 return (zio);
4461 }
4462
4463 /*
4464 * Undo an allocation. This is used by zio_done() when an I/O fails
4465 * and we want to give back the block we just allocated.
4466 * This handles both normal blocks and gang blocks.
4467 */
4468 static void
4469 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
4470 {
4471 ASSERT(BP_GET_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
4472 ASSERT0P(zio->io_bp_override);
4473
4474 if (!BP_IS_HOLE(bp)) {
4475 metaslab_free(zio->io_spa, bp, BP_GET_BIRTH(bp), B_TRUE);
4476 }
4477
4478 if (gn != NULL) {
4479 for (int g = 0; g < gbh_nblkptrs(gn->gn_gangblocksize); g++) {
4480 zio_dva_unallocate(zio, gn->gn_child[g],
4481 gbh_bp(gn->gn_gbh, g));
4482 }
4483 }
4484 }
4485
4486 /*
4487 * Try to allocate an intent log block. Return 0 on success, errno on failure.
4488 */
4489 int
4490 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
4491 uint64_t min_size, uint64_t max_size, boolean_t *slog,
4492 boolean_t allow_larger)
4493 {
4494 int error;
4495 zio_alloc_list_t io_alloc_list;
4496 uint64_t alloc_size = 0;
4497
4498 ASSERT(txg > spa_syncing_txg(spa));
4499 ASSERT3U(min_size, <=, max_size);
4500
4501 metaslab_trace_init(&io_alloc_list);
4502
4503 /*
4504 * Block pointer fields are useful to metaslabs for stats and debugging.
4505 * Fill in the obvious ones before calling into metaslab_alloc().
4506 */
4507 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
4508 BP_SET_PSIZE(new_bp, max_size);
4509 BP_SET_LEVEL(new_bp, 0);
4510
4511 /*
4512 * When allocating a zil block, we don't have information about
4513 * the final destination of the block except the objset it's part
4514 * of, so we just hash the objset ID to pick the allocator to get
4515 * some parallelism.
4516 */
4517 int flags = METASLAB_ZIL;
4518 int allocator = (uint_t)cityhash1(os->os_dsl_dataset->ds_object)
4519 % spa->spa_alloc_count;
4520 ZIOSTAT_BUMP(ziostat_total_allocations);
4521
4522 /* Try log class (dedicated slog devices) first */
4523 error = metaslab_alloc_range(spa, spa_log_class(spa), min_size,
4524 max_size, new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
4525 NULL, &alloc_size);
4526 *slog = (error == 0);
4527
4528 /* Try special_embedded_log class (reserved on special vdevs) */
4529 if (error != 0) {
4530 error = metaslab_alloc_range(spa,
4531 spa_special_embedded_log_class(spa), min_size, max_size,
4532 new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
4533 NULL, &alloc_size);
4534 }
4535
4536 /* Try special class (general special vdev allocation) */
4537 if (error != 0) {
4538 error = metaslab_alloc_range(spa, spa_special_class(spa),
4539 min_size, max_size, new_bp, 1, txg, NULL, flags,
4540 &io_alloc_list, allocator, NULL, &alloc_size);
4541 }
4542
4543 /* Try embedded_log class (reserved on normal vdevs) */
4544 if (error != 0) {
4545 error = metaslab_alloc_range(spa, spa_embedded_log_class(spa),
4546 min_size, max_size, new_bp, 1, txg, NULL, flags,
4547 &io_alloc_list, allocator, NULL, &alloc_size);
4548 }
4549
4550 /* Finally fall back to normal class */
4551 if (error != 0) {
4552 ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
4553 error = metaslab_alloc_range(spa, spa_normal_class(spa),
4554 min_size, max_size, new_bp, 1, txg, NULL, flags,
4555 &io_alloc_list, allocator, NULL, &alloc_size);
4556 }
4557 metaslab_trace_fini(&io_alloc_list);
4558
4559 if (error == 0) {
4560 if (!allow_larger)
4561 alloc_size = MIN(alloc_size, max_size);
4562 else if (max_size <= SPA_OLD_MAXBLOCKSIZE)
4563 alloc_size = MIN(alloc_size, SPA_OLD_MAXBLOCKSIZE);
4564 alloc_size = P2ALIGN_TYPED(alloc_size, ZIL_MIN_BLKSZ, uint64_t);
4565
4566 BP_SET_LSIZE(new_bp, alloc_size);
4567 BP_SET_PSIZE(new_bp, alloc_size);
4568 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
4569 BP_SET_CHECKSUM(new_bp,
4570 spa_version(spa) >= SPA_VERSION_SLIM_ZIL
4571 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
4572 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
4573 BP_SET_LEVEL(new_bp, 0);
4574 BP_SET_DEDUP(new_bp, 0);
4575 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
4576
4577 /*
4578		 * Encrypted blocks will require an IV and salt. We generate
4579		 * these now, since the encryption parameters embedded in the
4580		 * bp will not be regenerated when the block is rewritten.
4581 */
4582 if (os->os_encrypted) {
4583 uint8_t iv[ZIO_DATA_IV_LEN];
4584 uint8_t salt[ZIO_DATA_SALT_LEN];
4585
4586 BP_SET_CRYPT(new_bp, B_TRUE);
4587 VERIFY0(spa_crypt_get_salt(spa,
4588 dmu_objset_id(os), salt));
4589 VERIFY0(zio_crypt_generate_iv(iv));
4590
4591 zio_crypt_encode_params_bp(new_bp, salt, iv);
4592 }
4593 } else {
4594 zfs_dbgmsg("%s: zil block allocation failure: "
4595 "min_size %llu, max_size %llu, error %d", spa_name(spa),
4596 (u_longlong_t)min_size, (u_longlong_t)max_size, error);
4597 }
4598
4599 return (error);
4600 }
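/*
 * An illustrative call (with hypothetical sizes; in practice the ZIL
 * writer calls this when opening a new log block):
 *
 *	boolean_t slog;
 *	blkptr_t blk;
 *	BP_ZERO(&blk);
 *	error = zio_alloc_zil(spa, os, txg, &blk, 4096, 131072,
 *	    &slog, B_FALSE);
 *
 * On success, blk describes a log block of 4K-128K (whatever the chosen
 * class could provide, rounded to a ZIL_MIN_BLKSZ multiple), and slog is
 * B_TRUE iff it landed in the dedicated log class.
 */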
4601
4602 /*
4603 * ==========================================================================
4604 * Read and write to physical devices
4605 * ==========================================================================
4606 */
4607
4608 /*
4609 * Issue an I/O to the underlying vdev. Typically the issue pipeline
4610 * stops after this stage and will resume upon I/O completion.
4611 * However, there are instances where the vdev layer may need to
4612 * continue the pipeline when an I/O was not issued. Since the I/O
4613 * that was sent to the vdev layer might be different than the one
4614 * currently active in the pipeline (see vdev_queue_io()), we explicitly
4615 * force the underlying vdev layers to call either zio_execute() or
4616 * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
4617 */
4618 static zio_t *
4619 zio_vdev_io_start(zio_t *zio)
4620 {
4621 vdev_t *vd = zio->io_vd;
4622 uint64_t align;
4623 spa_t *spa = zio->io_spa;
4624
4625 zio->io_delay = 0;
4626
4627 ASSERT0(zio->io_error);
4628 ASSERT0(zio->io_child_error[ZIO_CHILD_VDEV]);
4629
4630 if (vd == NULL) {
4631 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) {
4632 /*
4633 * A deadlock workaround. The ddt_prune_unique_entries()
4634 * -> prune_candidates_sync() code path takes the
4635 * SCL_ZIO reader lock and may request it again here.
4636 * If there is another thread who wants the SCL_ZIO
4637 * writer lock, then scl_write_wanted will be set.
4638 * Thus, the spa_config_enter_priority() is used to
4639 * ignore pending writer requests.
4640 *
4641 * The locking should be revised to remove the need
4642 * for this workaround. If that's not workable then
4643 * it should only be applied to the zios involved in
4644 * the pruning process. This impacts the read/write
4645 * I/O balance while pruning.
4646 */
4647 if (spa->spa_active_ddt_prune)
4648 spa_config_enter_priority(spa, SCL_ZIO, zio,
4649 RW_READER);
4650 else
4651 spa_config_enter(spa, SCL_ZIO, zio,
4652 RW_READER);
4653 }
4654
4655 /*
4656 * The mirror_ops handle multiple DVAs in a single BP.
4657 */
4658 vdev_mirror_ops.vdev_op_io_start(zio);
4659 return (NULL);
4660 }
4661
4662 ASSERT3P(zio->io_logical, !=, zio);
4663 if (zio->io_type == ZIO_TYPE_WRITE) {
4664 ASSERT(spa->spa_trust_config);
4665
4666 /*
4667 * Note: the code can handle other kinds of writes,
4668 * but we don't expect them.
4669 */
4670 if (zio->io_vd->vdev_noalloc) {
4671 ASSERT(zio->io_flags &
4672 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
4673 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
4674 }
4675 }
4676
4677 align = 1ULL << vd->vdev_top->vdev_ashift;
4678
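	/*
	 * For example, on an ashift=12 top-level vdev a 6K logical zio is
	 * padded out to 8K: a write copies the 6K of data and zeroes the
	 * 2K tail, while zio_subblock() copies only the original range
	 * back out when a read completes.
	 */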
4679 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
4680 P2PHASE(zio->io_size, align) != 0) {
4681 /* Transform logical writes to be a full physical block size. */
4682 uint64_t asize = P2ROUNDUP(zio->io_size, align);
4683 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
4684 ASSERT(vd == vd->vdev_top);
4685 if (zio->io_type == ZIO_TYPE_WRITE) {
4686 abd_copy(abuf, zio->io_abd, zio->io_size);
4687 abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
4688 }
4689 zio_push_transform(zio, abuf, asize, asize, zio_subblock);
4690 }
4691
4692 /*
4693 * If this is not a physical io, make sure that it is properly aligned
4694 * before proceeding.
4695 */
4696 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
4697 ASSERT0(P2PHASE(zio->io_offset, align));
4698 ASSERT0(P2PHASE(zio->io_size, align));
4699 } else {
4700 /*
4701 * For physical writes, we allow 512b aligned writes and assume
4702 * the device will perform a read-modify-write as necessary.
4703 */
4704 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
4705 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
4706 }
4707
4708 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
4709
4710 /*
4711 * If this is a repair I/O, and there's no self-healing involved --
4712 * that is, we're just resilvering what we expect to resilver --
4713 * then don't do the I/O unless zio's txg is actually in vd's DTL.
4714 * This prevents spurious resilvering.
4715 *
4716 * There are a few ways that we can end up creating these spurious
4717 * resilver i/os:
4718 *
4719 * 1. A resilver i/o will be issued if any DVA in the BP has a
4720 * dirty DTL. The mirror code will issue resilver writes to
4721 * each DVA, including the one(s) that are not on vdevs with dirty
4722 * DTLs.
4723 *
4724 * 2. With nested replication, which happens when we have a
4725 * "replacing" or "spare" vdev that's a child of a mirror or raidz.
4726 * For example, given mirror(replacing(A+B), C), it's likely that
4727 * only A is out of date (it's the new device). In this case, we'll
4728 * read from C, then use the data to resilver A+B -- but we don't
4729 * actually want to resilver B, just A. The top-level mirror has no
4730 * way to know this, so instead we just discard unnecessary repairs
4731 * as we work our way down the vdev tree.
4732 *
4733 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
4734 * The same logic applies to any form of nested replication: ditto
4735 * + mirror, RAID-Z + replacing, etc.
4736 *
4737 * However, indirect vdevs point off to other vdevs which may have
4738 * DTL's, so we never bypass them. The child i/os on concrete vdevs
4739 * will be properly bypassed instead.
4740 *
4741 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from
4742 * a dRAID spare vdev. For example, when a dRAID spare is first
4743 * used, its spare blocks need to be written to but the leaf vdev's
4744 * of such blocks can have empty DTL_PARTIAL.
4745 *
4746 * There seemed no clean way to allow such writes while bypassing
4747 * spurious ones. At this point, just avoid all bypassing for dRAID
4748 * for correctness.
4749 */
4750 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
4751 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
4752 zio->io_txg != 0 && /* not a delegated i/o */
4753 vd->vdev_ops != &vdev_indirect_ops &&
4754 vd->vdev_top->vdev_ops != &vdev_draid_ops &&
4755 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
4756 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4757 zio_vdev_io_bypass(zio);
4758 return (zio);
4759 }
4760
4761 /*
4762 * Select the next best leaf I/O to process. Distributed spares are
4763 * excluded since they dispatch the I/O directly to a leaf vdev after
4764 * applying the dRAID mapping.
4765 */
4766 if (vd->vdev_ops->vdev_op_leaf &&
4767 vd->vdev_ops != &vdev_draid_spare_ops &&
4768 (zio->io_type == ZIO_TYPE_READ ||
4769 zio->io_type == ZIO_TYPE_WRITE ||
4770 zio->io_type == ZIO_TYPE_TRIM)) {
4771
4772 if ((zio = vdev_queue_io(zio)) == NULL)
4773 return (NULL);
4774
4775 if (!vdev_accessible(vd, zio)) {
4776 zio->io_error = SET_ERROR(ENXIO);
4777 zio_interrupt(zio);
4778 return (NULL);
4779 }
4780 zio->io_delay = gethrtime();
4781
4782 int error = zio_handle_device_injections(vd, zio, ENOSYS,
4783 EFAULT);
4784 if (error == ENOSYS || (error == EFAULT &&
4785 !(zio->io_flags & ZIO_FLAG_IO_REPAIR))) {
4786 /*
4787 * "no-op" injections return success, but do no actual
4788 * work. Just return it. "io-prefail" injections are
4789 * similar, but don't return success.
4790 */
4791 if (error == EFAULT)
4792 zio->io_error = EIO;
4793 zio_delay_interrupt(zio);
4794 return (NULL);
4795 }
4796 }
4797
4798 vd->vdev_ops->vdev_op_io_start(zio);
4799 return (NULL);
4800 }
4801
4802 static zio_t *
4803 zio_vdev_io_done(zio_t *zio)
4804 {
4805 vdev_t *vd = zio->io_vd;
4806 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
4807 boolean_t unexpected_error = B_FALSE;
4808
4809 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4810 return (NULL);
4811 }
4812
4813 ASSERT(zio->io_type == ZIO_TYPE_READ ||
4814 zio->io_type == ZIO_TYPE_WRITE ||
4815 zio->io_type == ZIO_TYPE_FLUSH ||
4816 zio->io_type == ZIO_TYPE_TRIM);
4817
4818 if (zio->io_delay)
4819 zio->io_delay = gethrtime() - zio->io_delay;
4820
4821 if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4822 vd->vdev_ops != &vdev_draid_spare_ops) {
4823 if (zio->io_type != ZIO_TYPE_FLUSH)
4824 vdev_queue_io_done(zio);
4825
4826 if (zio_injection_enabled && zio->io_error == 0)
4827 zio->io_error = zio_handle_device_injections(vd, zio,
4828 EIO, EILSEQ);
4829
4830 if (zio_injection_enabled && zio->io_error == 0)
4831 zio->io_error = zio_handle_label_injection(zio, EIO);
4832
4833 if (zio->io_error && zio->io_type != ZIO_TYPE_FLUSH &&
4834 zio->io_type != ZIO_TYPE_TRIM) {
4835 if (!vdev_accessible(vd, zio)) {
4836 zio->io_error = SET_ERROR(ENXIO);
4837 } else {
4838 unexpected_error = B_TRUE;
4839 }
4840 }
4841 }
4842
4843 ops->vdev_op_io_done(zio);
4844
4845 if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
4846 VERIFY0P(vdev_probe(vd, zio));
4847
4848 return (zio);
4849 }
4850
4851 /*
4852 * This function is used to change the priority of an existing zio that is
4853 * currently in-flight. This is used by the arc to upgrade priority in the
4854 * event that a demand read is made for a block that is currently queued
4855 * as a scrub or async read IO. Otherwise, the high priority read request
4856 * would end up having to wait for the lower priority IO.
4857 */
4858 void
4859 zio_change_priority(zio_t *pio, zio_priority_t priority)
4860 {
4861 zio_t *cio, *cio_next;
4862 zio_link_t *zl = NULL;
4863
4864 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
4865
4866 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
4867 vdev_queue_change_io_priority(pio, priority);
4868 } else {
4869 pio->io_priority = priority;
4870 }
4871
4872 mutex_enter(&pio->io_lock);
4873 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
4874 cio_next = zio_walk_children(pio, &zl);
4875 zio_change_priority(cio, priority);
4876 }
4877 mutex_exit(&pio->io_lock);
4878 }
4879
4880 /*
4881 * For non-raidz ZIOs, we can just copy aside the bad data read from the
4882 * disk, and use that to finish the checksum ereport later.
4883 */
4884 static void
4885 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
4886 const abd_t *good_buf)
4887 {
4888 /* no processing needed */
4889 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
4890 }
4891
4892 void
4893 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
4894 {
4895 void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
4896
4897 abd_copy(abd, zio->io_abd, zio->io_size);
4898
4899 zcr->zcr_cbinfo = zio->io_size;
4900 zcr->zcr_cbdata = abd;
4901 zcr->zcr_finish = zio_vsd_default_cksum_finish;
4902 zcr->zcr_free = zio_abd_free;
4903 }
4904
4905 static zio_t *
4906 zio_vdev_io_assess(zio_t *zio)
4907 {
4908 vdev_t *vd = zio->io_vd;
4909
4910 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4911 return (NULL);
4912 }
4913
4914 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
4915 spa_config_exit(zio->io_spa, SCL_ZIO, zio);
4916
4917 if (zio->io_vsd != NULL) {
4918 zio->io_vsd_ops->vsd_free(zio);
4919 zio->io_vsd = NULL;
4920 }
4921
4922 /*
4923 * If a Direct I/O operation has a checksum verify error then this I/O
4924 * should not attempt to be issued again.
4925 */
4926 if (zio->io_post & ZIO_POST_DIO_CHKSUM_ERR) {
4927 if (zio->io_type == ZIO_TYPE_WRITE) {
4928 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_LOGICAL);
4929 ASSERT3U(zio->io_error, ==, EIO);
4930 }
4931 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4932 return (zio);
4933 }
4934
4935 if (zio_injection_enabled && zio->io_error == 0)
4936 zio->io_error = zio_handle_fault_injection(zio, EIO);
4937
4938 /*
4939 * If the I/O failed, determine whether we should attempt to retry it.
4940 *
4941 * On retry, we cut in line in the issue queue, since we don't want
4942 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
4943 */
4944 if (zio->io_error && vd == NULL &&
4945 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
4946 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
4947 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
4948 zio->io_error = 0;
4949 zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
4950 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
4951 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
4952 zio_requeue_io_start_cut_in_line);
4953 return (NULL);
4954 }
4955
4956 /*
4957 * If we got an error on a leaf device, convert it to ENXIO
4958 * if the device is not accessible at all.
4959 */
4960 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4961 !vdev_accessible(vd, zio))
4962 zio->io_error = SET_ERROR(ENXIO);
4963
4964 /*
4965 * If we can't write to an interior vdev (mirror or RAID-Z),
4966 * set vdev_cant_write so that we stop trying to allocate from it.
4967 */
4968 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
4969 vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
4970 vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
4971 "cant_write=TRUE due to write failure with ENXIO",
4972 zio);
4973 vd->vdev_cant_write = B_TRUE;
4974 }
4975
4976 /*
4977 * If a cache flush returns ENOTSUP we know that no future
4978 * attempts will ever succeed. In this case we set a persistent
4979 * boolean flag so that we don't bother with it in the future, and
4980 * then we act like the flush succeeded.
4981 */
4982 if (zio->io_error == ENOTSUP && zio->io_type == ZIO_TYPE_FLUSH &&
4983 vd != NULL) {
4984 vd->vdev_nowritecache = B_TRUE;
4985 zio->io_error = 0;
4986 }
4987
4988 if (zio->io_error)
4989 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4990
4991 return (zio);
4992 }
4993
4994 void
zio_vdev_io_reissue(zio_t * zio)4995 zio_vdev_io_reissue(zio_t *zio)
4996 {
4997 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4998 ASSERT0(zio->io_error);
4999
5000 zio->io_stage >>= 1;
5001 }
5002
5003 void
zio_vdev_io_redone(zio_t * zio)5004 zio_vdev_io_redone(zio_t *zio)
5005 {
5006 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
5007
5008 zio->io_stage >>= 1;
5009 }
5010
5011 void
zio_vdev_io_bypass(zio_t * zio)5012 zio_vdev_io_bypass(zio_t *zio)
5013 {
5014 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
5015 ASSERT0(zio->io_error);
5016
5017 zio->io_flags |= ZIO_FLAG_IO_BYPASS;
5018 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
5019 }
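
/*
 * Pipeline stages are one-hot bits, so "io_stage >>= 1" rewinds a zio to
 * just before its current stage: zio_execute() advances to the lowest bit
 * set in io_pipeline that is strictly greater than io_stage, and will
 * therefore select the same stage again. A minimal sketch of how a vdev
 * might use the helpers above (hypothetical caller, not from this file):
 */
#if 0
	zio_vdev_io_reissue(zio);	/* back io_stage up one bit */
	zio_execute(zio);		/* re-enters zio_vdev_io_start() */
#endif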

/*
 * ==========================================================================
 * Encrypt and store encryption parameters
 * ==========================================================================
 */

/*
 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
 * managing the storage of encryption parameters and passing them to the
 * lower-level encryption functions.
 */
static zio_t *
zio_encrypt(zio_t *zio)
{
	zio_prop_t *zp = &zio->io_prop;
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	uint64_t psize = BP_GET_PSIZE(bp);
	uint64_t dsobj = zio->io_bookmark.zb_objset;
	dmu_object_type_t ot = BP_GET_TYPE(bp);
	void *enc_buf = NULL;
	abd_t *eabd = NULL;
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	/* the root zio already encrypted the data */
	if (zio->io_child_type == ZIO_CHILD_GANG)
		return (zio);

	/* only ZIL blocks are re-encrypted on rewrite */
	if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
		return (zio);

	if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
		BP_SET_CRYPT(bp, B_FALSE);
		return (zio);
	}

	/* if we are doing raw encryption set the provided encryption params */
	if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
		ASSERT0(BP_GET_LEVEL(bp));
		BP_SET_CRYPT(bp, B_TRUE);
		BP_SET_BYTEORDER(bp, zp->zp_byteorder);
		if (ot != DMU_OT_OBJSET)
			zio_crypt_encode_mac_bp(bp, zp->zp_mac);

		/* dnode blocks must be written out in the provided byteorder */
		if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
		    ot == DMU_OT_DNODE) {
			void *bswap_buf = zio_buf_alloc(psize);
			abd_t *babd = abd_get_from_buf(bswap_buf, psize);

			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
			abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
			dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
			    psize);

			abd_take_ownership_of_buf(babd, B_TRUE);
			zio_push_transform(zio, babd, psize, psize, NULL);
		}

		if (DMU_OT_IS_ENCRYPTED(ot))
			zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
		return (zio);
	}

	/* indirect blocks only maintain a cksum of the lower level MACs */
	if (BP_GET_LEVEL(bp) > 0) {
		BP_SET_CRYPT(bp, B_TRUE);
		VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
		    zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
		    mac));
		zio_crypt_encode_mac_bp(bp, mac);
		return (zio);
	}

	/*
	 * Objset blocks are a special case since they have two 256-bit MACs
	 * embedded within them.
	 */
	if (ot == DMU_OT_OBJSET) {
		ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
		ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
		BP_SET_CRYPT(bp, B_TRUE);
		VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
		    zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
		return (zio);
	}

	/* unencrypted object types are only authenticated with a MAC */
	if (!DMU_OT_IS_ENCRYPTED(ot)) {
		BP_SET_CRYPT(bp, B_TRUE);
		VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
		    zio->io_abd, psize, mac));
		zio_crypt_encode_mac_bp(bp, mac);
		return (zio);
	}

	/*
	 * Later passes of sync-to-convergence may decide to rewrite data
	 * in place to avoid more disk reallocations. This presents a problem
	 * for encryption because this constitutes rewriting the new data with
	 * the same encryption key and IV. However, this only applies to blocks
	 * in the MOS (particularly the spacemaps) and we do not encrypt the
	 * MOS. We assert that the zio is allocating or an intent log write
	 * to enforce this.
	 */
	ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
	ASSERT3U(psize, !=, 0);

	enc_buf = zio_buf_alloc(psize);
	eabd = abd_get_from_buf(enc_buf, psize);
	abd_take_ownership_of_buf(eabd, B_TRUE);

	/*
	 * For an explanation of what encryption parameters are stored
	 * where, see the block comment in zio_crypt.c.
	 */
	if (ot == DMU_OT_INTENT_LOG) {
		zio_crypt_decode_params_bp(bp, salt, iv);
	} else {
		BP_SET_CRYPT(bp, B_TRUE);
	}

	/* Perform the encryption. This should not fail. */
	VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
	    BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
	    salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));

	/* encode encryption metadata into the bp */
	if (ot == DMU_OT_INTENT_LOG) {
		/*
		 * ZIL blocks store the MAC in the embedded checksum, so the
		 * transform must always be applied.
		 */
		zio_crypt_encode_mac_zil(enc_buf, mac);
		zio_push_transform(zio, eabd, psize, psize, NULL);
	} else {
		BP_SET_CRYPT(bp, B_TRUE);
		zio_crypt_encode_params_bp(bp, salt, iv);
		zio_crypt_encode_mac_bp(bp, mac);

		if (no_crypt) {
			ASSERT3U(ot, ==, DMU_OT_DNODE);
			abd_free(eabd);
		} else {
			zio_push_transform(zio, eabd, psize, psize, NULL);
		}
	}

	return (zio);
}
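
/*
 * For reference, the parameter placement (authoritatively documented in the
 * block comment in zio_crypt.c): an encrypted BP gives up its third DVA,
 * which is reused to hold the salt and most of the IV, and the 128-bit MAC
 * takes over the upper half of blk_cksum, truncating the stored checksum.
 * A sketch of reading the parameters back out with the same codec functions
 * used above:
 */
#if 0
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];

	zio_crypt_decode_params_bp(bp, salt, iv);	/* salt + IV */
	zio_crypt_decode_mac_bp(bp, mac);		/* MAC */
#endif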

/*
 * ==========================================================================
 * Generate and verify checksums
 * ==========================================================================
 */
static zio_t *
zio_checksum_generate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	enum zio_checksum checksum;

	if (bp == NULL) {
		/*
		 * This is zio_write_phys().
		 * We're either generating a label checksum, or none at all.
		 */
		checksum = zio->io_prop.zp_checksum;

		if (checksum == ZIO_CHECKSUM_OFF)
			return (zio);

		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
	} else {
		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
			ASSERT(!IO_IS_ALLOCATING(zio));
			checksum = ZIO_CHECKSUM_GANG_HEADER;
		} else {
			checksum = BP_GET_CHECKSUM(bp);
		}
	}

	zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);

	return (zio);
}

static zio_t *
zio_checksum_verify(zio_t *zio)
{
	zio_bad_cksum_t info;
	blkptr_t *bp = zio->io_bp;
	int error;

	ASSERT(zio->io_vd != NULL);

	if (bp == NULL) {
		/*
		 * This is zio_read_phys().
		 * We're either verifying a label checksum, or nothing at all.
		 */
		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
			return (zio);

		ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
	}

	ASSERT0(zio->io_post & ZIO_POST_DIO_CHKSUM_ERR);
	IMPLY(zio->io_flags & ZIO_FLAG_DIO_READ,
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE));

	if ((error = zio_checksum_error(zio, &info)) != 0) {
		zio->io_error = error;
		if (error == ECKSUM &&
		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
			if (zio->io_flags & ZIO_FLAG_DIO_READ) {
				zio->io_post |= ZIO_POST_DIO_CHKSUM_ERR;
				zio_t *pio = zio_unique_parent(zio);
				/*
				 * Any Direct I/O read that has a checksum
				 * error must be treated as suspicious, as the
				 * contents of the buffer could be getting
				 * manipulated while the I/O is taking place.
				 *
				 * The checksum verify error will only be
				 * reported here for disk and file VDEVs, and
				 * will be reported on those that the failure
				 * occurred on. Other types of VDEVs report the
				 * verify failure in their own code paths.
				 */
				if (pio->io_child_type == ZIO_CHILD_LOGICAL) {
					zio_dio_chksum_verify_error_report(zio);
				}
			} else {
				mutex_enter(&zio->io_vd->vdev_stat_lock);
				zio->io_vd->vdev_stat.vs_checksum_errors++;
				mutex_exit(&zio->io_vd->vdev_stat_lock);
				(void) zfs_ereport_start_checksum(zio->io_spa,
				    zio->io_vd, &zio->io_bookmark, zio,
				    zio->io_offset, zio->io_size, &info);
			}
		}
	}

	return (zio);
}

static zio_t *
zio_dio_checksum_verify(zio_t *zio)
{
	zio_t *pio = zio_unique_parent(zio);
	int error;

	ASSERT3P(zio->io_vd, !=, NULL);
	ASSERT3P(zio->io_bp, !=, NULL);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
	ASSERT3B(pio->io_prop.zp_direct_write, ==, B_TRUE);
	ASSERT3U(pio->io_child_type, ==, ZIO_CHILD_LOGICAL);

	if (zfs_vdev_direct_write_verify == 0 || zio->io_error != 0)
		goto out;

	if ((error = zio_checksum_error(zio, NULL)) != 0) {
		zio->io_error = error;
		if (error == ECKSUM) {
			zio->io_post |= ZIO_POST_DIO_CHKSUM_ERR;
			zio_dio_chksum_verify_error_report(zio);
		}
	}

out:
	return (zio);
}

/*
 * Called by RAID-Z to ensure we don't compute the checksum twice.
 */
void
zio_checksum_verified(zio_t *zio)
{
	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
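
/*
 * Illustrative (hypothetical caller): a vdev that has already validated the
 * data itself, as RAID-Z does during reconstruction, clears the stage bit so
 * the generic pipeline does not verify the checksum a second time:
 *
 *	zio_checksum_verified(zio);	// clears ZIO_STAGE_CHECKSUM_VERIFY
 */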

/*
 * Report Direct I/O checksum verify error and create ZED event.
 */
void
zio_dio_chksum_verify_error_report(zio_t *zio)
{
	ASSERT(zio->io_post & ZIO_POST_DIO_CHKSUM_ERR);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL)
		return;

	mutex_enter(&zio->io_vd->vdev_stat_lock);
	zio->io_vd->vdev_stat.vs_dio_verify_errors++;
	mutex_exit(&zio->io_vd->vdev_stat_lock);
	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * Convert checksum error for writes into EIO.
		 */
		zio->io_error = SET_ERROR(EIO);
		/*
		 * Report dio_verify_wr ZED event.
		 */
		(void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_WR,
		    zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
	} else {
		/*
		 * Report dio_verify_rd ZED event.
		 */
		(void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_RD,
		    zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
	}
}

/*
 * ==========================================================================
 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
 * An error of 0 indicates success. ENXIO indicates whole-device failure,
 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
 * indicate errors that are specific to one I/O, and most likely permanent.
 * Any other error is presumed to be worse because we weren't expecting it.
 * ==========================================================================
 */
int
zio_worst_error(int e1, int e2)
{
	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
	int r1, r2;

	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
		if (e1 == zio_error_rank[r1])
			break;

	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
		if (e2 == zio_error_rank[r2])
			break;

	return (r1 > r2 ? e1 : e2);
}
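
/*
 * Worked examples of the ranking above:
 *
 *	zio_worst_error(0, ENXIO)      == ENXIO	  (any error beats success)
 *	zio_worst_error(ENXIO, ECKSUM) == ECKSUM  (per-I/O errors outrank
 *						   whole-device errors)
 *	zio_worst_error(EIO, EINVAL)   == EINVAL  (an unlisted errno falls off
 *						   the end of the table and
 *						   therefore ranks worst)
 */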

/*
 * ==========================================================================
 * I/O completion
 * ==========================================================================
 */
static zio_t *
zio_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	zio_t *pio, *pio_next;
	zio_link_t *zl = NULL;

	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
	    ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
		return (NULL);
	}

	if (zio_injection_enabled) {
		hrtime_t target = zio_handle_ready_delay(zio);
		if (target != 0 && zio->io_target_timestamp == 0) {
			zio->io_stage >>= 1;
			zio->io_target_timestamp = target;
			zio_delay_interrupt(zio);
			return (NULL);
		}
	}

	if (zio->io_ready) {
		ASSERT(IO_IS_ALLOCATING(zio));
		ASSERT(BP_GET_BIRTH(bp) == zio->io_txg ||
		    BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
		ASSERT0(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY]);

		zio->io_ready(zio);
	}

#ifdef ZFS_DEBUG
	if (bp != NULL && bp != &zio->io_bp_copy)
		zio->io_bp_copy = *bp;
#endif

	if (zio->io_error != 0) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (zio->io_flags & ZIO_FLAG_ALLOC_THROTTLED) {
			ASSERT(IO_IS_ALLOCATING(zio));
			ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
			ASSERT(zio->io_metaslab_class != NULL);
			ASSERT(ZIO_HAS_ALLOCATOR(zio));

			/*
			 * We were unable to allocate anything, unreserve and
			 * issue the next I/O to allocate.
			 */
			if (metaslab_class_throttle_unreserve(
			    zio->io_metaslab_class, zio->io_allocator,
			    zio->io_prop.zp_copies, zio->io_size)) {
				zio_allocate_dispatch(zio->io_metaslab_class,
				    zio->io_allocator);
			}
		}
	}

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_READY] = 1;
	pio = zio_walk_parents(zio, &zl);
	mutex_exit(&zio->io_lock);

	/*
	 * As we notify zio's parents, new parents could be added.
	 * New parents go to the head of zio's io_parent_list, however,
	 * so we will (correctly) not notify them. The remainder of zio's
	 * io_parent_list, from 'pio_next' onward, cannot change because
	 * all parents must wait for us to be done before they can be done.
	 */
	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio, &zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
	}

	if (zio->io_flags & ZIO_FLAG_NODATA) {
		if (bp != NULL && BP_IS_GANG(bp)) {
			zio->io_flags &= ~ZIO_FLAG_NODATA;
		} else {
			ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		}
	}

	if (zio_injection_enabled &&
	    zio->io_spa->spa_syncing_txg == zio->io_txg)
		zio_handle_ignored_writes(zio);

	return (zio);
}
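
/*
 * Note that zio_wait_for_children() at the top of zio_ready() is what makes
 * the stage restartable: if any logical/gang/DDT child still owes a READY
 * notification, the stage returns NULL, and the last such child re-dispatches
 * this zio via zio_notify_parent().
 */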

/*
 * Update the allocation throttle accounting.
 */
static void
zio_dva_throttle_done(zio_t *zio)
{
	zio_t *pio = zio_unique_parent(zio);
	vdev_t *vd = zio->io_vd;
	int flags = METASLAB_ASYNC_ALLOC;
	const void *tag = pio;
	uint64_t size = pio->io_size;

	ASSERT3P(zio->io_bp, !=, NULL);
	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
	ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
	ASSERT(vd != NULL);
	ASSERT3P(vd, ==, vd->vdev_top);
	ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
	ASSERT(zio->io_flags & ZIO_FLAG_ALLOC_THROTTLED);

	/*
	 * Parents of gang children can have two flavors -- ones that allocated
	 * the gang header (will have ZIO_FLAG_IO_REWRITE set) and ones that
	 * allocated the constituent blocks. The first use their parent as tag.
	 * We set the size to match the original allocation call for that case.
	 */
	if (pio->io_child_type == ZIO_CHILD_GANG &&
	    (pio->io_flags & ZIO_FLAG_IO_REWRITE)) {
		tag = zio_unique_parent(pio);
		size = SPA_OLD_GANGBLOCKSIZE;
	}

	ASSERT(IO_IS_ALLOCATING(pio) || (pio->io_child_type == ZIO_CHILD_GANG &&
	    (pio->io_flags & ZIO_FLAG_IO_REWRITE)));
	ASSERT(ZIO_HAS_ALLOCATOR(pio));
	ASSERT3P(zio, !=, zio->io_logical);
	ASSERT(zio->io_logical != NULL);
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
	ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
	ASSERT(zio->io_metaslab_class != NULL);
	ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);

	metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id,
	    pio->io_allocator, flags, size, tag);

	if (metaslab_class_throttle_unreserve(pio->io_metaslab_class,
	    pio->io_allocator, 1, pio->io_size)) {
		zio_allocate_dispatch(zio->io_metaslab_class,
		    pio->io_allocator);
	}
}
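
/*
 * A note on the accounting above (an assumption based on the write path,
 * not stated here explicitly): the logical parent reserved zp_copies units
 * up front, and one top-level vdev child completes per DVA written, so each
 * call here returns a single copy's worth (1 x pio->io_size) of that
 * reservation.
 */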

static void
zio_done_postread_done(zio_t *zio)
{
	abd_free(zio->io_abd);
}

static zio_t *
zio_done(zio_t *zio)
{
	/*
	 * Always attempt to keep stack usage minimal here since
	 * we can be called recursively up to 19 levels deep.
	 */
	const uint64_t psize = zio->io_size;
	zio_t *pio, *pio_next;
	zio_link_t *zl = NULL;

	/*
	 * If our children haven't all completed,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
		return (NULL);
	}

	/*
	 * If the allocation throttle is enabled, then update the accounting.
	 * We only track child I/Os that are part of an allocating async
	 * write. We must do this since the allocation is performed
	 * by the logical I/O but the actual write is done by child I/Os.
	 */
	if (zio->io_flags & ZIO_FLAG_ALLOC_THROTTLED &&
	    zio->io_child_type == ZIO_CHILD_VDEV)
		zio_dva_throttle_done(zio);

	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT0(zio->io_children[c][w]);

	if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
		ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
		    sizeof (blkptr_t)) == 0 ||
		    (zio->io_bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
		    zio->io_bp_override == NULL &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT3U(zio->io_prop.zp_copies, <=,
			    BP_GET_NDVAS(zio->io_bp));
			ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
			    (BP_COUNT_GANG(zio->io_bp) ==
			    BP_GET_NDVAS(zio->io_bp)));
		}
		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
			VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
	}

	/*
	 * If there were child vdev/gang/ddt errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);

	/*
	 * If the I/O on the transformed data was successful, generate any
	 * checksum reports now while we still have the transformed data.
	 */
	if (zio->io_error == 0) {
		while (zio->io_cksum_report != NULL) {
			zio_cksum_report_t *zcr = zio->io_cksum_report;
			uint64_t align = zcr->zcr_align;
			uint64_t asize = P2ROUNDUP(psize, align);
			abd_t *adata = zio->io_abd;

			if (adata != NULL && asize != psize) {
				adata = abd_alloc(asize, B_TRUE);
				abd_copy(adata, zio->io_abd, psize);
				abd_zero_off(adata, psize, asize - psize);
			}

			zio->io_cksum_report = zcr->zcr_next;
			zcr->zcr_next = NULL;
			zcr->zcr_finish(zcr, adata);
			zfs_ereport_free_checksum(zcr);

			if (adata != NULL && asize != psize)
				abd_free(adata);
		}
	}

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);

	/*
	 * If this I/O is attached to a particular vdev and is slow,
	 * exceeding 30 seconds to complete, post an error describing the
	 * I/O delay. We ignore these errors if the device is currently
	 * unavailable.
	 */
	if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
		if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
			/*
			 * We want to only increment our slow IO counters if
			 * the IO is valid (i.e. not if the drive is removed).
			 *
			 * zfs_ereport_post() will also do these checks, but
			 * it can also ratelimit and have other failures, so we
			 * need to increment the slow_io counters independent
			 * of it.
			 */
			if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
			    zio->io_spa, zio->io_vd, zio)) {
				mutex_enter(&zio->io_vd->vdev_stat_lock);
				zio->io_vd->vdev_stat.vs_slow_ios++;
				mutex_exit(&zio->io_vd->vdev_stat_lock);

				if (zio->io_vd->vdev_slow_io_events) {
					(void) zfs_ereport_post(
					    FM_EREPORT_ZFS_DELAY,
					    zio->io_spa, zio->io_vd,
					    &zio->io_bookmark, zio, 0);
				}
			}
		}
	}

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level. We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
		    !vdev_is_dead(zio->io_vd) &&
		    !(zio->io_post & ZIO_POST_DIO_CHKSUM_ERR)) {
			int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
			    zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
			if (ret != EALREADY) {
				mutex_enter(&zio->io_vd->vdev_stat_lock);
				if (zio->io_type == ZIO_TYPE_READ)
					zio->io_vd->vdev_stat.vs_read_errors++;
				else if (zio->io_type == ZIO_TYPE_WRITE)
					zio->io_vd->vdev_stat.vs_write_errors++;
				mutex_exit(&zio->io_vd->vdev_stat_lock);
			}
		}

		if ((zio->io_error == EIO || !(zio->io_flags &
		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
		    !(zio->io_post & ZIO_POST_DIO_CHKSUM_ERR) &&
		    zio == zio->io_logical) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(zio->io_spa, &zio->io_bookmark,
			    BP_GET_PHYSICAL_BIRTH(zio->io_bp));
			(void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
			    zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
		}
	}

	if (zio->io_error && zio == zio->io_logical) {
		/*
		 * A DDT child tried to create a mixed gang/non-gang BP. We're
		 * going to have to just retry as a non-dedup IO.
		 */
		if (zio->io_error == EAGAIN && IO_IS_ALLOCATING(zio) &&
		    zio->io_prop.zp_dedup) {
			zio->io_post |= ZIO_POST_REEXECUTE;
			zio->io_prop.zp_dedup = B_FALSE;
		}
		/*
		 * Determine whether zio should be reexecuted. This will
		 * propagate all the way to the root via zio_notify_parent().
		 */
		ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		if (IO_IS_ALLOCATING(zio) &&
		    !(zio->io_flags & ZIO_FLAG_CANFAIL) &&
		    !(zio->io_post & ZIO_POST_DIO_CHKSUM_ERR)) {
			if (zio->io_error != ENOSPC)
				zio->io_post |= ZIO_POST_REEXECUTE;
			else
				zio->io_post |= ZIO_POST_SUSPEND;
		}

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
		    zio->io_error == ENXIO &&
		    spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
		    spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_post |= ZIO_POST_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) &&
		    !(zio->io_post & (ZIO_POST_REEXECUTE|ZIO_POST_SUSPEND)))
			zio->io_post |= ZIO_POST_SUSPEND;

		/*
		 * Here is a possibly good place to attempt to do
		 * either combinatorial reconstruction or error correction
		 * based on checksums. It also might be a good place
		 * to send out preliminary ereports before we suspend
		 * processing.
		 */
	}

	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);

	if ((zio->io_error ||
	    (zio->io_post & (ZIO_POST_REEXECUTE|ZIO_POST_SUSPEND))) &&
	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
		zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);

	zio_gang_tree_free(&zio->io_gang_tree);

	/*
	 * Godfather I/Os should never suspend.
	 */
	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
	    (zio->io_post & ZIO_POST_SUSPEND))
		zio->io_post &= ~ZIO_POST_SUSPEND;

	if (zio->io_post & (ZIO_POST_REEXECUTE|ZIO_POST_SUSPEND)) {
		/*
		 * A Direct I/O operation that has a checksum verify error
		 * should not attempt to reexecute. Instead, the error should
		 * just be propagated back.
		 */
		ASSERT0(zio->io_post & ZIO_POST_DIO_CHKSUM_ERR);

		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down. When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same. This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them. It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended). This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		zl = NULL;
		for (pio = zio_walk_parents(zio, &zl); pio != NULL;
		    pio = pio_next) {
			zio_link_t *remove_zl = zl;
			pio_next = zio_walk_parents(zio, &zl);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_post & ZIO_POST_SUSPEND)) {
				zio_remove_child(pio, zio, remove_zl);
				/*
				 * This is a rare code path, so we don't
				 * bother with "next_to_execute".
				 */
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
				    NULL);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent. Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			/*
			 * This is a rare code path, so we don't bother with
			 * "next_to_execute".
			 */
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
		} else if (zio->io_post & ZIO_POST_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
		} else {
			ASSERT(zio->io_post & ZIO_POST_REEXECUTE);
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
			spa_taskq_dispatch(zio->io_spa,
			    ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
			    zio_reexecute, zio, B_FALSE);
		}
		return (NULL);
	}

	ASSERT(list_is_empty(&zio->io_child_list));
	ASSERT0(zio->io_post & ZIO_POST_REEXECUTE);
	ASSERT0(zio->io_post & ZIO_POST_SUSPEND);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	if (zio->io_flags & ZIO_FLAG_POSTREAD) {
		ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
		zl = NULL;
		zio_t *pio = zio_walk_parents(zio, &zl);
		blkptr_t *bp = zio->io_bp;
		abd_t *abd = abd_alloc_for_io(BP_GET_PSIZE(bp), B_FALSE);
		zio_priority_t prio = zio->io_priority ==
		    ZIO_PRIORITY_SYNC_WRITE ? ZIO_PRIORITY_SYNC_READ :
		    ZIO_PRIORITY_SCRUB;
		zio_t *cio = zio_vdev_child_io(pio, zio->io_bp, zio->io_vd,
		    zio->io_offset, abd, zio->io_size, ZIO_TYPE_READ, prio,
		    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL |
		    ZIO_FLAG_RESILVER | ZIO_FLAG_DONT_PROPAGATE,
		    zio_done_postread_done, NULL);
		cio->io_flags &= ~ZIO_FLAG_ALLOC_THROTTLED;
		zio_nowait(cio);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	/*
	 * We are done executing this zio. We may want to execute a parent
	 * next. See the comment in zio_notify_parent().
	 */
	zio_t *next_to_execute = NULL;
	zl = NULL;
	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
		zio_link_t *remove_zl = zl;
		pio_next = zio_walk_parents(zio, &zl);
		zio_remove_child(pio, zio, remove_zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (next_to_execute);
}
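
/*
 * The io_waiter handshake at the end of zio_done() is what makes the
 * synchronous API work: zio_wait() sets io_waiter and sleeps on io_cv, so
 * the zio must be kept alive and signaled rather than destroyed. A minimal
 * sketch of the synchronous pattern (hypothetical read, values elided):
 */
#if 0
	zio_t *zio = zio_read(NULL, spa, bp, abd, size, NULL, NULL,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb);
	int error = zio_wait(zio);	/* returns io_error; zio is freed */
#endif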

/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_write_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_compress,
	zio_encrypt,
	zio_checksum_generate,
	zio_nop_write,
	zio_brt_free,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_throttle,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_dio_checksum_verify,
	zio_done
};
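
/*
 * zio_execute() treats io_pipeline as a bitmask over the table above: a zio
 * only visits the stages whose bits are set, in increasing bit order. For
 * example, the error interlock used throughout this file reduces a zio to
 * just the READY and DONE stages (see zio_impl.h):
 *
 *	ZIO_INTERLOCK_PIPELINE == ZIO_STAGE_READY | ZIO_STAGE_DONE
 */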

/*
 * Compare two zbookmark_phys_t's to see which we would reach first in a
 * pre-order traversal of the object tree.
 *
 * This is simple in every case aside from the meta-dnode object. For all other
 * objects, we traverse them in order (object 1 before object 2, and so on).
 * However, all of these objects are traversed while traversing object 0, since
 * the data it points to is the list of objects. Thus, we need to convert to a
 * canonical representation so we can compare meta-dnode bookmarks to
 * non-meta-dnode bookmarks.
 *
 * We do this by calculating "equivalents" for each field of the zbookmark.
 * zbookmarks outside of the meta-dnode use their own object and level, and
 * calculate the level 0 equivalent (the first L0 blkid that is contained in the
 * blocks this bookmark refers to) by multiplying their blkid by their span
 * (the number of L0 blocks contained within one block at their level).
 * zbookmarks inside the meta-dnode calculate their object equivalent
 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
 * level + 2^31 (a value larger than any level could ever be) for their level.
 * This causes them to always compare before a bookmark in their object
 * equivalent, compare appropriately to bookmarks in other objects, and to
 * compare appropriately to other bookmarks in the meta-dnode.
 */
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
    const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
	/*
	 * These variables represent the "equivalent" values for the zbookmark,
	 * after converting zbookmarks inside the meta dnode to their
	 * normal-object equivalents.
	 */
	uint64_t zb1obj, zb2obj;
	uint64_t zb1L0, zb2L0;
	uint64_t zb1level, zb2level;

	if (zb1->zb_object == zb2->zb_object &&
	    zb1->zb_level == zb2->zb_level &&
	    zb1->zb_blkid == zb2->zb_blkid)
		return (0);

	IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
	IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);

	/*
	 * BP_SPANB calculates the span in blocks.
	 */
	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);

	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb1L0 = 0;
		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
	} else {
		zb1obj = zb1->zb_object;
		zb1level = zb1->zb_level;
	}

	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb2L0 = 0;
		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
	} else {
		zb2obj = zb2->zb_object;
		zb2level = zb2->zb_level;
	}

	/* Now that we have a canonical representation, do the comparison. */
	if (zb1obj != zb2obj)
		return (zb1obj < zb2obj ? -1 : 1);
	else if (zb1L0 != zb2L0)
		return (zb1L0 < zb2L0 ? -1 : 1);
	else if (zb1level != zb2level)
		return (zb1level > zb2level ? -1 : 1);

	/*
	 * This can (theoretically) happen if the bookmarks have the same
	 * object and level, but different blkids, if the block sizes are
	 * not the same. There is presently no way to change the indirect
	 * block sizes.
	 */
	return (0);
}
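
/*
 * Worked example of the canonicalization above: with 16K data blocks in the
 * meta-dnode (dbss == 32 sectors) and 512-byte dnodes, one meta-dnode data
 * block holds 32 dnodes. A level-0 meta-dnode bookmark with blkid == 3 then
 * canonicalizes to zb1obj == 96, zb1L0 == 0, zb1level == COMPARE_META_LEVEL.
 * Against a bookmark for object 96 itself (zb2obj == 96, zb2L0 == 0,
 * zb2level == 0), the object and L0 equivalents tie and the larger meta
 * level wins, so the meta-dnode block sorts first -- i.e. "before a bookmark
 * in its object equivalent", as stated above.
 */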

/*
 * This function checks the following: given that last_block is the place that
 * our traversal stopped last time, does that guarantee that we've visited
 * every node under subtree_root? To answer that, we can't just use the raw
 * output of zbookmark_compare. We have to pass in a modified version of
 * subtree_root; by incrementing the block id, and then checking whether
 * last_block is before or equal to that, we can tell whether or not having
 * visited last_block implies that all of subtree_root's children have been
 * visited.
 */
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	zbookmark_phys_t mod_zb = *subtree_root;
	mod_zb.zb_blkid++;
	ASSERT0(last_block->zb_level);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	/*
	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
	 * data block size in sectors, because that variable is only used if
	 * the bookmark refers to a block in the meta-dnode. Since we don't
	 * know without examining it what object it refers to, and there's no
	 * harm in passing in this value in other cases, we always pass it in.
	 *
	 * We pass in 0 for the indirect block size shift because zb2 must be
	 * level 0. The indirect block size is only used to calculate the span
	 * of the bookmark, but since the bookmark must be level 0, the span is
	 * always 1, so the math works out.
	 *
	 * If you make changes to how the zbookmark_compare code works, be sure
	 * to make sure that this code still works afterwards.
	 */
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
	    last_block) <= 0);
}

/*
 * This function is similar to zbookmark_subtree_completed(), but returns true
 * if subtree_root is equal to or ahead of last_block, i.e. still to be done.
 */
boolean_t
zbookmark_subtree_tbd(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	ASSERT0(last_block->zb_level);
	if (dnp == NULL)
		return (B_FALSE);
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
	    last_block) >= 0);
}
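
/*
 * Illustrative use (hypothetical resume check, modeled on the scan code):
 * given the bookmark where the previous pass stopped, a traversal can prune
 * a subtree it has already finished. The variable names are placeholders.
 */
#if 0
	if (zbookmark_subtree_completed(dnp, &subtree_root, &resume_bookmark))
		return;		/* everything under this block was visited */
#endif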

EXPORT_SYMBOL(zio_type_name);
EXPORT_SYMBOL(zio_buf_alloc);
EXPORT_SYMBOL(zio_data_buf_alloc);
EXPORT_SYMBOL(zio_buf_free);
EXPORT_SYMBOL(zio_data_buf_free);

ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
	"Max I/O completion time (milliseconds) before marking it as slow");

ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
	"Prioritize requeued I/O");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
	"Defer frees starting in this pass");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
	"Don't compress starting in this pass");

ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
	"Rewrite new bps starting in this pass");

ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
	"Throttle block allocations in the ZIO pipeline");

ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
	"Log all slow ZIOs, not just those with vdevs");