1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
25 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
26 * Copyright (c) 2017, Intel Corporation.
27 * Copyright (c) 2019, 2023, 2024, 2025, Klara, Inc.
28 * Copyright (c) 2019, Allan Jude
29 * Copyright (c) 2021, Datto, Inc.
30 * Copyright (c) 2021, 2024 by George Melikov. All rights reserved.
31 */
32
33 #include <sys/sysmacros.h>
34 #include <sys/zfs_context.h>
35 #include <sys/fm/fs/zfs.h>
36 #include <sys/spa.h>
37 #include <sys/txg.h>
38 #include <sys/spa_impl.h>
39 #include <sys/vdev_impl.h>
40 #include <sys/vdev_trim.h>
41 #include <sys/zio_impl.h>
42 #include <sys/zio_compress.h>
43 #include <sys/zio_checksum.h>
44 #include <sys/dmu_objset.h>
45 #include <sys/arc.h>
46 #include <sys/brt.h>
47 #include <sys/ddt.h>
48 #include <sys/blkptr.h>
49 #include <sys/zfeature.h>
50 #include <sys/dsl_scan.h>
51 #include <sys/metaslab_impl.h>
52 #include <sys/time.h>
53 #include <sys/trace_zfs.h>
54 #include <sys/abd.h>
55 #include <sys/dsl_crypt.h>
56 #include <cityhash.h>
57
58 /*
59 * ==========================================================================
60 * I/O type descriptions
61 * ==========================================================================
62 */
63 const char *const zio_type_name[ZIO_TYPES] = {
64 /*
65 * Note: Linux kernel thread name length is limited
66  * so these names will differ from upstream OpenZFS.
67 */
68 "z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_flush", "z_trim"
69 };
70
71 int zio_dva_throttle_enabled = B_TRUE;
72 static int zio_deadman_log_all = B_FALSE;
73
74 /*
75 * ==========================================================================
76 * I/O kmem caches
77 * ==========================================================================
78 */
79 static kmem_cache_t *zio_cache;
80 static kmem_cache_t *zio_link_cache;
81 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
82 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
83 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
84 static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
85 static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
86 #endif
87
88 /* Mark IOs as "slow" if they take longer than 30 seconds */
89 static uint_t zio_slow_io_ms = (30 * MILLISEC);
90
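/*
 * BP_SPANB(indblkshift, level) is the number of level-0 block IDs spanned
 * by a single block at the given level, assuming indirect blocks of
 * 2^indblkshift bytes, each holding 2^(indblkshift - SPA_BLKPTRSHIFT)
 * block pointers.
 */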
91 #define BP_SPANB(indblkshift, level) \
92 (((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
93 #define COMPARE_META_LEVEL 0x80000000ul
94 /*
95  * The following actions directly affect the spa's sync-to-convergence logic.
96 * The values below define the sync pass when we start performing the action.
97 * Care should be taken when changing these values as they directly impact
98 * spa_sync() performance. Tuning these values may introduce subtle performance
99 * pathologies and should only be done in the context of performance analysis.
100 * These tunables will eventually be removed and replaced with #defines once
101 * enough analysis has been done to determine optimal values.
102 *
103 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
104 * regular blocks are not deferred.
105 *
106 * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
107 * compression (including of metadata). In practice, we don't have this
108 * many sync passes, so this has no effect.
109 *
110 * The original intent was that disabling compression would help the sync
111 * passes to converge. However, in practice disabling compression increases
112  * the average number of sync passes, because when we turn compression off,
113  * many blocks change size and thus we have to re-allocate (not
114 * overwrite) them. It also increases the number of 128KB allocations (e.g.
115 * for indirect blocks and spacemaps) because these will not be compressed.
116 * The 128K allocations are especially detrimental to performance on highly
117 * fragmented systems, which may have very few free segments of this size,
118 * and may need to load new metaslabs to satisfy 128K allocations.
119 */
120
121 /* defer frees starting in this pass */
122 uint_t zfs_sync_pass_deferred_free = 2;
123
124 /* don't compress starting in this pass */
125 static uint_t zfs_sync_pass_dont_compress = 8;
126
127 /* rewrite new bps starting in this pass */
128 static uint_t zfs_sync_pass_rewrite = 2;
129
130 /*
131 * An allocating zio is one that either currently has the DVA allocate
132 * stage set or will have it later in its lifetime.
133 */
134 #define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
135
136 /*
137  * Enable smaller crash dump cores by excluding metadata
138  * allocations as well.
139 */
140 int zio_exclude_metadata = 0;
141 static int zio_requeue_io_start_cut_in_line = 1;
142
143 #ifdef ZFS_DEBUG
144 static const int zio_buf_debug_limit = 16384;
145 #else
146 static const int zio_buf_debug_limit = 0;
147 #endif
148
149 typedef struct zio_stats {
150 kstat_named_t ziostat_total_allocations;
151 kstat_named_t ziostat_alloc_class_fallbacks;
152 kstat_named_t ziostat_gang_writes;
153 kstat_named_t ziostat_gang_multilevel;
154 } zio_stats_t;
155
156 static zio_stats_t zio_stats = {
157 { "total_allocations", KSTAT_DATA_UINT64 },
158 { "alloc_class_fallbacks", KSTAT_DATA_UINT64 },
159 { "gang_writes", KSTAT_DATA_UINT64 },
160 { "gang_multilevel", KSTAT_DATA_UINT64 },
161 };
162
163 struct {
164 wmsum_t ziostat_total_allocations;
165 wmsum_t ziostat_alloc_class_fallbacks;
166 wmsum_t ziostat_gang_writes;
167 wmsum_t ziostat_gang_multilevel;
168 } ziostat_sums;
169
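/* Bump one of the zio kstat counters above by one. */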
170 #define ZIOSTAT_BUMP(stat) wmsum_add(&ziostat_sums.stat, 1);
171
172 static kstat_t *zio_ksp;
173
174 static inline void __zio_execute(zio_t *zio);
175
176 static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
177
178 static int
179 zio_kstats_update(kstat_t *ksp, int rw)
180 {
181 zio_stats_t *zs = ksp->ks_data;
182 if (rw == KSTAT_WRITE)
183 return (EACCES);
184
185 zs->ziostat_total_allocations.value.ui64 =
186 wmsum_value(&ziostat_sums.ziostat_total_allocations);
187 zs->ziostat_alloc_class_fallbacks.value.ui64 =
188 wmsum_value(&ziostat_sums.ziostat_alloc_class_fallbacks);
189 zs->ziostat_gang_writes.value.ui64 =
190 wmsum_value(&ziostat_sums.ziostat_gang_writes);
191 zs->ziostat_gang_multilevel.value.ui64 =
192 wmsum_value(&ziostat_sums.ziostat_gang_multilevel);
193 return (0);
194 }
195
196 void
197 zio_init(void)
198 {
199 size_t c;
200
201 zio_cache = kmem_cache_create("zio_cache",
202 sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
203 zio_link_cache = kmem_cache_create("zio_link_cache",
204 sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
205
206 wmsum_init(&ziostat_sums.ziostat_total_allocations, 0);
207 wmsum_init(&ziostat_sums.ziostat_alloc_class_fallbacks, 0);
208 wmsum_init(&ziostat_sums.ziostat_gang_writes, 0);
209 wmsum_init(&ziostat_sums.ziostat_gang_multilevel, 0);
210 zio_ksp = kstat_create("zfs", 0, "zio_stats",
211 "misc", KSTAT_TYPE_NAMED, sizeof (zio_stats) /
212 sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
213 if (zio_ksp != NULL) {
214 zio_ksp->ks_data = &zio_stats;
215 zio_ksp->ks_update = zio_kstats_update;
216 kstat_install(zio_ksp);
217 }
218
219 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
220 size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
221 size_t align, cflags, data_cflags;
222 char name[32];
223
224 /*
225 * Create cache for each half-power of 2 size, starting from
226 * SPA_MINBLOCKSIZE. It should give us memory space efficiency
227 * of ~7/8, sufficient for transient allocations mostly using
228 * these caches.
229 */
230 size_t p2 = size;
231 while (!ISP2(p2))
232 p2 &= p2 - 1;
233 if (!IS_P2ALIGNED(size, p2 / 2))
234 continue;
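		/*
		 * The sizes that survive this filter are the powers of two
		 * and 1.5 times the powers of two: 512, 1024, 1536, 2048,
		 * 3072, 4096, 6144, 8192, and so on up to SPA_MAXBLOCKSIZE.
		 */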
235
236 #ifndef _KERNEL
237 /*
238 * If we are using watchpoints, put each buffer on its own page,
239 * to eliminate the performance overhead of trapping to the
240 * kernel when modifying a non-watched buffer that shares the
241 * page with a watched buffer.
242 */
243 if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
244 continue;
245 #endif
246
247 if (IS_P2ALIGNED(size, PAGESIZE))
248 align = PAGESIZE;
249 else
250 align = 1 << (highbit64(size ^ (size - 1)) - 1);
251
252 cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
253 KMC_NODEBUG : 0;
254 data_cflags = KMC_NODEBUG;
255 if (abd_size_alloc_linear(size)) {
256 cflags |= KMC_RECLAIMABLE;
257 data_cflags |= KMC_RECLAIMABLE;
258 }
259 if (cflags == data_cflags) {
260 /*
261 * Resulting kmem caches would be identical.
262 * Save memory by creating only one.
263 */
264 (void) snprintf(name, sizeof (name),
265 "zio_buf_comb_%lu", (ulong_t)size);
266 zio_buf_cache[c] = kmem_cache_create(name, size, align,
267 NULL, NULL, NULL, NULL, NULL, cflags);
268 zio_data_buf_cache[c] = zio_buf_cache[c];
269 continue;
270 }
271 (void) snprintf(name, sizeof (name), "zio_buf_%lu",
272 (ulong_t)size);
273 zio_buf_cache[c] = kmem_cache_create(name, size, align,
274 NULL, NULL, NULL, NULL, NULL, cflags);
275
276 (void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
277 (ulong_t)size);
278 zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
279 NULL, NULL, NULL, NULL, NULL, data_cflags);
280 }
281
282 while (--c != 0) {
283 ASSERT(zio_buf_cache[c] != NULL);
284 if (zio_buf_cache[c - 1] == NULL)
285 zio_buf_cache[c - 1] = zio_buf_cache[c];
286
287 ASSERT(zio_data_buf_cache[c] != NULL);
288 if (zio_data_buf_cache[c - 1] == NULL)
289 zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
290 }
291
292 zio_inject_init();
293
294 lz4_init();
295 }
296
297 void
298 zio_fini(void)
299 {
300 size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
301
302 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
303 for (size_t i = 0; i < n; i++) {
304 if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
305 (void) printf("zio_fini: [%d] %llu != %llu\n",
306 (int)((i + 1) << SPA_MINBLOCKSHIFT),
307 (long long unsigned)zio_buf_cache_allocs[i],
308 (long long unsigned)zio_buf_cache_frees[i]);
309 }
310 #endif
311
312 /*
313 * The same kmem cache can show up multiple times in both zio_buf_cache
314 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
315 * sort it out.
316 */
317 for (size_t i = 0; i < n; i++) {
318 kmem_cache_t *cache = zio_buf_cache[i];
319 if (cache == NULL)
320 continue;
321 for (size_t j = i; j < n; j++) {
322 if (cache == zio_buf_cache[j])
323 zio_buf_cache[j] = NULL;
324 if (cache == zio_data_buf_cache[j])
325 zio_data_buf_cache[j] = NULL;
326 }
327 kmem_cache_destroy(cache);
328 }
329
330 for (size_t i = 0; i < n; i++) {
331 kmem_cache_t *cache = zio_data_buf_cache[i];
332 if (cache == NULL)
333 continue;
334 for (size_t j = i; j < n; j++) {
335 if (cache == zio_data_buf_cache[j])
336 zio_data_buf_cache[j] = NULL;
337 }
338 kmem_cache_destroy(cache);
339 }
340
341 for (size_t i = 0; i < n; i++) {
342 VERIFY3P(zio_buf_cache[i], ==, NULL);
343 VERIFY3P(zio_data_buf_cache[i], ==, NULL);
344 }
345
346 if (zio_ksp != NULL) {
347 kstat_delete(zio_ksp);
348 zio_ksp = NULL;
349 }
350
351 wmsum_fini(&ziostat_sums.ziostat_total_allocations);
352 wmsum_fini(&ziostat_sums.ziostat_alloc_class_fallbacks);
353 wmsum_fini(&ziostat_sums.ziostat_gang_writes);
354 wmsum_fini(&ziostat_sums.ziostat_gang_multilevel);
355
356 kmem_cache_destroy(zio_link_cache);
357 kmem_cache_destroy(zio_cache);
358
359 zio_inject_fini();
360
361 lz4_fini();
362 }
363
364 /*
365 * ==========================================================================
366 * Allocate and free I/O buffers
367 * ==========================================================================
368 */
369
370 #if defined(ZFS_DEBUG) && defined(_KERNEL)
371 #define ZFS_ZIO_BUF_CANARY 1
372 #endif
373
374 #ifdef ZFS_ZIO_BUF_CANARY
375 static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
376
377 /*
378 * Use empty space after the buffer to detect overflows.
379 *
380 * Since zio_init() creates kmem caches only for certain set of buffer sizes,
381 * allocations of different sizes may have some unused space after the data.
382 * Filling part of that space with a known pattern on allocation and checking
383 * it on free should allow us to detect some buffer overflows.
384 */
385 static void
386 zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
387 {
388 size_t off = P2ROUNDUP(size, sizeof (ulong_t));
389 ulong_t *canary = p + off / sizeof (ulong_t);
390 size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
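	/*
	 * If this size class shares its kmem cache with the next larger
	 * class, the underlying allocation is actually the larger size,
	 * so extend the canary fill out to that boundary.
	 */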
391 if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
392 cache[c] == cache[c + 1])
393 asize = (c + 2) << SPA_MINBLOCKSHIFT;
394 for (; off < asize; canary++, off += sizeof (ulong_t))
395 *canary = zio_buf_canary;
396 }
397
398 static void
399 zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
400 {
401 size_t off = P2ROUNDUP(size, sizeof (ulong_t));
402 ulong_t *canary = p + off / sizeof (ulong_t);
403 size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
404 if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
405 cache[c] == cache[c + 1])
406 asize = (c + 2) << SPA_MINBLOCKSHIFT;
407 for (; off < asize; canary++, off += sizeof (ulong_t)) {
408 if (unlikely(*canary != zio_buf_canary)) {
409 PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
410 p, size, (canary - p) * sizeof (ulong_t),
411 *canary, zio_buf_canary);
412 }
413 }
414 }
415 #endif
416
417 /*
418 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
419 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
420 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
421 * excess / transient data in-core during a crashdump.
422 */
423 void *
424 zio_buf_alloc(size_t size)
425 {
426 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
427
428 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
429 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
430 atomic_add_64(&zio_buf_cache_allocs[c], 1);
431 #endif
432
433 void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
434 #ifdef ZFS_ZIO_BUF_CANARY
435 zio_buf_put_canary(p, size, zio_buf_cache, c);
436 #endif
437 return (p);
438 }
439
440 /*
441 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
442 * crashdump if the kernel panics. This exists so that we will limit the amount
443 * of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount
444  * of kernel heap dumped to disk when the kernel panics.)
445 */
446 void *
447 zio_data_buf_alloc(size_t size)
448 {
449 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
450
451 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
452
453 void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
454 #ifdef ZFS_ZIO_BUF_CANARY
455 zio_buf_put_canary(p, size, zio_data_buf_cache, c);
456 #endif
457 return (p);
458 }
459
460 void
461 zio_buf_free(void *buf, size_t size)
462 {
463 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
464
465 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
466 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
467 atomic_add_64(&zio_buf_cache_frees[c], 1);
468 #endif
469
470 #ifdef ZFS_ZIO_BUF_CANARY
471 zio_buf_check_canary(buf, size, zio_buf_cache, c);
472 #endif
473 kmem_cache_free(zio_buf_cache[c], buf);
474 }
475
476 void
477 zio_data_buf_free(void *buf, size_t size)
478 {
479 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
480
481 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
482
483 #ifdef ZFS_ZIO_BUF_CANARY
484 zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
485 #endif
486 kmem_cache_free(zio_data_buf_cache[c], buf);
487 }
488
489 static void
490 zio_abd_free(void *abd, size_t size)
491 {
492 (void) size;
493 abd_free((abd_t *)abd);
494 }
495
496 /*
497 * ==========================================================================
498 * Push and pop I/O transform buffers
499 * ==========================================================================
500 */
501 void
502 zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
503 zio_transform_func_t *transform)
504 {
505 zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
506
507 zt->zt_orig_abd = zio->io_abd;
508 zt->zt_orig_size = zio->io_size;
509 zt->zt_bufsize = bufsize;
510 zt->zt_transform = transform;
511
512 zt->zt_next = zio->io_transform_stack;
513 zio->io_transform_stack = zt;
514
515 zio->io_abd = data;
516 zio->io_size = size;
517 }
518
519 void
520 zio_pop_transforms(zio_t *zio)
521 {
522 zio_transform_t *zt;
523
524 while ((zt = zio->io_transform_stack) != NULL) {
525 if (zt->zt_transform != NULL)
526 zt->zt_transform(zio,
527 zt->zt_orig_abd, zt->zt_orig_size);
528
529 if (zt->zt_bufsize != 0)
530 abd_free(zio->io_abd);
531
532 zio->io_abd = zt->zt_orig_abd;
533 zio->io_size = zt->zt_orig_size;
534 zio->io_transform_stack = zt->zt_next;
535
536 kmem_free(zt, sizeof (zio_transform_t));
537 }
538 }
539
540 /*
541 * ==========================================================================
542 * I/O transform callbacks for subblocks, decompression, and decryption
543 * ==========================================================================
544 */
545 static void
546 zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
547 {
548 ASSERT(zio->io_size > size);
549
550 if (zio->io_type == ZIO_TYPE_READ)
551 abd_copy(data, zio->io_abd, size);
552 }
553
554 static void
555 zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
556 {
557 if (zio->io_error == 0) {
558 int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
559 zio->io_abd, data, zio->io_size, size,
560 &zio->io_prop.zp_complevel);
561
562 if (zio_injection_enabled && ret == 0)
563 ret = zio_handle_fault_injection(zio, EINVAL);
564
565 if (ret != 0)
566 zio->io_error = SET_ERROR(EIO);
567 }
568 }
569
570 static void
571 zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
572 {
573 int ret;
574 void *tmp;
575 blkptr_t *bp = zio->io_bp;
576 spa_t *spa = zio->io_spa;
577 uint64_t dsobj = zio->io_bookmark.zb_objset;
578 uint64_t lsize = BP_GET_LSIZE(bp);
579 dmu_object_type_t ot = BP_GET_TYPE(bp);
580 uint8_t salt[ZIO_DATA_SALT_LEN];
581 uint8_t iv[ZIO_DATA_IV_LEN];
582 uint8_t mac[ZIO_DATA_MAC_LEN];
583 boolean_t no_crypt = B_FALSE;
584
585 ASSERT(BP_USES_CRYPT(bp));
586 ASSERT3U(size, !=, 0);
587
588 if (zio->io_error != 0)
589 return;
590
591 /*
592 * Verify the cksum of MACs stored in an indirect bp. It will always
593 * be possible to verify this since it does not require an encryption
594 * key.
595 */
596 if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
597 zio_crypt_decode_mac_bp(bp, mac);
598
599 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
600 /*
601 * We haven't decompressed the data yet, but
602 * zio_crypt_do_indirect_mac_checksum() requires
603 * decompressed data to be able to parse out the MACs
604 * from the indirect block. We decompress it now and
605 * throw away the result after we are finished.
606 */
607 abd_t *abd = abd_alloc_linear(lsize, B_TRUE);
608 ret = zio_decompress_data(BP_GET_COMPRESS(bp),
609 zio->io_abd, abd, zio->io_size, lsize,
610 &zio->io_prop.zp_complevel);
611 if (ret != 0) {
612 abd_free(abd);
613 ret = SET_ERROR(EIO);
614 goto error;
615 }
616 ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
617 abd, lsize, BP_SHOULD_BYTESWAP(bp), mac);
618 abd_free(abd);
619 } else {
620 ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
621 zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
622 }
623 abd_copy(data, zio->io_abd, size);
624
625 if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
626 ret = zio_handle_decrypt_injection(spa,
627 &zio->io_bookmark, ot, ECKSUM);
628 }
629 if (ret != 0)
630 goto error;
631
632 return;
633 }
634
635 /*
636 * If this is an authenticated block, just check the MAC. It would be
637 * nice to separate this out into its own flag, but when this was done,
638 * we had run out of bits in what is now zio_flag_t. Future cleanup
639 * could make this a flag bit.
640 */
641 if (BP_IS_AUTHENTICATED(bp)) {
642 if (ot == DMU_OT_OBJSET) {
643 ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
644 dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
645 } else {
646 zio_crypt_decode_mac_bp(bp, mac);
647 ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
648 zio->io_abd, size, mac);
649 if (zio_injection_enabled && ret == 0) {
650 ret = zio_handle_decrypt_injection(spa,
651 &zio->io_bookmark, ot, ECKSUM);
652 }
653 }
654 abd_copy(data, zio->io_abd, size);
655
656 if (ret != 0)
657 goto error;
658
659 return;
660 }
661
662 zio_crypt_decode_params_bp(bp, salt, iv);
663
664 if (ot == DMU_OT_INTENT_LOG) {
665 tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
666 zio_crypt_decode_mac_zil(tmp, mac);
667 abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
668 } else {
669 zio_crypt_decode_mac_bp(bp, mac);
670 }
671
672 ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
673 BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
674 zio->io_abd, &no_crypt);
675 if (no_crypt)
676 abd_copy(data, zio->io_abd, size);
677
678 if (ret != 0)
679 goto error;
680
681 return;
682
683 error:
684 /* assert that the key was found unless this was speculative */
685 ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
686
687 /*
688 * If there was a decryption / authentication error return EIO as
689 * the io_error. If this was not a speculative zio, create an ereport.
690 */
691 if (ret == ECKSUM) {
692 zio->io_error = SET_ERROR(EIO);
693 if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
694 spa_log_error(spa, &zio->io_bookmark,
695 BP_GET_LOGICAL_BIRTH(zio->io_bp));
696 (void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
697 spa, NULL, &zio->io_bookmark, zio, 0);
698 }
699 } else {
700 zio->io_error = ret;
701 }
702 }
703
704 /*
705 * ==========================================================================
706 * I/O parent/child relationships and pipeline interlocks
707 * ==========================================================================
708 */
709 zio_t *
710 zio_walk_parents(zio_t *cio, zio_link_t **zl)
711 {
712 list_t *pl = &cio->io_parent_list;
713
714 *zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
715 if (*zl == NULL)
716 return (NULL);
717
718 ASSERT((*zl)->zl_child == cio);
719 return ((*zl)->zl_parent);
720 }
721
722 zio_t *
723 zio_walk_children(zio_t *pio, zio_link_t **zl)
724 {
725 list_t *cl = &pio->io_child_list;
726
727 ASSERT(MUTEX_HELD(&pio->io_lock));
728
729 *zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
730 if (*zl == NULL)
731 return (NULL);
732
733 ASSERT((*zl)->zl_parent == pio);
734 return ((*zl)->zl_child);
735 }
736
737 zio_t *
738 zio_unique_parent(zio_t *cio)
739 {
740 zio_link_t *zl = NULL;
741 zio_t *pio = zio_walk_parents(cio, &zl);
742
743 VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
744 return (pio);
745 }
746
747 void
748 zio_add_child(zio_t *pio, zio_t *cio)
749 {
750 /*
751 * Logical I/Os can have logical, gang, or vdev children.
752 * Gang I/Os can have gang or vdev children.
753 * Vdev I/Os can only have vdev children.
754 * The following ASSERT captures all of these constraints.
755 */
756 ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
757
758 /* Parent should not have READY stage if child doesn't have it. */
759 IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
760 (cio->io_child_type != ZIO_CHILD_VDEV),
761 (pio->io_pipeline & ZIO_STAGE_READY) == 0);
762
763 zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
764 zl->zl_parent = pio;
765 zl->zl_child = cio;
766
767 mutex_enter(&pio->io_lock);
768 mutex_enter(&cio->io_lock);
769
770 ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
771
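	/*
	 * For each wait type the child has not yet satisfied, the parent
	 * must wait for one more child.
	 */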
772 uint64_t *countp = pio->io_children[cio->io_child_type];
773 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
774 countp[w] += !cio->io_state[w];
775
776 list_insert_head(&pio->io_child_list, zl);
777 list_insert_head(&cio->io_parent_list, zl);
778
779 mutex_exit(&cio->io_lock);
780 mutex_exit(&pio->io_lock);
781 }
782
783 void
784 zio_add_child_first(zio_t *pio, zio_t *cio)
785 {
786 /*
787 * Logical I/Os can have logical, gang, or vdev children.
788 * Gang I/Os can have gang or vdev children.
789 * Vdev I/Os can only have vdev children.
790 * The following ASSERT captures all of these constraints.
791 */
792 ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
793
794 /* Parent should not have READY stage if child doesn't have it. */
795 IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
796 (cio->io_child_type != ZIO_CHILD_VDEV),
797 (pio->io_pipeline & ZIO_STAGE_READY) == 0);
798
799 zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
800 zl->zl_parent = pio;
801 zl->zl_child = cio;
802
803 ASSERT(list_is_empty(&cio->io_parent_list));
804 list_insert_head(&cio->io_parent_list, zl);
805
806 mutex_enter(&pio->io_lock);
807
808 ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
809
810 uint64_t *countp = pio->io_children[cio->io_child_type];
811 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
812 countp[w] += !cio->io_state[w];
813
814 list_insert_head(&pio->io_child_list, zl);
815
816 mutex_exit(&pio->io_lock);
817 }
818
819 static void
820 zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
821 {
822 ASSERT(zl->zl_parent == pio);
823 ASSERT(zl->zl_child == cio);
824
825 mutex_enter(&pio->io_lock);
826 mutex_enter(&cio->io_lock);
827
828 list_remove(&pio->io_child_list, zl);
829 list_remove(&cio->io_parent_list, zl);
830
831 mutex_exit(&cio->io_lock);
832 mutex_exit(&pio->io_lock);
833 kmem_cache_free(zio_link_cache, zl);
834 }
835
836 static boolean_t
837 zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
838 {
839 boolean_t waiting = B_FALSE;
840
841 mutex_enter(&zio->io_lock);
842 ASSERT(zio->io_stall == NULL);
843 for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
844 if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
845 continue;
846
847 uint64_t *countp = &zio->io_children[c][wait];
848 if (*countp != 0) {
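			/*
			 * Step back one pipeline stage so that, when the last
			 * child completes and this zio is re-dispatched, it
			 * re-enters the current stage.
			 */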
849 zio->io_stage >>= 1;
850 ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
851 zio->io_stall = countp;
852 waiting = B_TRUE;
853 break;
854 }
855 }
856 mutex_exit(&zio->io_lock);
857 return (waiting);
858 }
859
860 __attribute__((always_inline))
861 static inline void
862 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
863 zio_t **next_to_executep)
864 {
865 uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
866 int *errorp = &pio->io_child_error[zio->io_child_type];
867
868 mutex_enter(&pio->io_lock);
869 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
870 *errorp = zio_worst_error(*errorp, zio->io_error);
871 pio->io_reexecute |= zio->io_reexecute;
872 ASSERT3U(*countp, >, 0);
873
874 /*
875  * Propagate the Direct I/O checksum verify failure to the parent.
876 */
877 if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
878 pio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
879
880 (*countp)--;
881
882 if (*countp == 0 && pio->io_stall == countp) {
883 zio_taskq_type_t type =
884 pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
885 ZIO_TASKQ_INTERRUPT;
886 pio->io_stall = NULL;
887 mutex_exit(&pio->io_lock);
888
889 /*
890 * If we can tell the caller to execute this parent next, do
891 * so. We do this if the parent's zio type matches the child's
892 * type, or if it's a zio_null() with no done callback, and so
893 * has no actual work to do. Otherwise dispatch the parent zio
894 * in its own taskq.
895 *
896 * Having the caller execute the parent when possible reduces
897 * locking on the zio taskq's, reduces context switch
898 * overhead, and has no recursion penalty. Note that one
899 * read from disk typically causes at least 3 zio's: a
900 * zio_null(), the logical zio_read(), and then a physical
901 * zio. When the physical ZIO completes, we are able to call
902 * zio_done() on all 3 of these zio's from one invocation of
903 * zio_execute() by returning the parent back to
904 * zio_execute(). Since the parent isn't executed until this
905 * thread returns back to zio_execute(), the caller should do
906 * so promptly.
907 *
908 * In other cases, dispatching the parent prevents
909 * overflowing the stack when we have deeply nested
910 * parent-child relationships, as we do with the "mega zio"
911 * of writes for spa_sync(), and the chain of ZIL blocks.
912 */
913 if (next_to_executep != NULL && *next_to_executep == NULL &&
914 (pio->io_type == zio->io_type ||
915 (pio->io_type == ZIO_TYPE_NULL && !pio->io_done))) {
916 *next_to_executep = pio;
917 } else {
918 zio_taskq_dispatch(pio, type, B_FALSE);
919 }
920 } else {
921 mutex_exit(&pio->io_lock);
922 }
923 }
924
925 static void
926 zio_inherit_child_errors(zio_t *zio, enum zio_child c)
927 {
928 if (zio->io_child_error[c] != 0 && zio->io_error == 0)
929 zio->io_error = zio->io_child_error[c];
930 }
931
932 int
933 zio_bookmark_compare(const void *x1, const void *x2)
934 {
935 const zio_t *z1 = x1;
936 const zio_t *z2 = x2;
937
938 if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
939 return (-1);
940 if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
941 return (1);
942
943 if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
944 return (-1);
945 if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
946 return (1);
947
948 if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
949 return (-1);
950 if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
951 return (1);
952
953 if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
954 return (-1);
955 if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
956 return (1);
957
958 if (z1 < z2)
959 return (-1);
960 if (z1 > z2)
961 return (1);
962
963 return (0);
964 }
965
966 /*
967 * ==========================================================================
968 * Create the various types of I/O (read, write, free, etc)
969 * ==========================================================================
970 */
971 static zio_t *
972 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
973 abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
974 void *private, zio_type_t type, zio_priority_t priority,
975 zio_flag_t flags, vdev_t *vd, uint64_t offset,
976 const zbookmark_phys_t *zb, enum zio_stage stage,
977 enum zio_stage pipeline)
978 {
979 zio_t *zio;
980
981 IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
982 ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
983 ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
984
985 ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
986 ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
987 ASSERT(vd || stage == ZIO_STAGE_OPEN);
988
989 IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
990
991 zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
992 memset(zio, 0, sizeof (zio_t));
993
994 mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
995 cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
996
997 list_create(&zio->io_parent_list, sizeof (zio_link_t),
998 offsetof(zio_link_t, zl_parent_node));
999 list_create(&zio->io_child_list, sizeof (zio_link_t),
1000 offsetof(zio_link_t, zl_child_node));
1001 metaslab_trace_init(&zio->io_alloc_list);
1002
1003 if (vd != NULL)
1004 zio->io_child_type = ZIO_CHILD_VDEV;
1005 else if (flags & ZIO_FLAG_GANG_CHILD)
1006 zio->io_child_type = ZIO_CHILD_GANG;
1007 else if (flags & ZIO_FLAG_DDT_CHILD)
1008 zio->io_child_type = ZIO_CHILD_DDT;
1009 else
1010 zio->io_child_type = ZIO_CHILD_LOGICAL;
1011
1012 if (bp != NULL) {
1013 if (type != ZIO_TYPE_WRITE ||
1014 zio->io_child_type == ZIO_CHILD_DDT) {
1015 zio->io_bp_copy = *bp;
1016 zio->io_bp = &zio->io_bp_copy; /* so caller can free */
1017 } else {
1018 zio->io_bp = (blkptr_t *)bp;
1019 }
1020 zio->io_bp_orig = *bp;
1021 if (zio->io_child_type == ZIO_CHILD_LOGICAL)
1022 zio->io_logical = zio;
1023 if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
1024 pipeline |= ZIO_GANG_STAGES;
1025 if (flags & ZIO_FLAG_PREALLOCATED) {
1026 BP_ZERO_DVAS(zio->io_bp);
1027 BP_SET_BIRTH(zio->io_bp, 0, 0);
1028 }
1029 }
1030
1031 zio->io_spa = spa;
1032 zio->io_txg = txg;
1033 zio->io_done = done;
1034 zio->io_private = private;
1035 zio->io_type = type;
1036 zio->io_priority = priority;
1037 zio->io_vd = vd;
1038 zio->io_offset = offset;
1039 zio->io_orig_abd = zio->io_abd = data;
1040 zio->io_orig_size = zio->io_size = psize;
1041 zio->io_lsize = lsize;
1042 zio->io_orig_flags = zio->io_flags = flags;
1043 zio->io_orig_stage = zio->io_stage = stage;
1044 zio->io_orig_pipeline = zio->io_pipeline = pipeline;
1045 zio->io_pipeline_trace = ZIO_STAGE_OPEN;
1046 zio->io_allocator = ZIO_ALLOCATOR_NONE;
1047
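	/*
	 * Treat the zio as already READY if it was created at or past the
	 * READY stage, or if its pipeline never includes READY; likewise
	 * treat it as DONE if created at or past the DONE stage.
	 */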
1048 zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
1049 (pipeline & ZIO_STAGE_READY) == 0;
1050 zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
1051
1052 if (zb != NULL)
1053 zio->io_bookmark = *zb;
1054
1055 if (pio != NULL) {
1056 zio->io_metaslab_class = pio->io_metaslab_class;
1057 if (zio->io_logical == NULL)
1058 zio->io_logical = pio->io_logical;
1059 if (zio->io_child_type == ZIO_CHILD_GANG)
1060 zio->io_gang_leader = pio->io_gang_leader;
1061 zio_add_child_first(pio, zio);
1062 }
1063
1064 taskq_init_ent(&zio->io_tqent);
1065
1066 return (zio);
1067 }
1068
1069 void
1070 zio_destroy(zio_t *zio)
1071 {
1072 metaslab_trace_fini(&zio->io_alloc_list);
1073 list_destroy(&zio->io_parent_list);
1074 list_destroy(&zio->io_child_list);
1075 mutex_destroy(&zio->io_lock);
1076 cv_destroy(&zio->io_cv);
1077 kmem_cache_free(zio_cache, zio);
1078 }
1079
1080 /*
1081  * A ZIO intended to sit between others. It provides synchronization at the
1082  * READY and DONE pipeline stages and calls the respective callbacks.
1083 */
1084 zio_t *
1085 zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
1086 void *private, zio_flag_t flags)
1087 {
1088 zio_t *zio;
1089
1090 zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
1091 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
1092 ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
1093
1094 return (zio);
1095 }
1096
1097 /*
1098  * A ZIO intended to be the root of a tree. Unlike a null ZIO, it does not
1099  * have a READY pipeline stage (it is ready on creation), so it should not be
1100  * used as a child of any ZIO that may need to wait for its grandchildren's
1101  * READY stage (i.e. any other ZIO type).
1102 */
1103 zio_t *
1104 zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
1105 {
1106 zio_t *zio;
1107
1108 zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
1109 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
1110 ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);
1111
1112 return (zio);
1113 }
1114
1115 static int
1116 zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
1117 enum blk_verify_flag blk_verify, const char *fmt, ...)
1118 {
1119 va_list adx;
1120 char buf[256];
1121
1122 va_start(adx, fmt);
1123 (void) vsnprintf(buf, sizeof (buf), fmt, adx);
1124 va_end(adx);
1125
1126 zfs_dbgmsg("bad blkptr at %px: "
1127 "DVA[0]=%#llx/%#llx "
1128 "DVA[1]=%#llx/%#llx "
1129 "DVA[2]=%#llx/%#llx "
1130 "prop=%#llx "
1131 "pad=%#llx,%#llx "
1132 "phys_birth=%#llx "
1133 "birth=%#llx "
1134 "fill=%#llx "
1135 "cksum=%#llx/%#llx/%#llx/%#llx",
1136 bp,
1137 (long long)bp->blk_dva[0].dva_word[0],
1138 (long long)bp->blk_dva[0].dva_word[1],
1139 (long long)bp->blk_dva[1].dva_word[0],
1140 (long long)bp->blk_dva[1].dva_word[1],
1141 (long long)bp->blk_dva[2].dva_word[0],
1142 (long long)bp->blk_dva[2].dva_word[1],
1143 (long long)bp->blk_prop,
1144 (long long)bp->blk_pad[0],
1145 (long long)bp->blk_pad[1],
1146 (long long)BP_GET_PHYSICAL_BIRTH(bp),
1147 (long long)BP_GET_LOGICAL_BIRTH(bp),
1148 (long long)bp->blk_fill,
1149 (long long)bp->blk_cksum.zc_word[0],
1150 (long long)bp->blk_cksum.zc_word[1],
1151 (long long)bp->blk_cksum.zc_word[2],
1152 (long long)bp->blk_cksum.zc_word[3]);
1153 switch (blk_verify) {
1154 case BLK_VERIFY_HALT:
1155 zfs_panic_recover("%s: %s", spa_name(spa), buf);
1156 break;
1157 case BLK_VERIFY_LOG:
1158 zfs_dbgmsg("%s: %s", spa_name(spa), buf);
1159 break;
1160 case BLK_VERIFY_ONLY:
1161 break;
1162 }
1163
1164 return (1);
1165 }
1166
1167 /*
1168 * Verify the block pointer fields contain reasonable values. This means
1169 * it only contains known object types, checksum/compression identifiers,
1170 * block sizes within the maximum allowed limits, valid DVAs, etc.
1171 *
1172 * If everything checks out 0 is returned. The zfs_blkptr_verify
1173 * argument controls the behavior when an invalid field is detected.
1174 *
1175 * Values for blk_verify_flag:
1176 * BLK_VERIFY_ONLY: evaluate the block
1177 * BLK_VERIFY_LOG: evaluate the block and log problems
1178 * BLK_VERIFY_HALT: call zfs_panic_recover on error
1179 *
1180 * Values for blk_config_flag:
1181 * BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
1182 * BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
1183 * obtained for reader
1184 * BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
1185 * performance
1186 */
1187 int
1188 zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
1189 enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
1190 {
1191 int errors = 0;
1192
1193 if (unlikely(!DMU_OT_IS_VALID(BP_GET_TYPE(bp)))) {
1194 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1195 "blkptr at %px has invalid TYPE %llu",
1196 bp, (longlong_t)BP_GET_TYPE(bp));
1197 }
1198 if (unlikely(BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS)) {
1199 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1200 "blkptr at %px has invalid COMPRESS %llu",
1201 bp, (longlong_t)BP_GET_COMPRESS(bp));
1202 }
1203 if (unlikely(BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE)) {
1204 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1205 "blkptr at %px has invalid LSIZE %llu",
1206 bp, (longlong_t)BP_GET_LSIZE(bp));
1207 }
1208 if (BP_IS_EMBEDDED(bp)) {
1209 if (unlikely(BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES)) {
1210 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1211 "blkptr at %px has invalid ETYPE %llu",
1212 bp, (longlong_t)BPE_GET_ETYPE(bp));
1213 }
1214 if (unlikely(BPE_GET_PSIZE(bp) > BPE_PAYLOAD_SIZE)) {
1215 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1216 "blkptr at %px has invalid PSIZE %llu",
1217 bp, (longlong_t)BPE_GET_PSIZE(bp));
1218 }
1219 return (errors ? ECKSUM : 0);
1220 } else if (BP_IS_HOLE(bp)) {
1221 /*
1222 * Holes are allowed (expected, even) to have no DVAs, no
1223 * checksum, and no psize.
1224 */
1225 return (errors ? ECKSUM : 0);
1226 } else if (unlikely(!DVA_IS_VALID(&bp->blk_dva[0]))) {
1227 /* Non-hole, non-embedded BPs _must_ have at least one DVA */
1228 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1229 "blkptr at %px has no valid DVAs", bp);
1230 }
1231 if (unlikely(BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS)) {
1232 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1233 "blkptr at %px has invalid CHECKSUM %llu",
1234 bp, (longlong_t)BP_GET_CHECKSUM(bp));
1235 }
1236 if (unlikely(BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE)) {
1237 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1238 "blkptr at %px has invalid PSIZE %llu",
1239 bp, (longlong_t)BP_GET_PSIZE(bp));
1240 }
1241
1242 /*
1243 * Do not verify individual DVAs if the config is not trusted. This
1244 * will be done once the zio is executed in vdev_mirror_map_alloc.
1245 */
1246 if (unlikely(!spa->spa_trust_config))
1247 return (errors ? ECKSUM : 0);
1248
1249 switch (blk_config) {
1250 case BLK_CONFIG_HELD:
1251 ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
1252 break;
1253 case BLK_CONFIG_NEEDED:
1254 spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
1255 break;
1256 case BLK_CONFIG_NEEDED_TRY:
1257 if (!spa_config_tryenter(spa, SCL_VDEV, bp, RW_READER))
1258 return (EBUSY);
1259 break;
1260 case BLK_CONFIG_SKIP:
1261 return (errors ? ECKSUM : 0);
1262 default:
1263 panic("invalid blk_config %u", blk_config);
1264 }
1265
1266 /*
1267 * Pool-specific checks.
1268 *
1269 * Note: it would be nice to verify that the logical birth
1270 * and physical birth are not too large. However,
1271 * spa_freeze() allows the birth time of log blocks (and
1272 * dmu_sync()-ed blocks that are in the log) to be arbitrarily
1273 * large.
1274 */
1275 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
1276 const dva_t *dva = &bp->blk_dva[i];
1277 uint64_t vdevid = DVA_GET_VDEV(dva);
1278
1279 if (unlikely(vdevid >= spa->spa_root_vdev->vdev_children)) {
1280 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1281 "blkptr at %px DVA %u has invalid VDEV %llu",
1282 bp, i, (longlong_t)vdevid);
1283 continue;
1284 }
1285 vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1286 if (unlikely(vd == NULL)) {
1287 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1288 "blkptr at %px DVA %u has invalid VDEV %llu",
1289 bp, i, (longlong_t)vdevid);
1290 continue;
1291 }
1292 if (unlikely(vd->vdev_ops == &vdev_hole_ops)) {
1293 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1294 "blkptr at %px DVA %u has hole VDEV %llu",
1295 bp, i, (longlong_t)vdevid);
1296 continue;
1297 }
1298 if (vd->vdev_ops == &vdev_missing_ops) {
1299 /*
1300 * "missing" vdevs are valid during import, but we
1301 * don't have their detailed info (e.g. asize), so
1302 * we can't perform any more checks on them.
1303 */
1304 continue;
1305 }
1306 uint64_t offset = DVA_GET_OFFSET(dva);
1307 uint64_t asize = DVA_GET_ASIZE(dva);
1308 if (DVA_GET_GANG(dva))
1309 asize = vdev_gang_header_asize(vd);
1310 if (unlikely(offset + asize > vd->vdev_asize)) {
1311 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1312 "blkptr at %px DVA %u has invalid OFFSET %llu",
1313 bp, i, (longlong_t)offset);
1314 }
1315 }
1316 if (blk_config == BLK_CONFIG_NEEDED || blk_config ==
1317 BLK_CONFIG_NEEDED_TRY)
1318 spa_config_exit(spa, SCL_VDEV, bp);
1319
1320 return (errors ? ECKSUM : 0);
1321 }
1322
1323 boolean_t
1324 zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
1325 {
1326 (void) bp;
1327 uint64_t vdevid = DVA_GET_VDEV(dva);
1328
1329 if (vdevid >= spa->spa_root_vdev->vdev_children)
1330 return (B_FALSE);
1331
1332 vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1333 if (vd == NULL)
1334 return (B_FALSE);
1335
1336 if (vd->vdev_ops == &vdev_hole_ops)
1337 return (B_FALSE);
1338
1339 if (vd->vdev_ops == &vdev_missing_ops) {
1340 return (B_FALSE);
1341 }
1342
1343 uint64_t offset = DVA_GET_OFFSET(dva);
1344 uint64_t asize = DVA_GET_ASIZE(dva);
1345
1346 if (DVA_GET_GANG(dva))
1347 asize = vdev_gang_header_asize(vd);
1348 if (offset + asize > vd->vdev_asize)
1349 return (B_FALSE);
1350
1351 return (B_TRUE);
1352 }
1353
1354 zio_t *
1355 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
1356 abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
1357 zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
1358 {
1359 zio_t *zio;
1360
1361 zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp,
1362 data, size, size, done, private,
1363 ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
1364 ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
1365 ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
1366
1367 return (zio);
1368 }
1369
1370 zio_t *
1371 zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
1372 abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
1373 zio_done_func_t *ready, zio_done_func_t *children_ready,
1374 zio_done_func_t *done, void *private, zio_priority_t priority,
1375 zio_flag_t flags, const zbookmark_phys_t *zb)
1376 {
1377 zio_t *zio;
1378 enum zio_stage pipeline = zp->zp_direct_write == B_TRUE ?
1379 ZIO_DIRECT_WRITE_PIPELINE : (flags & ZIO_FLAG_DDT_CHILD) ?
1380 ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE;
1381
1382
1383 zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
1384 ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
1385 ZIO_STAGE_OPEN, pipeline);
1386
1387 zio->io_ready = ready;
1388 zio->io_children_ready = children_ready;
1389 zio->io_prop = *zp;
1390
1391 /*
1392 * Data can be NULL if we are going to call zio_write_override() to
1393 * provide the already-allocated BP. But we may need the data to
1394 * verify a dedup hit (if requested). In this case, don't try to
1395 * dedup (just take the already-allocated BP verbatim). Encrypted
1396 * dedup blocks need data as well so we also disable dedup in this
1397 * case.
1398 */
1399 if (data == NULL &&
1400 (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
1401 zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
1402 }
1403
1404 return (zio);
1405 }
1406
1407 zio_t *
1408 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
1409 uint64_t size, zio_done_func_t *done, void *private,
1410 zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
1411 {
1412 zio_t *zio;
1413
1414 zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
1415 ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
1416 ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
1417
1418 return (zio);
1419 }
1420
1421 void
1422 zio_write_override(zio_t *zio, blkptr_t *bp, int copies, int gang_copies,
1423 boolean_t nopwrite, boolean_t brtwrite)
1424 {
1425 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
1426 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1427 ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
1428 ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
1429 ASSERT(!brtwrite || !nopwrite);
1430
1431 /*
1432 * We must reset the io_prop to match the values that existed
1433 * when the bp was first written by dmu_sync() keeping in mind
1434 * that nopwrite and dedup are mutually exclusive.
1435 */
1436 zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
1437 zio->io_prop.zp_nopwrite = nopwrite;
1438 zio->io_prop.zp_brtwrite = brtwrite;
1439 zio->io_prop.zp_copies = copies;
1440 zio->io_prop.zp_gang_copies = gang_copies;
1441 zio->io_bp_override = bp;
1442 }
1443
1444 void
1445 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
1446 {
1447
1448 (void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
1449
1450 /*
1451 * The check for EMBEDDED is a performance optimization. We
1452 * process the free here (by ignoring it) rather than
1453 * putting it on the list and then processing it in zio_free_sync().
1454 */
1455 if (BP_IS_EMBEDDED(bp))
1456 return;
1457
1458 /*
1459 * Frees that are for the currently-syncing txg, are not going to be
1460 * deferred, and which will not need to do a read (i.e. not GANG or
1461 * DEDUP), can be processed immediately. Otherwise, put them on the
1462 * in-memory list for later processing.
1463 *
1464 * Note that we only defer frees after zfs_sync_pass_deferred_free
1465 * when the log space map feature is disabled. [see relevant comment
1466 * in spa_sync_iterate_to_convergence()]
1467 */
1468 if (BP_IS_GANG(bp) ||
1469 BP_GET_DEDUP(bp) ||
1470 txg != spa->spa_syncing_txg ||
1471 (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
1472 !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
1473 brt_maybe_exists(spa, bp)) {
1474 metaslab_check_free(spa, bp);
1475 bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
1476 } else {
1477 VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
1478 }
1479 }
1480
1481 /*
1482 * To improve performance, this function may return NULL if we were able
1483 * to do the free immediately. This avoids the cost of creating a zio
1484 * (and linking it to the parent, etc).
1485 */
1486 zio_t *
1487 zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1488 zio_flag_t flags)
1489 {
1490 ASSERT(!BP_IS_HOLE(bp));
1491 ASSERT(spa_syncing_txg(spa) == txg);
1492
1493 if (BP_IS_EMBEDDED(bp))
1494 return (NULL);
1495
1496 metaslab_check_free(spa, bp);
1497 arc_freed(spa, bp);
1498 dsl_scan_freed(spa, bp);
1499
1500 if (BP_IS_GANG(bp) ||
1501 BP_GET_DEDUP(bp) ||
1502 brt_maybe_exists(spa, bp)) {
1503 /*
1504 * GANG, DEDUP and BRT blocks can induce a read (for the gang
1505 * block header, the DDT or the BRT), so issue them
1506 * asynchronously so that this thread is not tied up.
1507 */
1508 enum zio_stage stage =
1509 ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
1510
1511 return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1512 BP_GET_PSIZE(bp), NULL, NULL,
1513 ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
1514 flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
1515 } else {
1516 metaslab_free(spa, bp, txg, B_FALSE);
1517 return (NULL);
1518 }
1519 }
1520
1521 zio_t *
1522 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1523 zio_done_func_t *done, void *private, zio_flag_t flags)
1524 {
1525 zio_t *zio;
1526
1527 (void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
1528 BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
1529
1530 if (BP_IS_EMBEDDED(bp))
1531 return (zio_null(pio, spa, NULL, NULL, NULL, 0));
1532
1533 /*
1534 * A claim is an allocation of a specific block. Claims are needed
1535 * to support immediate writes in the intent log. The issue is that
1536 * immediate writes contain committed data, but in a txg that was
1537 * *not* committed. Upon opening the pool after an unclean shutdown,
1538 * the intent log claims all blocks that contain immediate write data
1539 * so that the SPA knows they're in use.
1540 *
1541 * All claims *must* be resolved in the first txg -- before the SPA
1542 * starts allocating blocks -- so that nothing is allocated twice.
1543 * If txg == 0 we just verify that the block is claimable.
1544 */
1545 ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
1546 spa_min_claim_txg(spa));
1547 ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
1548 ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(8) */
1549
1550 zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1551 BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
1552 flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
1553 ASSERT0(zio->io_queued_timestamp);
1554
1555 return (zio);
1556 }
1557
1558 zio_t *
1559 zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1560 zio_done_func_t *done, void *private, zio_priority_t priority,
1561 zio_flag_t flags, enum trim_flag trim_flags)
1562 {
1563 zio_t *zio;
1564
1565 ASSERT0(vd->vdev_children);
1566 ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
1567 ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
1568 ASSERT3U(size, !=, 0);
1569
1570 zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
1571 private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
1572 vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
1573 zio->io_trim_flags = trim_flags;
1574
1575 return (zio);
1576 }
1577
1578 zio_t *
1579 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1580 abd_t *data, int checksum, zio_done_func_t *done, void *private,
1581 zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1582 {
1583 zio_t *zio;
1584
1585 ASSERT(vd->vdev_children == 0);
1586 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1587 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1588 ASSERT3U(offset + size, <=, vd->vdev_psize);
1589
1590 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1591 private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1592 offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
1593
1594 zio->io_prop.zp_checksum = checksum;
1595
1596 return (zio);
1597 }
1598
1599 zio_t *
1600 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1601 abd_t *data, int checksum, zio_done_func_t *done, void *private,
1602 zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1603 {
1604 zio_t *zio;
1605
1606 ASSERT(vd->vdev_children == 0);
1607 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1608 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1609 ASSERT3U(offset + size, <=, vd->vdev_psize);
1610
1611 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1612 private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1613 offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
1614
1615 zio->io_prop.zp_checksum = checksum;
1616
1617 if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
1618 /*
1619 * zec checksums are necessarily destructive -- they modify
1620 * the end of the write buffer to hold the verifier/checksum.
1621 * Therefore, we must make a local copy in case the data is
1622 * being written to multiple places in parallel.
1623 */
1624 abd_t *wbuf = abd_alloc_sametype(data, size);
1625 abd_copy(wbuf, data, size);
1626
1627 zio_push_transform(zio, wbuf, size, size, NULL);
1628 }
1629
1630 return (zio);
1631 }
1632
1633 /*
1634 * Create a child I/O to do some work for us.
1635 */
1636 zio_t *
1637 zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
1638 abd_t *data, uint64_t size, int type, zio_priority_t priority,
1639 zio_flag_t flags, zio_done_func_t *done, void *private)
1640 {
1641 enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
1642 zio_t *zio;
1643
1644 /*
1645 * vdev child I/Os do not propagate their error to the parent.
1646 * Therefore, for correct operation the caller *must* check for
1647 * and handle the error in the child i/o's done callback.
1648 * The only exceptions are i/os that we don't care about
1649 * (OPTIONAL or REPAIR).
1650 */
1651 ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
1652 done != NULL);
1653
1654 if (type == ZIO_TYPE_READ && bp != NULL) {
1655 /*
1656 * If we have the bp, then the child should perform the
1657 * checksum and the parent need not. This pushes error
1658 * detection as close to the leaves as possible and
1659 * eliminates redundant checksums in the interior nodes.
1660 */
1661 pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
1662 pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
1663 /*
1664 * We never allow the mirror VDEV to attempt reading from any
1665 * additional data copies after the first Direct I/O checksum
1666 * verify failure. This is to avoid bad data being written out
1667 * through the mirror during self healing. See comment in
1668 * vdev_mirror_io_done() for more details.
1669 */
1670 ASSERT0(pio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
1671 } else if (type == ZIO_TYPE_WRITE &&
1672 pio->io_prop.zp_direct_write == B_TRUE) {
1673 /*
1674 * By default we only will verify checksums for Direct I/O
1675 * writes for Linux. FreeBSD is able to place user pages under
1676 * write protection before issuing them to the ZIO pipeline.
1677 *
1678 * Checksum validation errors will only be reported through
1679 * the top-level VDEV, which is set by this child ZIO.
1680 */
1681 ASSERT3P(bp, !=, NULL);
1682 ASSERT3U(pio->io_child_type, ==, ZIO_CHILD_LOGICAL);
1683 pipeline |= ZIO_STAGE_DIO_CHECKSUM_VERIFY;
1684 }
1685
1686 if (vd->vdev_ops->vdev_op_leaf) {
1687 ASSERT0(vd->vdev_children);
1688 offset += VDEV_LABEL_START_SIZE;
1689 }
1690
1691 flags |= ZIO_VDEV_CHILD_FLAGS(pio);
1692
1693 /*
1694 * If we've decided to do a repair, the write is not speculative --
1695 * even if the original read was.
1696 */
1697 if (flags & ZIO_FLAG_IO_REPAIR)
1698 flags &= ~ZIO_FLAG_SPECULATIVE;
1699
1700 /*
1701 * If we're creating a child I/O that is not associated with a
1702 * top-level vdev, then the child zio is not an allocating I/O.
1703 * If this is a retried I/O then we ignore it since we will
1704 * have already processed the original allocating I/O.
1705 */
1706 if (flags & ZIO_FLAG_IO_ALLOCATING &&
1707 (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
1708 ASSERT(pio->io_metaslab_class != NULL);
1709 ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
1710 ASSERT(type == ZIO_TYPE_WRITE);
1711 ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
1712 ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
1713 ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
1714 pio->io_child_type == ZIO_CHILD_GANG);
1715
1716 flags &= ~ZIO_FLAG_IO_ALLOCATING;
1717 }
1718
1719 zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
1720 done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
1721 ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
1722 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
1723
1724 return (zio);
1725 }
1726
1727 zio_t *
1728 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
1729 zio_type_t type, zio_priority_t priority, zio_flag_t flags,
1730 zio_done_func_t *done, void *private)
1731 {
1732 zio_t *zio;
1733
1734 ASSERT(vd->vdev_ops->vdev_op_leaf);
1735
1736 zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
1737 data, size, size, done, private, type, priority,
1738 flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
1739 vd, offset, NULL,
1740 ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
1741
1742 return (zio);
1743 }
1744
1745
1746 /*
1747 * Send a flush command to the given vdev. Unlike most zio creation functions,
1748 * the flush zios are issued immediately. Waiting on pio blocks until all
1749 * the flushes complete.
1750 */
1751 void
1752 zio_flush(zio_t *pio, vdev_t *vd)
1753 {
1754 const zio_flag_t flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
1755 ZIO_FLAG_DONT_RETRY;
1756
1757 if (vd->vdev_nowritecache)
1758 return;
1759
1760 if (vd->vdev_children == 0) {
1761 zio_nowait(zio_create(pio, vd->vdev_spa, 0, NULL, NULL, 0, 0,
1762 NULL, NULL, ZIO_TYPE_FLUSH, ZIO_PRIORITY_NOW, flags, vd, 0,
1763 NULL, ZIO_STAGE_OPEN, ZIO_FLUSH_PIPELINE));
1764 } else {
1765 for (uint64_t c = 0; c < vd->vdev_children; c++)
1766 zio_flush(pio, vd->vdev_child[c]);
1767 }
1768 }
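
/*
 * Example usage (a minimal sketch): callers typically create a root zio,
 * fan the flushes out across the top-level vdevs, and then wait on the
 * root for all of them to complete:
 *
 *	zio_t *root = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for (uint64_t c = 0; c < spa->spa_root_vdev->vdev_children; c++)
 *		zio_flush(root, spa->spa_root_vdev->vdev_child[c]);
 *	(void) zio_wait(root);
 */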
1769
1770 void
1771 zio_shrink(zio_t *zio, uint64_t size)
1772 {
1773 ASSERT3P(zio->io_executor, ==, NULL);
1774 ASSERT3U(zio->io_orig_size, ==, zio->io_size);
1775 ASSERT3U(size, <=, zio->io_size);
1776
1777 /*
1778 * We don't shrink for raidz because of problems with the
1779 * reconstruction when reading back less than the block size.
1780 * Note, BP_IS_RAIDZ() assumes no compression.
1781 */
1782 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1783 if (!BP_IS_RAIDZ(zio->io_bp)) {
1784 /* we are not doing a raw write */
1785 ASSERT3U(zio->io_size, ==, zio->io_lsize);
1786 zio->io_orig_size = zio->io_size = zio->io_lsize = size;
1787 }
1788 }
1789
1790 /*
1791 * Round the provided allocation size up to a value that at least some
1792 * vdev(s) in the pool can allocate with little or no additional padding,
1793 * and that does not waste extra space on the others.
1794 */
1795 static uint64_t
1796 zio_roundup_alloc_size(spa_t *spa, uint64_t size)
1797 {
1798 if (size > spa->spa_min_alloc)
1799 return (roundup(size, spa->spa_gcd_alloc));
1800 return (spa->spa_min_alloc);
1801 }
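
/*
 * For example, assuming a pool that mixes ashift=12 and ashift=13 vdevs,
 * spa_min_alloc is 4K and spa_gcd_alloc is 4K: a 5000-byte request rounds
 * up to 8K, while a 3000-byte request is bumped to the 4K minimum.
 */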
1802
1803 size_t
1804 zio_get_compression_max_size(enum zio_compress compress, uint64_t gcd_alloc,
1805 uint64_t min_alloc, size_t s_len)
1806 {
1807 size_t d_len;
1808
1809 /* minimum 12.5% must be saved (legacy value, may be changed later) */
1810 d_len = s_len - (s_len >> 3);
1811
1812 /* ZLE can't use exactly d_len bytes, it needs more, so ignore it */
1813 if (compress == ZIO_COMPRESS_ZLE)
1814 return (d_len);
1815
1816 d_len = d_len - d_len % gcd_alloc;
1817
1818 if (d_len < min_alloc)
1819 return (BPE_PAYLOAD_SIZE);
1820 return (d_len);
1821 }
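
/*
 * Worked example, assuming gcd_alloc == min_alloc == 4K: for a 128K source
 * buffer, d_len starts at 128K - 16K = 112K (the 12.5% minimum saving),
 * which is already a multiple of 4K, so compression is attempted with a
 * 112K target. For a 4K source buffer the 12.5% rule leaves 3584 bytes,
 * which rounds down to 0 (below min_alloc), so only a result small enough
 * to embed in the bp (BPE_PAYLOAD_SIZE) is worth keeping.
 */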
1822
1823 /*
1824 * ==========================================================================
1825 * Prepare to read and write logical blocks
1826 * ==========================================================================
1827 */
1828
1829 static zio_t *
1830 zio_read_bp_init(zio_t *zio)
1831 {
1832 blkptr_t *bp = zio->io_bp;
1833 uint64_t psize =
1834 BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
1835
1836 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1837
1838 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
1839 zio->io_child_type == ZIO_CHILD_LOGICAL &&
1840 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1841 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1842 psize, psize, zio_decompress);
1843 }
1844
1845 if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
1846 BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
1847 zio->io_child_type == ZIO_CHILD_LOGICAL) {
1848 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1849 psize, psize, zio_decrypt);
1850 }
1851
1852 if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
1853 int psize = BPE_GET_PSIZE(bp);
1854 void *data = abd_borrow_buf(zio->io_abd, psize);
1855
1856 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1857 decode_embedded_bp_compressed(bp, data);
1858 abd_return_buf_copy(zio->io_abd, data, psize);
1859 } else {
1860 ASSERT(!BP_IS_EMBEDDED(bp));
1861 }
1862
1863 if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
1864 zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
1865
1866 return (zio);
1867 }
1868
1869 static zio_t *
1870 zio_write_bp_init(zio_t *zio)
1871 {
1872 if (!IO_IS_ALLOCATING(zio))
1873 return (zio);
1874
1875 ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1876
1877 if (zio->io_bp_override) {
1878 blkptr_t *bp = zio->io_bp;
1879 zio_prop_t *zp = &zio->io_prop;
1880
1881 ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg);
1882
1883 *bp = *zio->io_bp_override;
1884 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1885
1886 if (zp->zp_brtwrite)
1887 return (zio);
1888
1889 ASSERT(!BP_GET_DEDUP(zio->io_bp_override));
1890
1891 if (BP_IS_EMBEDDED(bp))
1892 return (zio);
1893
1894 /*
1895 * If we've been overridden and nopwrite is set then
1896 * set the flag accordingly to indicate that a nopwrite
1897 * has already occurred.
1898 */
1899 if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
1900 ASSERT(!zp->zp_dedup);
1901 ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
1902 zio->io_flags |= ZIO_FLAG_NOPWRITE;
1903 return (zio);
1904 }
1905
1906 ASSERT(!zp->zp_nopwrite);
1907
1908 if (BP_IS_HOLE(bp) || !zp->zp_dedup)
1909 return (zio);
1910
1911 ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
1912 ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
1913
1914 if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
1915 !zp->zp_encrypt) {
1916 BP_SET_DEDUP(bp, 1);
1917 zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
1918 return (zio);
1919 }
1920
1921 /*
1922 * We were unable to handle this as an override bp, treat
1923 * it as a regular write I/O.
1924 */
1925 zio->io_bp_override = NULL;
1926 *bp = zio->io_bp_orig;
1927 zio->io_pipeline = zio->io_orig_pipeline;
1928 }
1929
1930 return (zio);
1931 }
1932
1933 static zio_t *
1934 zio_write_compress(zio_t *zio)
1935 {
1936 spa_t *spa = zio->io_spa;
1937 zio_prop_t *zp = &zio->io_prop;
1938 enum zio_compress compress = zp->zp_compress;
1939 blkptr_t *bp = zio->io_bp;
1940 uint64_t lsize = zio->io_lsize;
1941 uint64_t psize = zio->io_size;
1942 uint32_t pass = 1;
1943
1944 /*
1945 * If our children haven't all reached the ready stage,
1946 * wait for them and then repeat this pipeline stage.
1947 */
1948 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
1949 ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
1950 return (NULL);
1951 }
1952
1953 if (!IO_IS_ALLOCATING(zio))
1954 return (zio);
1955
1956 if (zio->io_children_ready != NULL) {
1957 /*
1958 * Now that all our children are ready, run the callback
1959 * associated with this zio in case it wants to modify the
1960 * data to be written.
1961 */
1962 ASSERT3U(zp->zp_level, >, 0);
1963 zio->io_children_ready(zio);
1964 }
1965
1966 ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1967 ASSERT(zio->io_bp_override == NULL);
1968
1969 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) {
1970 /*
1971 * We're rewriting an existing block, which means we're
1972 * working on behalf of spa_sync(). For spa_sync() to
1973 * converge, it must eventually be the case that we don't
1974 * have to allocate new blocks. But compression changes
1975 * the blocksize, which forces a reallocate, and makes
1976 * convergence take longer. Therefore, after the first
1977 * few passes, stop compressing to ensure convergence.
1978 */
1979 pass = spa_sync_pass(spa);
1980
1981 ASSERT(zio->io_txg == spa_syncing_txg(spa));
1982 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1983 ASSERT(!BP_GET_DEDUP(bp));
1984
1985 if (pass >= zfs_sync_pass_dont_compress)
1986 compress = ZIO_COMPRESS_OFF;
1987
1988 /* Make sure someone doesn't change their mind on overwrites */
1989 ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) ||
1990 MIN(zp->zp_copies, spa_max_replication(spa))
1991 == BP_GET_NDVAS(bp));
1992 }
1993
1994 /* If it's a compressed write that is not raw, compress the buffer. */
1995 if (compress != ZIO_COMPRESS_OFF &&
1996 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1997 abd_t *cabd = NULL;
1998 if (abd_cmp_zero(zio->io_abd, lsize) == 0)
1999 psize = 0;
2000 else if (compress == ZIO_COMPRESS_EMPTY)
2001 psize = lsize;
2002 else
2003 psize = zio_compress_data(compress, zio->io_abd, &cabd,
2004 lsize,
2005 zio_get_compression_max_size(compress,
2006 spa->spa_gcd_alloc, spa->spa_min_alloc, lsize),
2007 zp->zp_complevel);
2008 if (psize == 0) {
2009 compress = ZIO_COMPRESS_OFF;
2010 } else if (psize >= lsize) {
2011 compress = ZIO_COMPRESS_OFF;
2012 if (cabd != NULL)
2013 abd_free(cabd);
2014 } else if (psize <= BPE_PAYLOAD_SIZE && !zp->zp_encrypt &&
2015 zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
2016 spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
2017 void *cbuf = abd_borrow_buf_copy(cabd, lsize);
2018 encode_embedded_bp_compressed(bp,
2019 cbuf, compress, lsize, psize);
2020 BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
2021 BP_SET_TYPE(bp, zio->io_prop.zp_type);
2022 BP_SET_LEVEL(bp, zio->io_prop.zp_level);
2023 abd_return_buf(cabd, cbuf, lsize);
2024 abd_free(cabd);
2025 BP_SET_LOGICAL_BIRTH(bp, zio->io_txg);
2026 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2027 ASSERT(spa_feature_is_active(spa,
2028 SPA_FEATURE_EMBEDDED_DATA));
2029 return (zio);
2030 } else {
2031 /*
2032 * Round compressed size up to the minimum allocation
2033 * size of the smallest-ashift device, and zero the
2034 * tail. This ensures that the compressed size of the
2035 * BP (and thus compressratio property) are correct,
2036 * in that we charge for the padding used to fill out
2037 * the last sector.
2038 */
2039 size_t rounded = (size_t)zio_roundup_alloc_size(spa,
2040 psize);
2041 if (rounded >= lsize) {
2042 compress = ZIO_COMPRESS_OFF;
2043 abd_free(cabd);
2044 psize = lsize;
2045 } else {
2046 abd_zero_off(cabd, psize, rounded - psize);
2047 psize = rounded;
2048 zio_push_transform(zio, cabd,
2049 psize, lsize, NULL);
2050 }
2051 }
2052
2053 /*
2054 * We were unable to handle this as an override bp, treat
2055 * it as a regular write I/O.
2056 */
2057 zio->io_bp_override = NULL;
2058 *bp = zio->io_bp_orig;
2059 zio->io_pipeline = zio->io_orig_pipeline;
2060
2061 } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
2062 zp->zp_type == DMU_OT_DNODE) {
2063 /*
2064 * The DMU actually relies on the zio layer's compression
2065 * to free metadnode blocks that have had all contained
2066 * dnodes freed. As a result, even when doing a raw
2067 * receive, we must check whether the block can be compressed
2068 * to a hole.
2069 */
2070 if (abd_cmp_zero(zio->io_abd, lsize) == 0) {
2071 psize = 0;
2072 compress = ZIO_COMPRESS_OFF;
2073 } else {
2074 psize = lsize;
2075 }
2076 } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
2077 !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
2078 /*
2079 * If we are raw receiving an encrypted dataset we should not
2080 * take this codepath because it will change the on-disk block
2081 * and decryption will fail.
2082 */
2083 size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize),
2084 lsize);
2085
2086 if (rounded != psize) {
2087 abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
2088 abd_zero_off(cdata, psize, rounded - psize);
2089 abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
2090 psize = rounded;
2091 zio_push_transform(zio, cdata,
2092 psize, rounded, NULL);
2093 }
2094 } else {
2095 ASSERT3U(psize, !=, 0);
2096 }
2097
2098 /*
2099 * The final pass of spa_sync() must be all rewrites, but the first
2100 * few passes offer a trade-off: allocating blocks defers convergence,
2101 * but newly allocated blocks are sequential, so they can be written
2102 * to disk faster. Therefore, we allow the first few passes of
2103 * spa_sync() to allocate new blocks, but force rewrites after that.
2104 * There should only be a handful of blocks after pass 1 in any case.
2105 */
2106 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg &&
2107 BP_GET_PSIZE(bp) == psize &&
2108 pass >= zfs_sync_pass_rewrite) {
2109 VERIFY3U(psize, !=, 0);
2110 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
2111
2112 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
2113 zio->io_flags |= ZIO_FLAG_IO_REWRITE;
2114 } else {
2115 BP_ZERO(bp);
2116 zio->io_pipeline = ZIO_WRITE_PIPELINE;
2117 }
2118
2119 if (psize == 0) {
2120 if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 &&
2121 spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
2122 BP_SET_LSIZE(bp, lsize);
2123 BP_SET_TYPE(bp, zp->zp_type);
2124 BP_SET_LEVEL(bp, zp->zp_level);
2125 BP_SET_BIRTH(bp, zio->io_txg, 0);
2126 }
2127 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2128 } else {
2129 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
2130 BP_SET_LSIZE(bp, lsize);
2131 BP_SET_TYPE(bp, zp->zp_type);
2132 BP_SET_LEVEL(bp, zp->zp_level);
2133 BP_SET_PSIZE(bp, psize);
2134 BP_SET_COMPRESS(bp, compress);
2135 BP_SET_CHECKSUM(bp, zp->zp_checksum);
2136 BP_SET_DEDUP(bp, zp->zp_dedup);
2137 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
2138 if (zp->zp_dedup) {
2139 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2140 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
2141 ASSERT(!zp->zp_encrypt ||
2142 DMU_OT_IS_ENCRYPTED(zp->zp_type));
2143 zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
2144 }
2145 if (zp->zp_nopwrite) {
2146 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2147 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
2148 zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
2149 }
2150 }
2151 return (zio);
2152 }
2153
2154 static zio_t *
2155 zio_free_bp_init(zio_t *zio)
2156 {
2157 blkptr_t *bp = zio->io_bp;
2158
2159 if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
2160 if (BP_GET_DEDUP(bp))
2161 zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
2162 }
2163
2164 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
2165
2166 return (zio);
2167 }
2168
2169 /*
2170 * ==========================================================================
2171 * Execute the I/O pipeline
2172 * ==========================================================================
2173 */
2174
2175 static void
2176 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
2177 {
2178 spa_t *spa = zio->io_spa;
2179 zio_type_t t = zio->io_type;
2180
2181 /*
2182 * If we're a config writer or a probe, the normal issue and
2183 * interrupt threads may all be blocked waiting for the config lock.
2184 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
2185 */
2186 if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
2187 t = ZIO_TYPE_NULL;
2188
2189 /*
2190 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
2191 */
2192 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
2193 t = ZIO_TYPE_NULL;
2194
2195 /*
2196 * If this is a high priority I/O, then use the high priority taskq if
2197 * available or cut the line otherwise.
2198 */
2199 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) {
2200 if (spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
2201 q++;
2202 else
2203 cutinline = B_TRUE;
2204 }
2205
2206 ASSERT3U(q, <, ZIO_TASKQ_TYPES);
2207
2208 spa_taskq_dispatch(spa, t, q, zio_execute, zio, cutinline);
2209 }
2210
2211 static boolean_t
2212 zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
2213 {
2214 spa_t *spa = zio->io_spa;
2215
2216 taskq_t *tq = taskq_of_curthread();
2217
2218 for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
2219 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
2220 uint_t i;
2221 for (i = 0; i < tqs->stqs_count; i++) {
2222 if (tqs->stqs_taskq[i] == tq)
2223 return (B_TRUE);
2224 }
2225 }
2226
2227 return (B_FALSE);
2228 }
2229
2230 static zio_t *
2231 zio_issue_async(zio_t *zio)
2232 {
2233 ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio));
2234 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
2235 return (NULL);
2236 }
2237
2238 void
2239 zio_interrupt(void *zio)
2240 {
2241 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
2242 }
2243
2244 void
2245 zio_delay_interrupt(zio_t *zio)
2246 {
2247 /*
2248 * The timeout_generic() function isn't defined in userspace, so
2249 * rather than trying to implement the function, the zio delay
2250 * functionality has been disabled for userspace builds.
2251 */
2252
2253 #ifdef _KERNEL
2254 /*
2255 * If io_target_timestamp is zero, then no delay has been registered
2256 * for this IO, so jump to the end of this function and "skip" the
2257 * delay, issuing the zio directly to the zio layer.
2258 */
2259 if (zio->io_target_timestamp != 0) {
2260 hrtime_t now = gethrtime();
2261
2262 if (now >= zio->io_target_timestamp) {
2263 /*
2264 * This IO has already taken longer than the target
2265 * delay to complete, so we don't want to delay it
2266 * any longer; we "miss" the delay and issue it
2267 * directly to the zio layer. This is likely due to
2268 * the target latency being set to a value less than
2269 * the underlying hardware can satisfy (e.g. delay
2270 * set to 1ms, but the disks take 10ms to complete an
2271 * IO request).
2272 */
2273
2274 DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
2275 hrtime_t, now);
2276
2277 zio_interrupt(zio);
2278 } else {
2279 taskqid_t tid;
2280 hrtime_t diff = zio->io_target_timestamp - now;
2281 int ticks = MAX(1, NSEC_TO_TICK(diff));
2282 clock_t expire_at_tick = ddi_get_lbolt() + ticks;
2283
2284 DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
2285 hrtime_t, now, hrtime_t, diff);
2286
2287 tid = taskq_dispatch_delay(system_taskq, zio_interrupt,
2288 zio, TQ_NOSLEEP, expire_at_tick);
2289 if (tid == TASKQID_INVALID) {
2290 /*
2291 * Couldn't allocate a task. Just finish the
2292 * zio without a delay.
2293 */
2294 zio_interrupt(zio);
2295 }
2296 }
2297 return;
2298 }
2299 #endif
2300 DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
2301 zio_interrupt(zio);
2302 }
2303
2304 static void
2305 zio_deadman_impl(zio_t *pio, int ziodepth)
2306 {
2307 zio_t *cio, *cio_next;
2308 zio_link_t *zl = NULL;
2309 vdev_t *vd = pio->io_vd;
2310
2311 if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
2312 vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
2313 zbookmark_phys_t *zb = &pio->io_bookmark;
2314 uint64_t delta = gethrtime() - pio->io_timestamp;
2315 uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
2316
2317 zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
2318 "delta=%llu queued=%llu io=%llu "
2319 "path=%s "
2320 "last=%llu type=%d "
2321 "priority=%d flags=0x%llx stage=0x%x "
2322 "pipeline=0x%x pipeline-trace=0x%x "
2323 "objset=%llu object=%llu "
2324 "level=%llu blkid=%llu "
2325 "offset=%llu size=%llu "
2326 "error=%d",
2327 ziodepth, pio, pio->io_timestamp,
2328 (u_longlong_t)delta, pio->io_delta, pio->io_delay,
2329 vd ? vd->vdev_path : "NULL",
2330 vq ? vq->vq_io_complete_ts : 0, pio->io_type,
2331 pio->io_priority, (u_longlong_t)pio->io_flags,
2332 pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
2333 (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
2334 (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
2335 (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
2336 pio->io_error);
2337 (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
2338 pio->io_spa, vd, zb, pio, 0);
2339
2340 if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
2341 taskq_empty_ent(&pio->io_tqent)) {
2342 zio_interrupt(pio);
2343 }
2344 }
2345
2346 mutex_enter(&pio->io_lock);
2347 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2348 cio_next = zio_walk_children(pio, &zl);
2349 zio_deadman_impl(cio, ziodepth + 1);
2350 }
2351 mutex_exit(&pio->io_lock);
2352 }
2353
2354 /*
2355 * Log the critical information describing this zio and all of its children
2356 * using the zfs_dbgmsg() interface then post deadman event for the ZED.
2357 */
2358 void
2359 zio_deadman(zio_t *pio, const char *tag)
2360 {
2361 spa_t *spa = pio->io_spa;
2362 char *name = spa_name(spa);
2363
2364 if (!zfs_deadman_enabled || spa_suspended(spa))
2365 return;
2366
2367 zio_deadman_impl(pio, 0);
2368
2369 switch (spa_get_deadman_failmode(spa)) {
2370 case ZIO_FAILURE_MODE_WAIT:
2371 zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
2372 break;
2373
2374 case ZIO_FAILURE_MODE_CONTINUE:
2375 zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
2376 break;
2377
2378 case ZIO_FAILURE_MODE_PANIC:
2379 fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
2380 break;
2381 }
2382 }
2383
2384 /*
2385 * Execute the I/O pipeline until one of the following occurs:
2386 * (1) the I/O completes; (2) the pipeline stalls waiting for
2387 * dependent child I/Os; (3) the I/O issues, so we're waiting
2388 * for an I/O completion interrupt; (4) the I/O is delegated by
2389 * vdev-level caching or aggregation; (5) the I/O is deferred
2390 * due to vdev-level queueing; (6) the I/O is handed off to
2391 * another thread. In all cases, the pipeline stops whenever
2392 * there's no CPU work; it never burns a thread in cv_wait_io().
2393 *
2394 * There's no locking on io_stage because there's no legitimate way
2395 * for multiple threads to be attempting to process the same I/O.
2396 */
2397 static zio_pipe_stage_t *zio_pipeline[];
2398
2399 /*
2400 * zio_execute() is a wrapper around the static function
2401 * __zio_execute() so that we can force __zio_execute() to be
2402 * inlined. This reduces stack overhead which is important
2403 * because __zio_execute() is called recursively in several zio
2404 * code paths. zio_execute() itself cannot be inlined because
2405 * it is externally visible.
2406 */
2407 void
2408 zio_execute(void *zio)
2409 {
2410 fstrans_cookie_t cookie;
2411
2412 cookie = spl_fstrans_mark();
2413 __zio_execute(zio);
2414 spl_fstrans_unmark(cookie);
2415 }
2416
2417 /*
2418 * Used to determine if in the current context the stack is sized large
2419 * enough to allow zio_execute() to be called recursively. A minimum
2420 * stack size of 16K is required to avoid needing to re-dispatch the zio.
2421 */
2422 static boolean_t
2423 zio_execute_stack_check(zio_t *zio)
2424 {
2425 #if !defined(HAVE_LARGE_STACKS)
2426 dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
2427
2428 /* Executing in txg_sync_thread() context. */
2429 if (dp && curthread == dp->dp_tx.tx_sync_thread)
2430 return (B_TRUE);
2431
2432 /* Pool initialization outside of zio_taskq context. */
2433 if (dp && spa_is_initializing(dp->dp_spa) &&
2434 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
2435 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
2436 return (B_TRUE);
2437 #else
2438 (void) zio;
2439 #endif /* HAVE_LARGE_STACKS */
2440
2441 return (B_FALSE);
2442 }
2443
2444 __attribute__((always_inline))
2445 static inline void
2446 __zio_execute(zio_t *zio)
2447 {
2448 ASSERT3U(zio->io_queued_timestamp, >, 0);
2449
2450 while (zio->io_stage < ZIO_STAGE_DONE) {
2451 enum zio_stage pipeline = zio->io_pipeline;
2452 enum zio_stage stage = zio->io_stage;
2453
2454 zio->io_executor = curthread;
2455
2456 ASSERT(!MUTEX_HELD(&zio->io_lock));
2457 ASSERT(ISP2(stage));
2458 ASSERT(zio->io_stall == NULL);
2459
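/*
 * Each pipeline stage is a distinct power-of-two bit. Advance to the
 * next stage this zio's pipeline actually includes by shifting the
 * current stage bit left until it intersects the pipeline mask. For
 * example, if the current stage is bit 3 and the pipeline skips bits
 * 4-5 but has bit 6 set, the loop below stops at bit 6.
 */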
2460 do {
2461 stage <<= 1;
2462 } while ((stage & pipeline) == 0);
2463
2464 ASSERT(stage <= ZIO_STAGE_DONE);
2465
2466 /*
2467 * If we are in interrupt context and this pipeline stage
2468 * will grab a config lock that is held across I/O,
2469 * or may wait for an I/O that needs an interrupt thread
2470 * to complete, issue async to avoid deadlock.
2471 *
2472 * For VDEV_IO_START, we cut in line so that the io will
2473 * be sent to disk promptly.
2474 */
2475 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
2476 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
2477 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2478 zio_requeue_io_start_cut_in_line : B_FALSE;
2479 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2480 return;
2481 }
2482
2483 /*
2484 * If the current context doesn't have large enough stacks
2485 * the zio must be issued asynchronously to prevent overflow.
2486 */
2487 if (zio_execute_stack_check(zio)) {
2488 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2489 zio_requeue_io_start_cut_in_line : B_FALSE;
2490 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2491 return;
2492 }
2493
2494 zio->io_stage = stage;
2495 zio->io_pipeline_trace |= zio->io_stage;
2496
2497 /*
2498 * The zio pipeline stage returns the next zio to execute
2499 * (typically the same as this one), or NULL if we should
2500 * stop.
2501 */
2502 zio = zio_pipeline[highbit64(stage) - 1](zio);
2503
2504 if (zio == NULL)
2505 return;
2506 }
2507 }
2508
2509
2510 /*
2511 * ==========================================================================
2512 * Initiate I/O, either sync or async
2513 * ==========================================================================
2514 */
2515 int
2516 zio_wait(zio_t *zio)
2517 {
2518 /*
2519 * Some routines, like zio_free_sync(), may return a NULL zio
2520 * to avoid the performance overhead of creating and then destroying
2521 * an unneeded zio. For the callers' simplicity, we accept a NULL
2522 * zio and ignore it.
2523 */
2524 if (zio == NULL)
2525 return (0);
2526
2527 long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
2528 int error;
2529
2530 ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
2531 ASSERT3P(zio->io_executor, ==, NULL);
2532
2533 zio->io_waiter = curthread;
2534 ASSERT0(zio->io_queued_timestamp);
2535 zio->io_queued_timestamp = gethrtime();
2536
2537 if (zio->io_type == ZIO_TYPE_WRITE) {
2538 spa_select_allocator(zio);
2539 }
2540 __zio_execute(zio);
2541
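/*
 * Wait for the zio to complete. If the wait exceeds the deadman
 * ziotime, log the hung I/O via zio_deadman() and keep waiting,
 * re-checking every zfs_deadman_checktime_ms thereafter.
 */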
2542 mutex_enter(&zio->io_lock);
2543 while (zio->io_executor != NULL) {
2544 error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
2545 ddi_get_lbolt() + timeout);
2546
2547 if (zfs_deadman_enabled && error == -1 &&
2548 gethrtime() - zio->io_queued_timestamp >
2549 spa_deadman_ziotime(zio->io_spa)) {
2550 mutex_exit(&zio->io_lock);
2551 timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
2552 zio_deadman(zio, FTAG);
2553 mutex_enter(&zio->io_lock);
2554 }
2555 }
2556 mutex_exit(&zio->io_lock);
2557
2558 error = zio->io_error;
2559 zio_destroy(zio);
2560
2561 return (error);
2562 }
2563
2564 void
2565 zio_nowait(zio_t *zio)
2566 {
2567 /*
2568 * See comment in zio_wait().
2569 */
2570 if (zio == NULL)
2571 return;
2572
2573 ASSERT3P(zio->io_executor, ==, NULL);
2574
2575 if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
2576 list_is_empty(&zio->io_parent_list)) {
2577 zio_t *pio;
2578
2579 /*
2580 * This is a logical async I/O with no parent to wait for it.
2581 * We add it to the spa_async_root_zio "Godfather" I/O, which
2582 * ensures such I/Os complete prior to unloading the pool.
2583 */
2584 spa_t *spa = zio->io_spa;
2585 pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
2586
2587 zio_add_child(pio, zio);
2588 }
2589
2590 ASSERT0(zio->io_queued_timestamp);
2591 zio->io_queued_timestamp = gethrtime();
2592 if (zio->io_type == ZIO_TYPE_WRITE) {
2593 spa_select_allocator(zio);
2594 }
2595 __zio_execute(zio);
2596 }
2597
2598 /*
2599 * ==========================================================================
2600 * Reexecute, cancel, or suspend/resume failed I/O
2601 * ==========================================================================
2602 */
2603
2604 static void
2605 zio_reexecute(void *arg)
2606 {
2607 zio_t *pio = arg;
2608 zio_t *cio, *cio_next, *gio;
2609
2610 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
2611 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
2612 ASSERT(pio->io_gang_leader == NULL);
2613 ASSERT(pio->io_gang_tree == NULL);
2614
2615 mutex_enter(&pio->io_lock);
2616 pio->io_flags = pio->io_orig_flags;
2617 pio->io_stage = pio->io_orig_stage;
2618 pio->io_pipeline = pio->io_orig_pipeline;
2619 pio->io_reexecute = 0;
2620 pio->io_flags |= ZIO_FLAG_REEXECUTED;
2621 pio->io_pipeline_trace = 0;
2622 pio->io_error = 0;
2623 pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) ||
2624 (pio->io_pipeline & ZIO_STAGE_READY) == 0;
2625 pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE);
2626
2627 /*
2628 * It's possible for a failed ZIO to be a descendant of more than one
2629 * ZIO tree. When reexecuting it, we have to be sure to add its wait
2630 * states to all parent wait counts.
2631 *
2632 * Those parents, in turn, may have other children that are currently
2633 * active, usually because they've already been reexecuted after
2634 * resuming. Those children may be executing and may call
2635 * zio_notify_parent() at the same time as we're updating our parent's
2636 * counts. To avoid races while updating the counts, we take
2637 * gio->io_lock before each update.
2638 */
2639 zio_link_t *zl = NULL;
2640 while ((gio = zio_walk_parents(pio, &zl)) != NULL) {
2641 mutex_enter(&gio->io_lock);
2642 for (int w = 0; w < ZIO_WAIT_TYPES; w++) {
2643 gio->io_children[pio->io_child_type][w] +=
2644 !pio->io_state[w];
2645 }
2646 mutex_exit(&gio->io_lock);
2647 }
2648
2649 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
2650 pio->io_child_error[c] = 0;
2651
2652 if (IO_IS_ALLOCATING(pio))
2653 BP_ZERO(pio->io_bp);
2654
2655 /*
2656 * As we reexecute pio's children, new children could be created.
2657 * New children go to the head of pio's io_child_list, however,
2658 * so we will (correctly) not reexecute them. The key is that
2659 * the remainder of pio's io_child_list, from 'cio_next' onward,
2660 * cannot be affected by any side effects of reexecuting 'cio'.
2661 */
2662 zl = NULL;
2663 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2664 cio_next = zio_walk_children(pio, &zl);
2665 mutex_exit(&pio->io_lock);
2666 zio_reexecute(cio);
2667 mutex_enter(&pio->io_lock);
2668 }
2669 mutex_exit(&pio->io_lock);
2670
2671 /*
2672 * Now that all children have been reexecuted, execute the parent.
2673 * We don't reexecute "The Godfather" I/O here as it's the
2674 * responsibility of the caller to wait on it.
2675 */
2676 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
2677 pio->io_queued_timestamp = gethrtime();
2678 __zio_execute(pio);
2679 }
2680 }
2681
2682 void
2683 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
2684 {
2685 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
2686 fm_panic("Pool '%s' has encountered an uncorrectable I/O "
2687 "failure and the failure mode property for this pool "
2688 "is set to panic.", spa_name(spa));
2689
2690 if (reason != ZIO_SUSPEND_MMP) {
2691 cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable "
2692 "I/O failure and has been suspended.", spa_name(spa));
2693 }
2694
2695 (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
2696 NULL, NULL, 0);
2697
2698 mutex_enter(&spa->spa_suspend_lock);
2699
2700 if (spa->spa_suspend_zio_root == NULL)
2701 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
2702 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2703 ZIO_FLAG_GODFATHER);
2704
2705 spa->spa_suspended = reason;
2706
2707 if (zio != NULL) {
2708 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
2709 ASSERT(zio != spa->spa_suspend_zio_root);
2710 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2711 ASSERT(zio_unique_parent(zio) == NULL);
2712 ASSERT(zio->io_stage == ZIO_STAGE_DONE);
2713 zio_add_child(spa->spa_suspend_zio_root, zio);
2714 }
2715
2716 mutex_exit(&spa->spa_suspend_lock);
2717 }
2718
2719 int
2720 zio_resume(spa_t *spa)
2721 {
2722 zio_t *pio;
2723
2724 /*
2725 * Reexecute all previously suspended i/o.
2726 */
2727 mutex_enter(&spa->spa_suspend_lock);
2728 if (spa->spa_suspended != ZIO_SUSPEND_NONE)
2729 cmn_err(CE_WARN, "Pool '%s' was suspended and is being "
2730 "resumed. Failed I/O will be retried.",
2731 spa_name(spa));
2732 spa->spa_suspended = ZIO_SUSPEND_NONE;
2733 cv_broadcast(&spa->spa_suspend_cv);
2734 pio = spa->spa_suspend_zio_root;
2735 spa->spa_suspend_zio_root = NULL;
2736 mutex_exit(&spa->spa_suspend_lock);
2737
2738 if (pio == NULL)
2739 return (0);
2740
2741 zio_reexecute(pio);
2742 return (zio_wait(pio));
2743 }
2744
2745 void
2746 zio_resume_wait(spa_t *spa)
2747 {
2748 mutex_enter(&spa->spa_suspend_lock);
2749 while (spa_suspended(spa))
2750 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
2751 mutex_exit(&spa->spa_suspend_lock);
2752 }
2753
2754 /*
2755 * ==========================================================================
2756 * Gang blocks.
2757 *
2758 * A gang block is a collection of small blocks that looks to the DMU
2759 * like one large block. When zio_dva_allocate() cannot find a block
2760 * of the requested size, due to either severe fragmentation or the pool
2761 * being nearly full, it calls zio_write_gang_block() to construct the
2762 * block from smaller fragments.
2763 *
2764 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
2765 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
2766 * an indirect block: it's an array of block pointers. It consumes
2767 * only one sector and hence is allocatable regardless of fragmentation.
2768 * The gang header's bps point to its gang members, which hold the data.
2769 *
2770 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
2771 * as the verifier to ensure uniqueness of the SHA256 checksum.
2772 * Critically, the gang block bp's blk_cksum is the checksum of the data,
2773 * not the gang header. This ensures that data block signatures (needed for
2774 * deduplication) are independent of how the block is physically stored.
2775 *
2776 * Gang blocks can be nested: a gang member may itself be a gang block.
2777 * Thus every gang block is a tree in which root and all interior nodes are
2778 * gang headers, and the leaves are normal blocks that contain user data.
2779 * The root of the gang tree is called the gang leader.
2780 *
2781 * To perform any operation (read, rewrite, free, claim) on a gang block,
2782 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
2783 * in the io_gang_tree field of the original logical i/o by recursively
2784 * reading the gang leader and all gang headers below it. This yields
2785 * an in-core tree containing the contents of every gang header and the
2786 * bps for every constituent of the gang block.
2787 *
2788 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
2789 * and invokes a callback on each bp. To free a gang block, zio_gang_issue()
2790 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
2791 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
2792 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
2793 * headers, since we already have those in io_gang_tree. zio_rewrite_gang()
2794 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
2795 * of the gang header plus zio_checksum_compute() of the data to update the
2796 * gang header's blk_cksum as described above.
2797 *
2798 * The two-phase assemble/issue model solves the problem of partial failure --
2799 * what if you'd freed part of a gang block but then couldn't read the
2800 * gang header for another part? Assembling the entire gang tree first
2801 * ensures that all the necessary gang header I/O has succeeded before
2802 * starting the actual work of free, claim, or write. Once the gang tree
2803 * is assembled, free and claim are in-memory operations that cannot fail.
2804 *
2805 * In the event that a gang write fails, zio_dva_unallocate() walks the
2806 * gang tree to immediately free (i.e. insert back into the space map)
2807 * everything we've allocated. This ensures that we don't get ENOSPC
2808 * errors during repeated suspend/resume cycles due to a flaky device.
2809 *
2810 * Gang rewrites only happen during sync-to-convergence. If we can't assemble
2811 * the gang tree, we won't modify the block, so we can safely defer the free
2812 * (knowing that the block is still intact). If we *can* assemble the gang
2813 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
2814 * each constituent bp and we can allocate a new block on the next sync pass.
2815 *
2816 * In all cases, the gang tree allows complete recovery from partial failure.
2817 * ==========================================================================
2818 */
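
/*
 * For example, a 128K logical block that cannot be allocated contiguously
 * might be stored as a gang tree like this (a simplified illustration),
 * where the gang header (gbh) itself occupies a single sector:
 *
 *	gang leader bp (BP_IS_GANG)
 *	    -> gbh { bp[0]: 64K data, bp[1]: 48K data, bp[2]: 16K data }
 *
 * Reading the block first reads the gbh (zio_gang_assemble), then issues
 * reads for the three constituent bps and concatenates their data in
 * order (zio_gang_issue).
 */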
2819
2820 static void
2821 zio_gang_issue_func_done(zio_t *zio)
2822 {
2823 abd_free(zio->io_abd);
2824 }
2825
2826 static zio_t *
2827 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2828 uint64_t offset)
2829 {
2830 if (gn != NULL)
2831 return (pio);
2832
2833 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
2834 BP_GET_PSIZE(bp), zio_gang_issue_func_done,
2835 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2836 &pio->io_bookmark));
2837 }
2838
2839 static zio_t *
2840 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2841 uint64_t offset)
2842 {
2843 zio_t *zio;
2844
2845 if (gn != NULL) {
2846 abd_t *gbh_abd =
2847 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2848 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2849 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
2850 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2851 &pio->io_bookmark);
2852 /*
2853 * As we rewrite each gang header, the pipeline will compute
2854 * a new gang block header checksum for it; but no one will
2855 * compute a new data checksum, so we do that here. The one
2856 * exception is the gang leader: the pipeline already computed
2857 * its data checksum because that stage precedes gang assembly.
2858 * (Presently, nothing actually uses interior data checksums;
2859 * this is just good hygiene.)
2860 */
2861 if (gn != pio->io_gang_leader->io_gang_tree) {
2862 abd_t *buf = abd_get_offset(data, offset);
2863
2864 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
2865 buf, BP_GET_PSIZE(bp));
2866
2867 abd_free(buf);
2868 }
2869 /*
2870 * If we are here to damage data for testing purposes,
2871 * leave the GBH alone so that we can detect the damage.
2872 */
2873 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
2874 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
2875 } else {
2876 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2877 abd_get_offset(data, offset), BP_GET_PSIZE(bp),
2878 zio_gang_issue_func_done, NULL, pio->io_priority,
2879 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2880 }
2881
2882 return (zio);
2883 }
2884
2885 static zio_t *
2886 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2887 uint64_t offset)
2888 {
2889 (void) gn, (void) data, (void) offset;
2890
2891 zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
2892 ZIO_GANG_CHILD_FLAGS(pio));
2893 if (zio == NULL) {
2894 zio = zio_null(pio, pio->io_spa,
2895 NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
2896 }
2897 return (zio);
2898 }
2899
2900 static zio_t *
2901 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2902 uint64_t offset)
2903 {
2904 (void) gn, (void) data, (void) offset;
2905 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
2906 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
2907 }
2908
2909 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
2910 NULL,
2911 zio_read_gang,
2912 zio_rewrite_gang,
2913 zio_free_gang,
2914 zio_claim_gang,
2915 NULL
2916 };
2917
2918 static void zio_gang_tree_assemble_done(zio_t *zio);
2919
2920 static zio_gang_node_t *
2921 zio_gang_node_alloc(zio_gang_node_t **gnpp)
2922 {
2923 zio_gang_node_t *gn;
2924
2925 ASSERT(*gnpp == NULL);
2926
2927 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
2928 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
2929 *gnpp = gn;
2930
2931 return (gn);
2932 }
2933
2934 static void
2935 zio_gang_node_free(zio_gang_node_t **gnpp)
2936 {
2937 zio_gang_node_t *gn = *gnpp;
2938
2939 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2940 ASSERT(gn->gn_child[g] == NULL);
2941
2942 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2943 kmem_free(gn, sizeof (*gn));
2944 *gnpp = NULL;
2945 }
2946
2947 static void
2948 zio_gang_tree_free(zio_gang_node_t **gnpp)
2949 {
2950 zio_gang_node_t *gn = *gnpp;
2951
2952 if (gn == NULL)
2953 return;
2954
2955 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2956 zio_gang_tree_free(&gn->gn_child[g]);
2957
2958 zio_gang_node_free(gnpp);
2959 }
2960
2961 static void
2962 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
2963 {
2964 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
2965 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2966
2967 ASSERT(gio->io_gang_leader == gio);
2968 ASSERT(BP_IS_GANG(bp));
2969
2970 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
2971 zio_gang_tree_assemble_done, gn, gio->io_priority,
2972 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
2973 }
2974
2975 static void
2976 zio_gang_tree_assemble_done(zio_t *zio)
2977 {
2978 zio_t *gio = zio->io_gang_leader;
2979 zio_gang_node_t *gn = zio->io_private;
2980 blkptr_t *bp = zio->io_bp;
2981
2982 ASSERT(gio == zio_unique_parent(zio));
2983 ASSERT(list_is_empty(&zio->io_child_list));
2984
2985 if (zio->io_error)
2986 return;
2987
2988 /* this ABD was created from a linear buf in zio_gang_tree_assemble */
2989 if (BP_SHOULD_BYTESWAP(bp))
2990 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
2991
2992 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
2993 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
2994 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2995
2996 abd_free(zio->io_abd);
2997
2998 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2999 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
3000 if (!BP_IS_GANG(gbp))
3001 continue;
3002 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
3003 }
3004 }
3005
3006 static void
3007 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
3008 uint64_t offset)
3009 {
3010 zio_t *gio = pio->io_gang_leader;
3011 zio_t *zio;
3012
3013 ASSERT(BP_IS_GANG(bp) == !!gn);
3014 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
3015 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
3016
3017 /*
3018 * If you're a gang header, your data is in gn->gn_gbh.
3019 * If you're a gang member, your data is in 'data' and gn == NULL.
3020 */
3021 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
3022
3023 if (gn != NULL) {
3024 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
3025
3026 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
3027 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
3028 if (BP_IS_HOLE(gbp))
3029 continue;
3030 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
3031 offset);
3032 offset += BP_GET_PSIZE(gbp);
3033 }
3034 }
3035
3036 if (gn == gio->io_gang_tree)
3037 ASSERT3U(gio->io_size, ==, offset);
3038
3039 if (zio != pio)
3040 zio_nowait(zio);
3041 }
3042
3043 static zio_t *
3044 zio_gang_assemble(zio_t *zio)
3045 {
3046 blkptr_t *bp = zio->io_bp;
3047
3048 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
3049 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3050
3051 zio->io_gang_leader = zio;
3052
3053 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
3054
3055 return (zio);
3056 }
3057
3058 static zio_t *
3059 zio_gang_issue(zio_t *zio)
3060 {
3061 blkptr_t *bp = zio->io_bp;
3062
3063 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
3064 return (NULL);
3065 }
3066
3067 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
3068 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3069
3070 if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
3071 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
3072 0);
3073 else
3074 zio_gang_tree_free(&zio->io_gang_tree);
3075
3076 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3077
3078 return (zio);
3079 }
3080
3081 static void
3082 zio_gang_inherit_allocator(zio_t *pio, zio_t *cio)
3083 {
3084 cio->io_allocator = pio->io_allocator;
3085 }
3086
3087 static void
3088 zio_write_gang_member_ready(zio_t *zio)
3089 {
3090 zio_t *pio = zio_unique_parent(zio);
3091 dva_t *cdva = zio->io_bp->blk_dva;
3092 dva_t *pdva = pio->io_bp->blk_dva;
3093 uint64_t asize;
3094 zio_t *gio __maybe_unused = zio->io_gang_leader;
3095
3096 if (BP_IS_HOLE(zio->io_bp))
3097 return;
3098
3099 /*
3100 * If we're being invoked directly from zio_write_gang_block(),
3101 * the bp_orig will be set.
3102 */
3103 ASSERT(BP_IS_HOLE(&zio->io_bp_orig) ||
3104 zio->io_flags & ZIO_FLAG_PREALLOCATED);
3105
3106 ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
3107 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
3108 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
3109 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
3110 VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
3111
3112 mutex_enter(&pio->io_lock);
3113 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
3114 ASSERT(DVA_GET_GANG(&pdva[d]));
3115 asize = DVA_GET_ASIZE(&pdva[d]);
3116 asize += DVA_GET_ASIZE(&cdva[d]);
3117 DVA_SET_ASIZE(&pdva[d], asize);
3118 }
3119 mutex_exit(&pio->io_lock);
3120 }
3121
3122 static void
3123 zio_write_gang_done(zio_t *zio)
3124 {
3125 /*
3126 * The io_abd field will be NULL for a zio with no data. The io_flags
3127 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
3128 * check for it here as it is cleared in zio_ready.
3129 */
3130 if (zio->io_abd != NULL)
3131 abd_free(zio->io_abd);
3132 }
3133
3134 static zio_t *
3135 zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
3136 {
3137 spa_t *spa = pio->io_spa;
3138 blkptr_t *bp = pio->io_bp;
3139 zio_t *gio = pio->io_gang_leader;
3140 zio_t *zio;
3141 zio_gang_node_t *gn, **gnpp;
3142 zio_gbh_phys_t *gbh;
3143 abd_t *gbh_abd;
3144 uint64_t txg = pio->io_txg;
3145 uint64_t resid = pio->io_size;
3146 zio_prop_t zp;
3147 int error;
3148 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
3149
3150 /*
3151 * Store multiple copies of the GBH, so that we can still traverse
3152 * all the data (e.g. to free or scrub) even if a block is damaged.
3153 * This value respects the redundant_metadata property.
3154 */
3155 int gbh_copies = gio->io_prop.zp_gang_copies;
3156 if (gbh_copies == 0) {
3157 /*
3158 * This should only happen in the case where we're filling in
3159 * DDT entries for a parent that wants more copies than the DDT
3160 * has. In that case, we cannot gang without creating a mixed
3161 * blkptr, which is illegal.
3162 */
3163 ASSERT3U(gio->io_child_type, ==, ZIO_CHILD_DDT);
3164 pio->io_error = EAGAIN;
3165 return (pio);
3166 }
3167 ASSERT3S(gbh_copies, >, 0);
3168 ASSERT3S(gbh_copies, <=, SPA_DVAS_PER_BP);
3169
3170 ASSERT(ZIO_HAS_ALLOCATOR(pio));
3171 int flags = METASLAB_GANG_HEADER;
3172 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3173 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3174 ASSERT(has_data);
3175
3176 flags |= METASLAB_ASYNC_ALLOC;
3177 }
3178
3179 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
3180 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
3181 &pio->io_alloc_list, pio->io_allocator, pio);
3182 if (error) {
3183 pio->io_error = error;
3184 return (pio);
3185 }
3186
3187 if (pio == gio) {
3188 gnpp = &gio->io_gang_tree;
3189 } else {
3190 gnpp = pio->io_private;
3191 ASSERT(pio->io_ready == zio_write_gang_member_ready);
3192 }
3193
3194 gn = zio_gang_node_alloc(gnpp);
3195 gbh = gn->gn_gbh;
3196 memset(gbh, 0, SPA_GANGBLOCKSIZE);
3197 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
3198
3199 /*
3200 * Create the gang header.
3201 */
3202 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
3203 zio_write_gang_done, NULL, pio->io_priority,
3204 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
3205
3206 zio_gang_inherit_allocator(pio, zio);
3207 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3208 boolean_t more;
3209 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies,
3210 zio, B_TRUE, &more));
3211 }
3212
3213 /*
3214 * Create and nowait the gang children. First, we try to do
3215 * opportunistic allocations. If that fails to generate enough
3216 * space, we fall back to normal zio_write calls for nested gang blocks.
3217 */
3218 for (int g = 0; resid != 0; g++) {
3219 flags &= METASLAB_ASYNC_ALLOC;
3220 flags |= METASLAB_GANG_CHILD;
3221 zp.zp_checksum = gio->io_prop.zp_checksum;
3222 zp.zp_compress = ZIO_COMPRESS_OFF;
3223 zp.zp_complevel = gio->io_prop.zp_complevel;
3224 zp.zp_type = zp.zp_storage_type = DMU_OT_NONE;
3225 zp.zp_level = 0;
3226 zp.zp_copies = gio->io_prop.zp_copies;
3227 zp.zp_gang_copies = gio->io_prop.zp_gang_copies;
3228 zp.zp_dedup = B_FALSE;
3229 zp.zp_dedup_verify = B_FALSE;
3230 zp.zp_nopwrite = B_FALSE;
3231 zp.zp_encrypt = gio->io_prop.zp_encrypt;
3232 zp.zp_byteorder = gio->io_prop.zp_byteorder;
3233 zp.zp_direct_write = B_FALSE;
3234 memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
3235 memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
3236 memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
3237
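/*
 * Aim to spread the remaining data roughly evenly across the
 * remaining gang header slots: for example, with ~300K left to
 * write and three slots remaining, ask the allocator for at least
 * ~100K (rounded up to an allocatable size), but never for more
 * than the remaining residual.
 */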
3238 uint64_t min_size = zio_roundup_alloc_size(spa,
3239 resid / (SPA_GBH_NBLKPTRS - g));
3240 min_size = MIN(min_size, resid);
3241 bp = &gbh->zg_blkptr[g];
3242
3243 zio_alloc_list_t cio_list;
3244 metaslab_trace_init(&cio_list);
3245 uint64_t allocated_size = UINT64_MAX;
3246 error = metaslab_alloc_range(spa, mc, min_size, resid,
3247 bp, gio->io_prop.zp_copies, txg, NULL,
3248 flags, &cio_list, zio->io_allocator, NULL, &allocated_size);
3249
3250 boolean_t allocated = error == 0;
3251
3252 uint64_t psize = allocated ? MIN(resid, allocated_size) :
3253 min_size;
3254
3255 zio_t *cio = zio_write(zio, spa, txg, bp, has_data ?
3256 abd_get_offset(pio->io_abd, pio->io_size - resid) : NULL,
3257 psize, psize, &zp, zio_write_gang_member_ready, NULL,
3258 zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
3259 ZIO_GANG_CHILD_FLAGS(pio) |
3260 (allocated ? ZIO_FLAG_PREALLOCATED : 0), &pio->io_bookmark);
3261
3262 resid -= psize;
3263 zio_gang_inherit_allocator(zio, cio);
3264 if (allocated) {
3265 metaslab_trace_move(&cio_list, &cio->io_alloc_list);
3266 metaslab_group_alloc_increment_all(spa,
3267 &cio->io_bp_orig, zio->io_allocator, flags, psize,
3268 cio);
3269 }
3270 /*
3271 * We do not reserve for the child writes, since we already
3272 * reserved for the parent. Unreserve, though, will be called
3273 * for the individual children. We can do this because the sum
3274 * of the children's physical sizes equals the parent's physical
3275 * size; it would not work for potentially bigger allocation sizes.
3276 */
3277
3278 zio_nowait(cio);
3279 }
3280
3281 /*
3282 * Set pio's pipeline to just wait for zio to finish.
3283 */
3284 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3285
3286 zio_nowait(zio);
3287
3288 return (pio);
3289 }
3290
3291 /*
3292 * The zio_nop_write stage in the pipeline determines if allocating a
3293 * new bp is necessary. The nopwrite feature can handle writes in
3294 * either syncing or open context (i.e. zil writes) and as a result is
3295 * mutually exclusive with dedup.
3296 *
3297 * By leveraging a cryptographically secure checksum, such as SHA256, we
3298 * can compare the checksums of the new data and the old to determine if
3299 * allocating a new block is required. Note that our requirements for
3300 * cryptographic strength are fairly weak: there can't be any accidental
3301 * hash collisions, but we don't need to be secure against intentional
3302 * (malicious) collisions. To trigger a nopwrite, you have to be able
3303 * to write the file to begin with, and triggering an incorrect (hash
3304 * collision) nopwrite is no worse than simply writing to the file.
3305 * That said, there are no known attacks against the checksum algorithms
3306 * used for nopwrite, assuming that the salt and the checksums
3307 * themselves remain secret.
3308 */
3309 static zio_t *
3310 zio_nop_write(zio_t *zio)
3311 {
3312 blkptr_t *bp = zio->io_bp;
3313 blkptr_t *bp_orig = &zio->io_bp_orig;
3314 zio_prop_t *zp = &zio->io_prop;
3315
3316 ASSERT(BP_IS_HOLE(bp));
3317 ASSERT(BP_GET_LEVEL(bp) == 0);
3318 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
3319 ASSERT(zp->zp_nopwrite);
3320 ASSERT(!zp->zp_dedup);
3321 ASSERT(zio->io_bp_override == NULL);
3322 ASSERT(IO_IS_ALLOCATING(zio));
3323
3324 /*
3325 * Check to see if the original bp and the new bp have matching
3326 * characteristics (i.e. same checksum, compression algorithms, etc).
3327 * If they don't then just continue with the pipeline which will
3328 * allocate a new bp.
3329 */
3330 if (BP_IS_HOLE(bp_orig) ||
3331 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
3332 ZCHECKSUM_FLAG_NOPWRITE) ||
3333 BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
3334 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
3335 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
3336 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
3337 zp->zp_copies != BP_GET_NDVAS(bp_orig))
3338 return (zio);
3339
3340 /*
3341 * If the checksums match then reset the pipeline so that we
3342 * avoid allocating a new bp and issuing any I/O.
3343 */
3344 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
3345 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
3346 ZCHECKSUM_FLAG_NOPWRITE);
3347 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
3348 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
3349 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
3350 ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
3351
3352 /*
3353 * If we're overwriting a block that is currently on an
3354 * indirect vdev, then ignore the nopwrite request and
3355 * allow a new block to be allocated on a concrete vdev.
3356 */
3357 spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
3358 for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
3359 vdev_t *tvd = vdev_lookup_top(zio->io_spa,
3360 DVA_GET_VDEV(&bp_orig->blk_dva[d]));
3361 if (tvd->vdev_ops == &vdev_indirect_ops) {
3362 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3363 return (zio);
3364 }
3365 }
3366 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3367
3368 *bp = *bp_orig;
3369 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3370 zio->io_flags |= ZIO_FLAG_NOPWRITE;
3371 }
3372
3373 return (zio);
3374 }
3375
3376 /*
3377 * ==========================================================================
3378 * Block Reference Table
3379 * ==========================================================================
3380 */
3381 static zio_t *
3382 zio_brt_free(zio_t *zio)
3383 {
3384 blkptr_t *bp;
3385
3386 bp = zio->io_bp;
3387
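/*
 * Only level-0 data blocks can carry BRT references; skip indirect
 * blocks, metadata, and anything the BRT says cannot have an entry.
 */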
3388 if (BP_GET_LEVEL(bp) > 0 ||
3389 BP_IS_METADATA(bp) ||
3390 !brt_maybe_exists(zio->io_spa, bp)) {
3391 return (zio);
3392 }
3393
3394 if (!brt_entry_decref(zio->io_spa, bp)) {
3395 /*
3396 * This isn't the last reference, so we cannot free
3397 * the data yet.
3398 */
3399 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3400 }
3401
3402 return (zio);
3403 }
3404
3405 /*
3406 * ==========================================================================
3407 * Dedup
3408 * ==========================================================================
3409 */
3410 static void
3411 zio_ddt_child_read_done(zio_t *zio)
3412 {
3413 blkptr_t *bp = zio->io_bp;
3414 ddt_t *ddt;
3415 ddt_entry_t *dde = zio->io_private;
3416 zio_t *pio = zio_unique_parent(zio);
3417
3418 mutex_enter(&pio->io_lock);
3419 ddt = ddt_select(zio->io_spa, bp);
3420
3421 if (zio->io_error == 0) {
3422 ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
3423 /* this phys variant doesn't need repair */
3424 ddt_phys_clear(dde->dde_phys, v);
3425 }
3426
3427 if (zio->io_error == 0 && dde->dde_io->dde_repair_abd == NULL)
3428 dde->dde_io->dde_repair_abd = zio->io_abd;
3429 else
3430 abd_free(zio->io_abd);
3431 mutex_exit(&pio->io_lock);
3432 }
3433
3434 static zio_t *
3435 zio_ddt_read_start(zio_t *zio)
3436 {
3437 blkptr_t *bp = zio->io_bp;
3438
3439 ASSERT(BP_GET_DEDUP(bp));
3440 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3441 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3442
3443 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3444 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3445 ddt_entry_t *dde = ddt_repair_start(ddt, bp);
3446 ddt_phys_variant_t v_self = ddt_phys_select(ddt, dde, bp);
3447 ddt_univ_phys_t *ddp = dde->dde_phys;
3448 blkptr_t blk;
3449
3450 ASSERT(zio->io_vsd == NULL);
3451 zio->io_vsd = dde;
3452
3453 if (v_self == DDT_PHYS_NONE)
3454 return (zio);
3455
3456 /* issue I/O for the other copies */
3457 for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3458 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3459
3460 if (ddt_phys_birth(ddp, v) == 0 || v == v_self)
3461 continue;
3462
3463 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key,
3464 ddp, v, &blk);
3465 zio_nowait(zio_read(zio, zio->io_spa, &blk,
3466 abd_alloc_for_io(zio->io_size, B_TRUE),
3467 zio->io_size, zio_ddt_child_read_done, dde,
3468 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
3469 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
3470 }
3471 return (zio);
3472 }
3473
3474 zio_nowait(zio_read(zio, zio->io_spa, bp,
3475 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
3476 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
3477
3478 return (zio);
3479 }
3480
3481 static zio_t *
3482 zio_ddt_read_done(zio_t *zio)
3483 {
3484 blkptr_t *bp = zio->io_bp;
3485
3486 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
3487 return (NULL);
3488 }
3489
3490 ASSERT(BP_GET_DEDUP(bp));
3491 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3492 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3493
3494 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3495 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3496 ddt_entry_t *dde = zio->io_vsd;
3497 if (ddt == NULL) {
3498 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
3499 return (zio);
3500 }
3501 if (dde == NULL) {
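/*
 * The DDT child error showed up after zio_ddt_read_start() had
 * already issued the plain read, so no repair entry was set up.
 * Restart the pipeline at the DDT read stage so one can be created.
 */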
3502 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
3503 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
3504 return (NULL);
3505 }
3506 if (dde->dde_io->dde_repair_abd != NULL) {
3507 abd_copy(zio->io_abd, dde->dde_io->dde_repair_abd,
3508 zio->io_size);
3509 zio->io_child_error[ZIO_CHILD_DDT] = 0;
3510 }
3511 ddt_repair_done(ddt, dde);
3512 zio->io_vsd = NULL;
3513 }
3514
3515 ASSERT(zio->io_vsd == NULL);
3516
3517 return (zio);
3518 }
3519
3520 static boolean_t
3521 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
3522 {
3523 spa_t *spa = zio->io_spa;
3524 boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
3525
3526 ASSERT(!(zio->io_bp_override && do_raw));
3527
3528 /*
3529 * Note: we compare the original data, not the transformed data,
3530 * because when zio->io_bp is an override bp, we will not have
3531 * pushed the I/O transforms. That's an important optimization
3532 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
3533 * However, we should never get a raw, override zio so in these
3534 * cases we can compare the io_abd directly. This is useful because
3535 * it allows us to do dedup verification even if we don't have access
3536 * to the original data (for instance, if the encryption keys aren't
3537 * loaded).
3538 */
3539
3540 for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3541 if (DDT_PHYS_IS_DITTO(ddt, p))
3542 continue;
3543
3544 if (dde->dde_io == NULL)
3545 continue;
3546
3547 zio_t *lio = dde->dde_io->dde_lead_zio[p];
3548 if (lio == NULL)
3549 continue;
3550
3551 if (do_raw)
3552 return (lio->io_size != zio->io_size ||
3553 abd_cmp(zio->io_abd, lio->io_abd) != 0);
3554
3555 return (lio->io_orig_size != zio->io_orig_size ||
3556 abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
3557 }
3558
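/*
 * Otherwise compare against each physical variant already committed
 * to disk: raw zios read back and compare the transformed (physical)
 * data, everything else compares the logical data via the ARC.
 */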
3559 for (int p = 0; p < DDT_NPHYS(ddt); p++) {
3560 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3561 uint64_t phys_birth = ddt_phys_birth(dde->dde_phys, v);
3562
3563 if (phys_birth != 0 && do_raw) {
3564 blkptr_t blk = *zio->io_bp;
3565 uint64_t psize;
3566 abd_t *tmpabd;
3567 int error;
3568
3569 ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth);
3570 psize = BP_GET_PSIZE(&blk);
3571
3572 if (psize != zio->io_size)
3573 return (B_TRUE);
3574
3575 ddt_exit(ddt);
3576
3577 tmpabd = abd_alloc_for_io(psize, B_TRUE);
3578
3579 error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
3580 psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
3581 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3582 ZIO_FLAG_RAW, &zio->io_bookmark));
3583
3584 if (error == 0) {
3585 if (abd_cmp(tmpabd, zio->io_abd) != 0)
3586 error = SET_ERROR(ENOENT);
3587 }
3588
3589 abd_free(tmpabd);
3590 ddt_enter(ddt);
3591 return (error != 0);
3592 } else if (phys_birth != 0) {
3593 arc_buf_t *abuf = NULL;
3594 arc_flags_t aflags = ARC_FLAG_WAIT;
3595 blkptr_t blk = *zio->io_bp;
3596 int error;
3597
3598 ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth);
3599
3600 if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
3601 return (B_TRUE);
3602
3603 ddt_exit(ddt);
3604
3605 error = arc_read(NULL, spa, &blk,
3606 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
3607 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3608 &aflags, &zio->io_bookmark);
3609
3610 if (error == 0) {
3611 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
3612 zio->io_orig_size) != 0)
3613 error = SET_ERROR(ENOENT);
3614 arc_buf_destroy(abuf, &abuf);
3615 }
3616
3617 ddt_enter(ddt);
3618 return (error != 0);
3619 }
3620 }
3621
3622 return (B_FALSE);
3623 }
3624
3625 static void
3626 zio_ddt_child_write_done(zio_t *zio)
3627 {
3628 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3629 ddt_entry_t *dde = zio->io_private;
3630
3631 zio_link_t *zl = NULL;
3632 ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL);
3633
3634 int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies);
3635 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3636 ddt_univ_phys_t *ddp = dde->dde_phys;
3637
3638 ddt_enter(ddt);
3639
3640 /* we're the lead, so once we're done there's no one else outstanding */
3641 if (dde->dde_io->dde_lead_zio[p] == zio)
3642 dde->dde_io->dde_lead_zio[p] = NULL;
3643
3644 ddt_univ_phys_t *orig = &dde->dde_io->dde_orig_phys;
3645
3646 if (zio->io_error != 0) {
3647 /*
3648 * The write failed, so we're about to abort the entire IO
3649 * chain. We need to revert the entry back to what it was at
3650 * the last time it was successfully extended.
3651 */
3652 ddt_phys_unextend(ddp, orig, v);
3653 ddt_phys_clear(orig, v);
3654
3655 ddt_exit(ddt);
3656 return;
3657 }
3658
3659 /*
3660 * Add references for all dedup writes that were waiting on the
3661 * physical one, skipping any other physical writes that are waiting.
3662 */
3663 zio_t *pio;
3664 zl = NULL;
3665 while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
3666 if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
3667 ddt_phys_addref(ddp, v);
3668 }
3669
3670 /*
3671 * We've successfully added new DVAs to the entry. Clear the saved
3672 * state or, if there's still outstanding IO, remember it so we can
3673 * revert to a known good state if that IO fails.
3674 */
3675 if (dde->dde_io->dde_lead_zio[p] == NULL)
3676 ddt_phys_clear(orig, v);
3677 else
3678 ddt_phys_copy(orig, ddp, v);
3679
3680 ddt_exit(ddt);
3681 }
3682
3683 static void
3684 zio_ddt_child_write_ready(zio_t *zio)
3685 {
3686 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3687 ddt_entry_t *dde = zio->io_private;
3688
3689 zio_link_t *zl = NULL;
3690 ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL);
3691
3692 int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies);
3693 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3694
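/*
 * The allocation ganged, so this write cannot be recorded in the DDT
 * entry. Back out the per-DVA allocation accounting and fail this
 * child with EAGAIN before the entry is extended.
 */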
3695 if (ddt_phys_is_gang(dde->dde_phys, v)) {
3696 for (int i = 0; i < BP_GET_NDVAS(zio->io_bp); i++) {
3697 dva_t *d = &zio->io_bp->blk_dva[i];
3698 metaslab_group_alloc_decrement(zio->io_spa,
3699 DVA_GET_VDEV(d), zio->io_allocator,
3700 METASLAB_ASYNC_ALLOC, zio->io_size, zio);
3701 }
3702 zio->io_error = EAGAIN;
3703 }
3704
3705 if (zio->io_error != 0)
3706 return;
3707
3708 ddt_enter(ddt);
3709
3710 ddt_phys_extend(dde->dde_phys, v, zio->io_bp);
3711
3712 zio_t *pio;
3713 zl = NULL;
3714 while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
3715 if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
3716 ddt_bp_fill(dde->dde_phys, v, pio->io_bp, zio->io_txg);
3717 }
3718
3719 ddt_exit(ddt);
3720 }
3721
3722 static zio_t *
3723 zio_ddt_write(zio_t *zio)
3724 {
3725 spa_t *spa = zio->io_spa;
3726 blkptr_t *bp = zio->io_bp;
3727 uint64_t txg = zio->io_txg;
3728 zio_prop_t *zp = &zio->io_prop;
3729 ddt_t *ddt = ddt_select(spa, bp);
3730 ddt_entry_t *dde;
3731
3732 ASSERT(BP_GET_DEDUP(bp));
3733 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
3734 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
3735 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
3736 /*
3737 * Deduplication will not take place for Direct I/O writes. The
3738 * ddt_tree will be emptied in syncing context. Direct I/O writes take
3739 * place in open context, and a Direct I/O write cannot attempt to
3740 * modify the ddt_tree while a write is being issued.
3741 */
3742 ASSERT3B(zio->io_prop.zp_direct_write, ==, B_FALSE);
3743
3744 ddt_enter(ddt);
3745 /*
3746 * Search the DDT for a matching entry. Skip DVA verification here:
3747 * DVAs can only come from an override BP, and once we get here the
3748 * override pointer can't have the "D" flag, so it can't be confused with pruned DDT entries.
3749 */
3750 IMPLY(zio->io_bp_override, !BP_GET_DEDUP(zio->io_bp_override));
3751 dde = ddt_lookup(ddt, bp, B_FALSE);
3752 if (dde == NULL) {
3753 /* DDT size is over its quota so no new entries */
3754 zp->zp_dedup = B_FALSE;
3755 BP_SET_DEDUP(bp, B_FALSE);
3756 if (zio->io_bp_override == NULL)
3757 zio->io_pipeline = ZIO_WRITE_PIPELINE;
3758 ddt_exit(ddt);
3759 return (zio);
3760 }
3761
3762 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
3763 /*
3764 * If we're using a weak checksum, upgrade to a strong checksum
3765 * and try again. If we're already using a strong checksum,
3766 * we can't resolve it, so just convert to an ordinary write.
3767 * (And automatically e-mail a paper to Nature?)
3768 */
3769 if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
3770 ZCHECKSUM_FLAG_DEDUP)) {
3771 zp->zp_checksum = spa_dedup_checksum(spa);
3772 zio_pop_transforms(zio);
3773 zio->io_stage = ZIO_STAGE_OPEN;
3774 BP_ZERO(bp);
3775 } else {
3776 zp->zp_dedup = B_FALSE;
3777 BP_SET_DEDUP(bp, B_FALSE);
3778 }
3779 ASSERT(!BP_GET_DEDUP(bp));
3780 zio->io_pipeline = ZIO_WRITE_PIPELINE;
3781 ddt_exit(ddt);
3782 return (zio);
3783 }
3784
3785 int p = DDT_PHYS_FOR_COPIES(ddt, zp->zp_copies);
3786 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3787 ddt_univ_phys_t *ddp = dde->dde_phys;
3788
3789 /*
3790 * In the common cases, at this point we have a regular BP with no
3791 * allocated DVAs, and the corresponding DDT entry for its checksum.
3792 * Our goal is to fill the BP with enough DVAs to satisfy its copies=
3793 * requirement.
3794 *
3795 * One of three things needs to happen to fulfill this:
3796 *
3797 * - if the DDT entry has enough DVAs to satisfy the BP, we just copy
3798 * them out of the entry and return;
3799 *
3800 * - if the DDT entry has no DVAs (i.e. it's brand new), then we have to
3801 * issue the write as normal so that DVAs can be allocated and the
3802 * data land on disk. We then copy the DVAs into the DDT entry on
3803 * return.
3804 *
3805 * - if the DDT entry has some DVAs, but too few, we have to issue the
3806 * write, adjusted to allocate fewer copies. When it returns, we
3807 * add the new DVAs to the DDT entry, and update the BP to have the
3808 * full amount it originally requested.
3809 *
3810 * In all cases, if there's already a writing IO in flight, we need to
3811 * defer the action until after the write is done. If our action is to
3812 * write, we need to adjust our request for additional DVAs to match
3813 * what will be in the DDT entry after it completes. In this way every
3814 * IO can be guaranteed to receive enough DVAs simply by joining the
3815 * end of the chain and letting the sequence play out.
3816 */
3817
3818 /*
3819 * Number of DVAs in the DDT entry. If the BP is encrypted we ignore
3820 * the third one as normal.
3821 */
3822 int have_dvas = ddt_phys_dva_count(ddp, v, BP_IS_ENCRYPTED(bp));
3823 IMPLY(have_dvas == 0, ddt_phys_birth(ddp, v) == 0);
3824 boolean_t is_ganged = ddt_phys_is_gang(ddp, v);
3825
3826 /* Number of DVAs requested by the IO. */
3827 uint8_t need_dvas = zp->zp_copies;
3828 /* Number of DVAs in outstanding writes for this dde. */
3829 uint8_t parent_dvas = 0;
3830
3831 /*
3832 * What we do next depends on whether or not there's IO outstanding that
3833 * will update this entry.
3834 */
3835 if (dde->dde_io == NULL || dde->dde_io->dde_lead_zio[p] == NULL) {
3836 /*
3837 * No IO outstanding, so we only need to worry about ourselves.
3838 */
3839
3840 /*
3841 * Override BPs bring their own DVAs and their own problems.
3842 */
3843 if (zio->io_bp_override) {
3844 /*
3845 * For a brand-new entry, all the work has been done
3846 * for us, and we can just fill it out from the provided
3847 * block and leave.
3848 */
3849 if (have_dvas == 0) {
3850 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg);
3851 ASSERT(BP_EQUAL(bp, zio->io_bp_override));
3852 ddt_phys_extend(ddp, v, bp);
3853 ddt_phys_addref(ddp, v);
3854 ddt_exit(ddt);
3855 return (zio);
3856 }
3857
3858 /*
3859 * If we already have this entry, then we want to treat
3860 * it like a regular write. To do this we just wipe
3861 * them out and proceed like a regular write.
3862 *
3863 * Even if there are some DVAs in the entry, we still
3864 * have to clear them out. We can't use them to fill
3865 * out the dedup entry, as they are all referenced
3866 * together by a bp already on disk, and will be freed
3867 * as a group.
3868 */
3869 BP_ZERO_DVAS(bp);
3870 BP_SET_BIRTH(bp, 0, 0);
3871 }
3872
3873 /*
3874 * If there are enough DVAs in the entry to service our request,
3875 * then we can just use them as-is.
3876 */
3877 if (have_dvas >= need_dvas) {
3878 ddt_bp_fill(ddp, v, bp, txg);
3879 ddt_phys_addref(ddp, v);
3880 ddt_exit(ddt);
3881 return (zio);
3882 }
3883
3884 /*
3885 * Otherwise, we have to issue IO to fill the entry up to the
3886 * amount we need.
3887 */
3888 need_dvas -= have_dvas;
3889 } else {
3890 /*
3891 * There's a write in-flight. If there's already enough DVAs on
3892 * the entry, then either there were already enough to start
3893 * with, or the in-flight IO is between READY and DONE, and so
3894 * has extended the entry with new DVAs. Either way, we don't
3895 * need to do anything, we can just slot in behind it.
3896 */
3897
3898 if (zio->io_bp_override) {
3899 /*
3900 * If there's a write out, then we're soon going to
3901 * have our own copies of this block, so clear out the
3902 * override block and treat it as a regular dedup
3903 * write. See comment above.
3904 */
3905 BP_ZERO_DVAS(bp);
3906 BP_SET_BIRTH(bp, 0, 0);
3907 }
3908
3909 if (have_dvas >= need_dvas) {
3910 /*
3911 * A minor point: there might already be enough
3912 * committed DVAs in the entry to service our request,
3913 * but we don't know which are completed and which are
3914 * allocated but not yet written. In this case, should
3915 * the IO for the new DVAs fail, we will be on the end
3916 * of the IO chain and will also receive an error, even
3917 * though our request could have been serviced.
3918 *
3919 * This is an extremely rare case, as it requires the
3920 * original block to be copied with a request for a
3921 * larger number of DVAs, then copied again requesting
3922 * the same (or already fulfilled) number of DVAs while
3923 * the first request is active, and then that first
3924 * request errors. In return, the logic required to
3925 * catch and handle it is complex. For now, I'm just
3926 * not going to bother with it.
3927 */
3928
3929 /*
3930 * We always fill the bp here as we may have arrived
3931 * after the in-flight write has passed READY, and so
3932 * missed out.
3933 */
3934 ddt_bp_fill(ddp, v, bp, txg);
3935 zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
3936 ddt_exit(ddt);
3937 return (zio);
3938 }
3939
3940 /*
3941 * There's not enough in the entry yet, so we need to look at
3942 * the write in-flight and see how many DVAs it will have once
3943 * it completes.
3944 *
3945 * The in-flight write has potentially had its copies request
3946 * reduced (if we're filling out an existing entry), so we need
3947 * to reach in and get the original write to find out what it is
3948 * expecting.
3949 *
3950 * Note that the parent of the lead zio will always have the
3951 * highest zp_copies of any zio in the chain, because ones that
3952 * can be serviced without additional IO are always added to
3953 * the back of the chain.
3954 */
3955 zio_link_t *zl = NULL;
3956 zio_t *pio =
3957 zio_walk_parents(dde->dde_io->dde_lead_zio[p], &zl);
3958 ASSERT(pio);
3959 parent_dvas = pio->io_prop.zp_copies;
3960
3961 if (parent_dvas >= need_dvas) {
3962 zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
3963 ddt_exit(ddt);
3964 return (zio);
3965 }
3966
3967 /*
3968 * Still not enough, so we will need to issue to get the
3969 * shortfall.
3970 */
3971 need_dvas -= parent_dvas;
3972 }
3973
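/*
 * The existing copies in the entry are gang blocks; don't try to
 * extend it. Turn off dedup for this block and fall back to the
 * ordinary write pipeline.
 */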
3974 if (is_ganged) {
3975 zp->zp_dedup = B_FALSE;
3976 BP_SET_DEDUP(bp, B_FALSE);
3977 zio->io_pipeline = ZIO_WRITE_PIPELINE;
3978 ddt_exit(ddt);
3979 return (zio);
3980 }
3981
3982 /*
3983 * We need to write. We will create a new write with the copies
3984 * property adjusted to match the number of DVAs we need to
3985 * grow the DDT entry by to satisfy the request.
3986 */
3987 zio_prop_t czp = *zp;
3988 if (have_dvas > 0 || parent_dvas > 0) {
3989 czp.zp_copies = need_dvas;
3990 czp.zp_gang_copies = 0;
3991 } else {
3992 ASSERT3U(czp.zp_copies, ==, need_dvas);
3993 }
3994
3995 zio_t *cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
3996 zio->io_orig_size, zio->io_orig_size, &czp,
3997 zio_ddt_child_write_ready, NULL,
3998 zio_ddt_child_write_done, dde, zio->io_priority,
3999 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
4000
4001 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
4002
4003 /*
4004 * We are the new lead zio, because our parent has the highest
4005 * zp_copies that has been requested for this entry so far.
4006 */
4007 ddt_alloc_entry_io(dde);
4008 if (dde->dde_io->dde_lead_zio[p] == NULL) {
4009 /*
4010 * First time out, take a copy of the stable entry to revert
4011 * to if there's an error (see zio_ddt_child_write_done())
4012 */
4013 ddt_phys_copy(&dde->dde_io->dde_orig_phys, dde->dde_phys, v);
4014 } else {
4015 /*
4016 * Make the existing chain our child, because it cannot
4017 * complete until we have.
4018 */
4019 zio_add_child(cio, dde->dde_io->dde_lead_zio[p]);
4020 }
4021 dde->dde_io->dde_lead_zio[p] = cio;
4022
4023 ddt_exit(ddt);
4024
4025 zio_nowait(cio);
4026
4027 return (zio);
4028 }
4029
4030 static ddt_entry_t *freedde; /* for debugging */
4031
4032 static zio_t *
4033 zio_ddt_free(zio_t *zio)
4034 {
4035 spa_t *spa = zio->io_spa;
4036 blkptr_t *bp = zio->io_bp;
4037 ddt_t *ddt = ddt_select(spa, bp);
4038 ddt_entry_t *dde = NULL;
4039
4040 ASSERT(BP_GET_DEDUP(bp));
4041 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
4042
4043 ddt_enter(ddt);
4044 freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
4045 if (dde) {
4046 ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
4047 if (v != DDT_PHYS_NONE)
4048 ddt_phys_decref(dde->dde_phys, v);
4049 }
4050 ddt_exit(ddt);
4051
4052 /*
4053 * When no entry was found, it must have been pruned,
4054 * so we can free it now instead of decrementing the
4055 * refcount in the DDT.
4056 */
4057 if (!dde) {
4058 BP_SET_DEDUP(bp, 0);
4059 zio->io_pipeline |= ZIO_STAGE_DVA_FREE;
4060 }
4061
4062 return (zio);
4063 }
4064
4065 /*
4066 * ==========================================================================
4067 * Allocate and free blocks
4068 * ==========================================================================
4069 */
4070
4071 static zio_t *
4072 zio_io_to_allocate(metaslab_class_allocator_t *mca, boolean_t *more)
4073 {
4074 zio_t *zio;
4075
4076 ASSERT(MUTEX_HELD(&mca->mca_lock));
4077
4078 zio = avl_first(&mca->mca_tree);
4079 if (zio == NULL) {
4080 *more = B_FALSE;
4081 return (NULL);
4082 }
4083
4084 ASSERT(IO_IS_ALLOCATING(zio));
4085 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4086
4087 /*
4088 * Try to place a reservation for this zio. If we're unable to
4089 * reserve then we throttle.
4090 */
4091 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
4092 zio->io_prop.zp_copies, zio, B_FALSE, more)) {
4093 return (NULL);
4094 }
4095
4096 avl_remove(&mca->mca_tree, zio);
4097 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
4098
4099 if (avl_is_empty(&mca->mca_tree))
4100 *more = B_FALSE;
4101 return (zio);
4102 }
4103
4104 static zio_t *
4105 zio_dva_throttle(zio_t *zio)
4106 {
4107 spa_t *spa = zio->io_spa;
4108 zio_t *nio;
4109 metaslab_class_t *mc;
4110 boolean_t more;
4111
4112 /*
4113 * If not already chosen, choose an appropriate allocation class.
4114 */
4115 mc = zio->io_metaslab_class;
4116 if (mc == NULL)
4117 mc = spa_preferred_class(spa, zio);
4118
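/*
 * Sync writes, gang children, NODATA zios, and classes with the
 * allocation throttle disabled bypass the throttle entirely.
 */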
4119 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
4120 !mc->mc_alloc_throttle_enabled ||
4121 zio->io_child_type == ZIO_CHILD_GANG ||
4122 zio->io_flags & ZIO_FLAG_NODATA) {
4123 return (zio);
4124 }
4125
4126 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4127 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4128 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
4129 ASSERT3U(zio->io_queued_timestamp, >, 0);
4130 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
4131
4132 zio->io_metaslab_class = mc;
4133 metaslab_class_allocator_t *mca = &mc->mc_allocator[zio->io_allocator];
4134 mutex_enter(&mca->mca_lock);
4135 avl_add(&mca->mca_tree, zio);
4136 nio = zio_io_to_allocate(mca, &more);
4137 mutex_exit(&mca->mca_lock);
4138 return (nio);
4139 }
4140
4141 static void
4142 zio_allocate_dispatch(metaslab_class_t *mc, int allocator)
4143 {
4144 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
4145 zio_t *zio;
4146 boolean_t more;
4147
4148 do {
4149 mutex_enter(&mca->mca_lock);
4150 zio = zio_io_to_allocate(mca, &more);
4151 mutex_exit(&mca->mca_lock);
4152 if (zio == NULL)
4153 return;
4154
4155 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
4156 ASSERT0(zio->io_error);
4157 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
4158 } while (more);
4159 }
4160
4161 static zio_t *
4162 zio_dva_allocate(zio_t *zio)
4163 {
4164 spa_t *spa = zio->io_spa;
4165 metaslab_class_t *mc;
4166 blkptr_t *bp = zio->io_bp;
4167 int error;
4168 int flags = 0;
4169
4170 if (zio->io_gang_leader == NULL) {
4171 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
4172 zio->io_gang_leader = zio;
4173 }
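/*
 * Gang children that were allocated opportunistically by the gang
 * parent arrive with their DVAs already in io_bp_orig; copy those
 * DVAs and birth times into the bp rather than allocating again.
 */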
4174 if (zio->io_flags & ZIO_FLAG_PREALLOCATED) {
4175 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_GANG);
4176 memcpy(zio->io_bp->blk_dva, zio->io_bp_orig.blk_dva,
4177 3 * sizeof (dva_t));
4178 BP_SET_BIRTH(zio->io_bp, BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig),
4179 BP_GET_PHYSICAL_BIRTH(&zio->io_bp_orig));
4180 return (zio);
4181 }
4182
4183 ASSERT(BP_IS_HOLE(bp));
4184 ASSERT0(BP_GET_NDVAS(bp));
4185 ASSERT3U(zio->io_prop.zp_copies, >, 0);
4186
4187 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
4188 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
4189
4190 if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
4191 flags |= METASLAB_GANG_CHILD;
4192 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
4193 flags |= METASLAB_ASYNC_ALLOC;
4194
4195 /*
4196 * If not already chosen, choose an appropriate allocation class.
4197 */
4198 mc = zio->io_metaslab_class;
4199 if (mc == NULL) {
4200 mc = spa_preferred_class(spa, zio);
4201 zio->io_metaslab_class = mc;
4202 }
4203 ZIOSTAT_BUMP(ziostat_total_allocations);
4204
4205 again:
4206 /*
4207 * Try allocating the block in the usual metaslab class.
4208 * If that's full, allocate it in the normal class.
4209 * If that's full, allocate as a gang block,
4210 * and if all are full, the allocation fails (which shouldn't happen).
4211 *
4212 * Note that we do not fall back on embedded slog (ZIL) space, to
4213 * preserve unfragmented slog space, which is critical for decent
4214 * sync write performance. If a log allocation fails, we will fall
4215 * back to spa_sync() which is abysmal for performance.
4216 */
4217 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4218 error = metaslab_alloc(spa, mc, zio->io_size, bp,
4219 zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
4220 &zio->io_alloc_list, zio->io_allocator, zio);
4221
4222 /*
4223 * Fallback to normal class when an alloc class is full
4224 */
4225 if (error == ENOSPC && mc != spa_normal_class(spa)) {
4226 /*
4227 * When the dedup or special class is spilling into the normal
4228 * class, there can still be significant space available due
4229 * to deferred frees that are in-flight. We track the txg when
4230 * this occurred and back off adding new DDT entries for a few
4231 * txgs to allow the free blocks to be processed.
4232 */
4233 if ((mc == spa_dedup_class(spa) || (spa_special_has_ddt(spa) &&
4234 mc == spa_special_class(spa))) &&
4235 spa->spa_dedup_class_full_txg != zio->io_txg) {
4236 spa->spa_dedup_class_full_txg = zio->io_txg;
4237 zfs_dbgmsg("%s[%d]: %s class spilling, req size %d, "
4238 "%llu allocated of %llu",
4239 spa_name(spa), (int)zio->io_txg,
4240 mc == spa_dedup_class(spa) ? "dedup" : "special",
4241 (int)zio->io_size,
4242 (u_longlong_t)metaslab_class_get_alloc(mc),
4243 (u_longlong_t)metaslab_class_get_space(mc));
4244 }
4245
4246 /*
4247 * If we are holding old class reservation, drop it.
4248 * Dispatch the next ZIO(s) there if some are waiting.
4249 */
4250 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4251 if (metaslab_class_throttle_unreserve(mc,
4252 zio->io_prop.zp_copies, zio)) {
4253 zio_allocate_dispatch(zio->io_metaslab_class,
4254 zio->io_allocator);
4255 }
4256 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
4257 }
4258
4259 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
4260 zfs_dbgmsg("%s: metaslab allocation failure, "
4261 "trying normal class: zio %px, size %llu, error %d",
4262 spa_name(spa), zio, (u_longlong_t)zio->io_size,
4263 error);
4264 }
4265 zio->io_metaslab_class = mc = spa_normal_class(spa);
4266 ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
4267
4268 /*
4269 * If normal class uses throttling, return to that pipeline
4270 * stage. Otherwise just do another allocation attempt.
4271 */
4272 if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
4273 mc->mc_alloc_throttle_enabled &&
4274 zio->io_child_type != ZIO_CHILD_GANG &&
4275 !(zio->io_flags & ZIO_FLAG_NODATA)) {
4276 zio->io_stage = ZIO_STAGE_DVA_THROTTLE >> 1;
4277 return (zio);
4278 }
4279 goto again;
4280 }
4281
4282 if (error == ENOSPC && zio->io_size > spa->spa_min_alloc) {
4283 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
4284 zfs_dbgmsg("%s: metaslab allocation failure, "
4285 "trying ganging: zio %px, size %llu, error %d",
4286 spa_name(spa), zio, (u_longlong_t)zio->io_size,
4287 error);
4288 }
4289 ZIOSTAT_BUMP(ziostat_gang_writes);
4290 if (flags & METASLAB_GANG_CHILD)
4291 ZIOSTAT_BUMP(ziostat_gang_multilevel);
4292 return (zio_write_gang_block(zio, mc));
4293 }
4294 if (error != 0) {
4295 if (error != ENOSPC ||
4296 (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
4297 zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
4298 "size %llu, error %d",
4299 spa_name(spa), zio, (u_longlong_t)zio->io_size,
4300 error);
4301 }
4302 zio->io_error = error;
4303 }
4304
4305 return (zio);
4306 }
4307
4308 static zio_t *
4309 zio_dva_free(zio_t *zio)
4310 {
4311 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
4312
4313 return (zio);
4314 }
4315
4316 static zio_t *
4317 zio_dva_claim(zio_t *zio)
4318 {
4319 int error;
4320
4321 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
4322 if (error)
4323 zio->io_error = error;
4324
4325 return (zio);
4326 }
4327
4328 /*
4329 * Undo an allocation. This is used by zio_done() when an I/O fails
4330 * and we want to give back the block we just allocated.
4331 * This handles both normal blocks and gang blocks.
4332 */
4333 static void
4334 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
4335 {
4336 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
4337 ASSERT(zio->io_bp_override == NULL);
4338
4339 if (!BP_IS_HOLE(bp)) {
4340 metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp),
4341 B_TRUE);
4342 }
4343
4344 if (gn != NULL) {
4345 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
4346 zio_dva_unallocate(zio, gn->gn_child[g],
4347 &gn->gn_gbh->zg_blkptr[g]);
4348 }
4349 }
4350 }
4351
4352 /*
4353 * Try to allocate an intent log block. Return 0 on success, errno on failure.
4354 */
4355 int
4356 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
4357 uint64_t size, boolean_t *slog)
4358 {
4359 int error = 1;
4360 zio_alloc_list_t io_alloc_list;
4361
4362 ASSERT(txg > spa_syncing_txg(spa));
4363
4364 metaslab_trace_init(&io_alloc_list);
4365
4366 /*
4367 * Block pointer fields are useful to metaslabs for stats and debugging.
4368 * Fill in the obvious ones before calling into metaslab_alloc().
4369 */
4370 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
4371 BP_SET_PSIZE(new_bp, size);
4372 BP_SET_LEVEL(new_bp, 0);
4373
4374 /*
4375 * When allocating a zil block, we don't have information about
4376 * the final destination of the block except the objset it's part
4377 * of, so we just hash the objset ID to pick the allocator to get
4378 * some parallelism.
4379 */
4380 int flags = METASLAB_ZIL;
4381 int allocator = (uint_t)cityhash1(os->os_dsl_dataset->ds_object)
4382 % spa->spa_alloc_count;
4383 ZIOSTAT_BUMP(ziostat_total_allocations);
4384 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
4385 txg, NULL, flags, &io_alloc_list, allocator, NULL);
4386 *slog = (error == 0);
4387 if (error != 0) {
4388 error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
4389 new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
4390 NULL);
4391 }
4392 if (error != 0) {
4393 ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
4394 error = metaslab_alloc(spa, spa_normal_class(spa), size,
4395 new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
4396 NULL);
4397 }
4398 metaslab_trace_fini(&io_alloc_list);
4399
4400 if (error == 0) {
4401 BP_SET_LSIZE(new_bp, size);
4402 BP_SET_PSIZE(new_bp, size);
4403 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
4404 BP_SET_CHECKSUM(new_bp,
4405 spa_version(spa) >= SPA_VERSION_SLIM_ZIL
4406 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
4407 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
4408 BP_SET_LEVEL(new_bp, 0);
4409 BP_SET_DEDUP(new_bp, 0);
4410 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
4411
4412 /*
4413 * encrypted blocks will require an IV and salt. We generate
4414 * these now since we will not be rewriting the bp at
4415 * rewrite time.
4416 */
4417 if (os->os_encrypted) {
4418 uint8_t iv[ZIO_DATA_IV_LEN];
4419 uint8_t salt[ZIO_DATA_SALT_LEN];
4420
4421 BP_SET_CRYPT(new_bp, B_TRUE);
4422 VERIFY0(spa_crypt_get_salt(spa,
4423 dmu_objset_id(os), salt));
4424 VERIFY0(zio_crypt_generate_iv(iv));
4425
4426 zio_crypt_encode_params_bp(new_bp, salt, iv);
4427 }
4428 } else {
4429 zfs_dbgmsg("%s: zil block allocation failure: "
4430 "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
4431 error);
4432 }
4433
4434 return (error);
4435 }
4436
4437 /*
4438 * ==========================================================================
4439 * Read and write to physical devices
4440 * ==========================================================================
4441 */
4442
4443 /*
4444 * Issue an I/O to the underlying vdev. Typically the issue pipeline
4445 * stops after this stage and will resume upon I/O completion.
4446 * However, there are instances where the vdev layer may need to
4447 * continue the pipeline when an I/O was not issued. Since the I/O
4448 * that was sent to the vdev layer might be different than the one
4449 * currently active in the pipeline (see vdev_queue_io()), we explicitly
4450 * force the underlying vdev layers to call either zio_execute() or
4451 * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
4452 */
4453 static zio_t *
4454 zio_vdev_io_start(zio_t *zio)
4455 {
4456 vdev_t *vd = zio->io_vd;
4457 uint64_t align;
4458 spa_t *spa = zio->io_spa;
4459
4460 zio->io_delay = 0;
4461
4462 ASSERT(zio->io_error == 0);
4463 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
4464
4465 if (vd == NULL) {
4466 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
4467 spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
4468
4469 /*
4470 * The mirror_ops handle multiple DVAs in a single BP.
4471 */
4472 vdev_mirror_ops.vdev_op_io_start(zio);
4473 return (NULL);
4474 }
4475
4476 ASSERT3P(zio->io_logical, !=, zio);
4477 if (zio->io_type == ZIO_TYPE_WRITE) {
4478 ASSERT(spa->spa_trust_config);
4479
4480 /*
4481 * Note: the code can handle other kinds of writes,
4482 * but we don't expect them.
4483 */
4484 if (zio->io_vd->vdev_noalloc) {
4485 ASSERT(zio->io_flags &
4486 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
4487 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
4488 }
4489 }
4490
4491 align = 1ULL << vd->vdev_top->vdev_ashift;
4492
4493 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
4494 P2PHASE(zio->io_size, align) != 0) {
4495 /* Transform logical writes to be a full physical block size. */
4496 uint64_t asize = P2ROUNDUP(zio->io_size, align);
4497 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
4498 ASSERT(vd == vd->vdev_top);
4499 if (zio->io_type == ZIO_TYPE_WRITE) {
4500 abd_copy(abuf, zio->io_abd, zio->io_size);
4501 abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
4502 }
4503 zio_push_transform(zio, abuf, asize, asize, zio_subblock);
4504 }
4505
4506 /*
4507 * If this is not a physical io, make sure that it is properly aligned
4508 * before proceeding.
4509 */
4510 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
4511 ASSERT0(P2PHASE(zio->io_offset, align));
4512 ASSERT0(P2PHASE(zio->io_size, align));
4513 } else {
4514 /*
4515 * For physical writes, we allow 512b aligned writes and assume
4516 * the device will perform a read-modify-write as necessary.
4517 */
4518 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
4519 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
4520 }
4521
4522 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
4523
4524 /*
4525 * If this is a repair I/O, and there's no self-healing involved --
4526 * that is, we're just resilvering what we expect to resilver --
4527 * then don't do the I/O unless zio's txg is actually in vd's DTL.
4528 * This prevents spurious resilvering.
4529 *
4530 * There are a few ways that we can end up creating these spurious
4531 * resilver i/os:
4532 *
4533 * 1. A resilver i/o will be issued if any DVA in the BP has a
4534 * dirty DTL. The mirror code will issue resilver writes to
4535 * each DVA, including the one(s) that are not on vdevs with dirty
4536 * DTLs.
4537 *
4538 * 2. With nested replication, which happens when we have a
4539 * "replacing" or "spare" vdev that's a child of a mirror or raidz.
4540 * For example, given mirror(replacing(A+B), C), it's likely that
4541 * only A is out of date (it's the new device). In this case, we'll
4542 * read from C, then use the data to resilver A+B -- but we don't
4543 * actually want to resilver B, just A. The top-level mirror has no
4544 * way to know this, so instead we just discard unnecessary repairs
4545 * as we work our way down the vdev tree.
4546 *
4547 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
4548 * The same logic applies to any form of nested replication: ditto
4549 * + mirror, RAID-Z + replacing, etc.
4550 *
4551 * However, indirect vdevs point off to other vdevs which may have
4552 * DTL's, so we never bypass them. The child i/os on concrete vdevs
4553 * will be properly bypassed instead.
4554 *
4555 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from
4556 * a dRAID spare vdev. For example, when a dRAID spare is first
4557 * used, its spare blocks need to be written to but the leaf vdevs
4558 * of such blocks can have empty DTL_PARTIAL.
4559 *
4560 * There seemed no clean way to allow such writes while bypassing
4561 * spurious ones. At this point, just avoid all bypassing for dRAID
4562 * for correctness.
4563 */
4564 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
4565 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
4566 zio->io_txg != 0 && /* not a delegated i/o */
4567 vd->vdev_ops != &vdev_indirect_ops &&
4568 vd->vdev_top->vdev_ops != &vdev_draid_ops &&
4569 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
4570 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4571 zio_vdev_io_bypass(zio);
4572 return (zio);
4573 }
4574
4575 /*
4576 * Select the next best leaf I/O to process. Distributed spares are
4577 * excluded since they dispatch the I/O directly to a leaf vdev after
4578 * applying the dRAID mapping.
4579 */
4580 if (vd->vdev_ops->vdev_op_leaf &&
4581 vd->vdev_ops != &vdev_draid_spare_ops &&
4582 (zio->io_type == ZIO_TYPE_READ ||
4583 zio->io_type == ZIO_TYPE_WRITE ||
4584 zio->io_type == ZIO_TYPE_TRIM)) {
4585
4586 if ((zio = vdev_queue_io(zio)) == NULL)
4587 return (NULL);
4588
4589 if (!vdev_accessible(vd, zio)) {
4590 zio->io_error = SET_ERROR(ENXIO);
4591 zio_interrupt(zio);
4592 return (NULL);
4593 }
4594 zio->io_delay = gethrtime();
4595
4596 if (zio_handle_device_injection(vd, zio, ENOSYS) != 0) {
4597 /*
4598 * "no-op" injections return success, but do no actual
4599 * work. Just return it.
4600 */
4601 zio_delay_interrupt(zio);
4602 return (NULL);
4603 }
4604 }
4605
4606 vd->vdev_ops->vdev_op_io_start(zio);
4607 return (NULL);
4608 }
4609
4610 static zio_t *
4611 zio_vdev_io_done(zio_t *zio)
4612 {
4613 vdev_t *vd = zio->io_vd;
4614 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
4615 boolean_t unexpected_error = B_FALSE;
4616
4617 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4618 return (NULL);
4619 }
4620
4621 ASSERT(zio->io_type == ZIO_TYPE_READ ||
4622 zio->io_type == ZIO_TYPE_WRITE ||
4623 zio->io_type == ZIO_TYPE_FLUSH ||
4624 zio->io_type == ZIO_TYPE_TRIM);
4625
4626 if (zio->io_delay)
4627 zio->io_delay = gethrtime() - zio->io_delay;
4628
4629 if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4630 vd->vdev_ops != &vdev_draid_spare_ops) {
4631 if (zio->io_type != ZIO_TYPE_FLUSH)
4632 vdev_queue_io_done(zio);
4633
4634 if (zio_injection_enabled && zio->io_error == 0)
4635 zio->io_error = zio_handle_device_injections(vd, zio,
4636 EIO, EILSEQ);
4637
4638 if (zio_injection_enabled && zio->io_error == 0)
4639 zio->io_error = zio_handle_label_injection(zio, EIO);
4640
4641 if (zio->io_error && zio->io_type != ZIO_TYPE_FLUSH &&
4642 zio->io_type != ZIO_TYPE_TRIM) {
4643 if (!vdev_accessible(vd, zio)) {
4644 zio->io_error = SET_ERROR(ENXIO);
4645 } else {
4646 unexpected_error = B_TRUE;
4647 }
4648 }
4649 }
4650
4651 ops->vdev_op_io_done(zio);
4652
4653 if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
4654 VERIFY(vdev_probe(vd, zio) == NULL);
4655
4656 return (zio);
4657 }
4658
4659 /*
4660 * This function is used to change the priority of an existing zio that is
4661 * currently in-flight. This is used by the arc to upgrade priority in the
4662 * event that a demand read is made for a block that is currently queued
4663 * as a scrub or async read IO. Otherwise, the high priority read request
4664 * would end up having to wait for the lower priority IO.
4665 */
4666 void
4667 zio_change_priority(zio_t *pio, zio_priority_t priority)
4668 {
4669 zio_t *cio, *cio_next;
4670 zio_link_t *zl = NULL;
4671
4672 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
4673
4674 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
4675 vdev_queue_change_io_priority(pio, priority);
4676 } else {
4677 pio->io_priority = priority;
4678 }
4679
4680 mutex_enter(&pio->io_lock);
4681 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
4682 cio_next = zio_walk_children(pio, &zl);
4683 zio_change_priority(cio, priority);
4684 }
4685 mutex_exit(&pio->io_lock);
4686 }
4687
4688 /*
4689 * For non-raidz ZIOs, we can just copy aside the bad data read from the
4690 * disk, and use that to finish the checksum ereport later.
4691 */
4692 static void
4693 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
4694 const abd_t *good_buf)
4695 {
4696 /* no processing needed */
4697 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
4698 }
4699
4700 void
4701 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
4702 {
4703 void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
4704
4705 abd_copy(abd, zio->io_abd, zio->io_size);
4706
4707 zcr->zcr_cbinfo = zio->io_size;
4708 zcr->zcr_cbdata = abd;
4709 zcr->zcr_finish = zio_vsd_default_cksum_finish;
4710 zcr->zcr_free = zio_abd_free;
4711 }
4712
4713 static zio_t *
4714 zio_vdev_io_assess(zio_t *zio)
4715 {
4716 vdev_t *vd = zio->io_vd;
4717
4718 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
4719 return (NULL);
4720 }
4721
4722 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
4723 spa_config_exit(zio->io_spa, SCL_ZIO, zio);
4724
4725 if (zio->io_vsd != NULL) {
4726 zio->io_vsd_ops->vsd_free(zio);
4727 zio->io_vsd = NULL;
4728 }
4729
4730 /*
4731 * If a Direct I/O operation has a checksum verify error then this I/O
4732 * should not attempt to be issued again.
4733 */
4734 if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR) {
4735 if (zio->io_type == ZIO_TYPE_WRITE) {
4736 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_LOGICAL);
4737 ASSERT3U(zio->io_error, ==, EIO);
4738 }
4739 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4740 return (zio);
4741 }
4742
4743 if (zio_injection_enabled && zio->io_error == 0)
4744 zio->io_error = zio_handle_fault_injection(zio, EIO);
4745
4746 /*
4747 * If the I/O failed, determine whether we should attempt to retry it.
4748 *
4749 * On retry, we cut in line in the issue queue, since we don't want
4750 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
4751 */
4752 if (zio->io_error && vd == NULL &&
4753 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
4754 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
4755 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
4756 zio->io_error = 0;
4757 zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
4758 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
4759 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
4760 zio_requeue_io_start_cut_in_line);
4761 return (NULL);
4762 }
4763
4764 /*
4765 * If we got an error on a leaf device, convert it to ENXIO
4766 * if the device is not accessible at all.
4767 */
4768 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4769 !vdev_accessible(vd, zio))
4770 zio->io_error = SET_ERROR(ENXIO);
4771
4772 /*
4773 * If we can't write to an interior vdev (mirror or RAID-Z),
4774 * set vdev_cant_write so that we stop trying to allocate from it.
4775 */
4776 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
4777 vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
4778 vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
4779 "cant_write=TRUE due to write failure with ENXIO",
4780 zio);
4781 vd->vdev_cant_write = B_TRUE;
4782 }
4783
4784 /*
4785 * If a cache flush returns ENOTSUP we know that no future
4786 * attempts will ever succeed. In this case we set a persistent
4787 * boolean flag so that we don't bother with it in the future, and
4788 * then we act like the flush succeeded.
4789 */
4790 if (zio->io_error == ENOTSUP && zio->io_type == ZIO_TYPE_FLUSH &&
4791 vd != NULL) {
4792 vd->vdev_nowritecache = B_TRUE;
4793 zio->io_error = 0;
4794 }
4795
4796 if (zio->io_error)
4797 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4798
4799 return (zio);
4800 }
4801
4802 void
4803 zio_vdev_io_reissue(zio_t *zio)
4804 {
4805 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4806 ASSERT(zio->io_error == 0);
4807
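/*
 * Pipeline stages are one-hot bits; stepping back one bit re-arms
 * ZIO_STAGE_VDEV_IO_START so it executes again when the pipeline
 * resumes.
 */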
4808 zio->io_stage >>= 1;
4809 }
4810
4811 void
4812 zio_vdev_io_redone(zio_t *zio)
4813 {
4814 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
4815
4816 zio->io_stage >>= 1;
4817 }
4818
4819 void
4820 zio_vdev_io_bypass(zio_t *zio)
4821 {
4822 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4823 ASSERT(zio->io_error == 0);
4824
4825 zio->io_flags |= ZIO_FLAG_IO_BYPASS;
4826 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
4827 }
4828
4829 /*
4830 * ==========================================================================
4831 * Encrypt and store encryption parameters
4832 * ==========================================================================
4833 */
4834
4835
4836 /*
4837 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
4838 * managing the storage of encryption parameters and passing them to the
4839 * lower-level encryption functions.
4840 */
4841 static zio_t *
4842 zio_encrypt(zio_t *zio)
4843 {
4844 zio_prop_t *zp = &zio->io_prop;
4845 spa_t *spa = zio->io_spa;
4846 blkptr_t *bp = zio->io_bp;
4847 uint64_t psize = BP_GET_PSIZE(bp);
4848 uint64_t dsobj = zio->io_bookmark.zb_objset;
4849 dmu_object_type_t ot = BP_GET_TYPE(bp);
4850 void *enc_buf = NULL;
4851 abd_t *eabd = NULL;
4852 uint8_t salt[ZIO_DATA_SALT_LEN];
4853 uint8_t iv[ZIO_DATA_IV_LEN];
4854 uint8_t mac[ZIO_DATA_MAC_LEN];
4855 boolean_t no_crypt = B_FALSE;
4856
4857 /* the root zio already encrypted the data */
4858 if (zio->io_child_type == ZIO_CHILD_GANG)
4859 return (zio);
4860
4861 /* only ZIL blocks are re-encrypted on rewrite */
4862 if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
4863 return (zio);
4864
4865 if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
4866 BP_SET_CRYPT(bp, B_FALSE);
4867 return (zio);
4868 }
4869
4870 /* if we are doing raw encryption set the provided encryption params */
4871 if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
4872 ASSERT0(BP_GET_LEVEL(bp));
4873 BP_SET_CRYPT(bp, B_TRUE);
4874 BP_SET_BYTEORDER(bp, zp->zp_byteorder);
4875 if (ot != DMU_OT_OBJSET)
4876 zio_crypt_encode_mac_bp(bp, zp->zp_mac);
4877
4878 /* dnode blocks must be written out in the provided byteorder */
4879 if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
4880 ot == DMU_OT_DNODE) {
4881 void *bswap_buf = zio_buf_alloc(psize);
4882 abd_t *babd = abd_get_from_buf(bswap_buf, psize);
4883
4884 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4885 abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
4886 dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
4887 psize);
4888
4889 abd_take_ownership_of_buf(babd, B_TRUE);
4890 zio_push_transform(zio, babd, psize, psize, NULL);
4891 }
4892
4893 if (DMU_OT_IS_ENCRYPTED(ot))
4894 zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
4895 return (zio);
4896 }
4897
4898 /* indirect blocks only maintain a cksum of the lower level MACs */
4899 if (BP_GET_LEVEL(bp) > 0) {
4900 BP_SET_CRYPT(bp, B_TRUE);
4901 VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
4902 zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
4903 mac));
4904 zio_crypt_encode_mac_bp(bp, mac);
4905 return (zio);
4906 }
4907
4908 /*
4909 * Objset blocks are a special case since they have 2 256-bit MACs
4910 * embedded within them.
4911 */
4912 if (ot == DMU_OT_OBJSET) {
4913 ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
4914 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4915 BP_SET_CRYPT(bp, B_TRUE);
4916 VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
4917 zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
4918 return (zio);
4919 }
4920
4921 /* unencrypted object types are only authenticated with a MAC */
4922 if (!DMU_OT_IS_ENCRYPTED(ot)) {
4923 BP_SET_CRYPT(bp, B_TRUE);
4924 VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
4925 zio->io_abd, psize, mac));
4926 zio_crypt_encode_mac_bp(bp, mac);
4927 return (zio);
4928 }
4929
4930 /*
4931 * Later passes of sync-to-convergence may decide to rewrite data
4932 * in place to avoid more disk reallocations. This presents a problem
4933 * for encryption because this constitutes rewriting the new data with
4934 * the same encryption key and IV. However, this only applies to blocks
4935 * in the MOS (particularly the spacemaps) and we do not encrypt the
4936 * MOS. We assert that the zio is allocating or an intent log write
4937 * to enforce this.
4938 */
4939 ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
4940 ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
4941 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
4942 ASSERT3U(psize, !=, 0);
4943
4944 enc_buf = zio_buf_alloc(psize);
4945 eabd = abd_get_from_buf(enc_buf, psize);
4946 abd_take_ownership_of_buf(eabd, B_TRUE);
4947
4948 /*
4949 * For an explanation of what encryption parameters are stored
4950 * where, see the block comment in zio_crypt.c.
4951 */
4952 if (ot == DMU_OT_INTENT_LOG) {
4953 zio_crypt_decode_params_bp(bp, salt, iv);
4954 } else {
4955 BP_SET_CRYPT(bp, B_TRUE);
4956 }
4957
4958 /* Perform the encryption. This should not fail */
4959 VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
4960 BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
4961 salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
4962
4963 /* encode encryption metadata into the bp */
4964 if (ot == DMU_OT_INTENT_LOG) {
4965 /*
4966 * ZIL blocks store the MAC in the embedded checksum, so the
4967 * transform must always be applied.
4968 */
4969 zio_crypt_encode_mac_zil(enc_buf, mac);
4970 zio_push_transform(zio, eabd, psize, psize, NULL);
4971 } else {
4972 BP_SET_CRYPT(bp, B_TRUE);
4973 zio_crypt_encode_params_bp(bp, salt, iv);
4974 zio_crypt_encode_mac_bp(bp, mac);
4975
4976 if (no_crypt) {
4977 ASSERT3U(ot, ==, DMU_OT_DNODE);
4978 abd_free(eabd);
4979 } else {
4980 zio_push_transform(zio, eabd, psize, psize, NULL);
4981 }
4982 }
4983
4984 return (zio);
4985 }
4986
4987 /*
4988 * ==========================================================================
4989 * Generate and verify checksums
4990 * ==========================================================================
4991 */
4992 static zio_t *
4993 zio_checksum_generate(zio_t *zio)
4994 {
4995 blkptr_t *bp = zio->io_bp;
4996 enum zio_checksum checksum;
4997
4998 if (bp == NULL) {
4999 /*
5000 * This is zio_write_phys().
5001 * We're either generating a label checksum, or none at all.
5002 */
5003 checksum = zio->io_prop.zp_checksum;
5004
5005 if (checksum == ZIO_CHECKSUM_OFF)
5006 return (zio);
5007
5008 ASSERT(checksum == ZIO_CHECKSUM_LABEL);
5009 } else {
5010 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
5011 ASSERT(!IO_IS_ALLOCATING(zio));
5012 checksum = ZIO_CHECKSUM_GANG_HEADER;
5013 } else {
5014 checksum = BP_GET_CHECKSUM(bp);
5015 }
5016 }
5017
5018 zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
5019
5020 return (zio);
5021 }
5022
5023 static zio_t *
5024 zio_checksum_verify(zio_t *zio)
5025 {
5026 zio_bad_cksum_t info;
5027 blkptr_t *bp = zio->io_bp;
5028 int error;
5029
5030 ASSERT(zio->io_vd != NULL);
5031
5032 if (bp == NULL) {
5033 /*
5034 * This is zio_read_phys().
5035 * We're either verifying a label checksum, or nothing at all.
5036 */
5037 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
5038 return (zio);
5039
5040 ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
5041 }
5042
5043 ASSERT0(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
5044 IMPLY(zio->io_flags & ZIO_FLAG_DIO_READ,
5045 !(zio->io_flags & ZIO_FLAG_SPECULATIVE));
5046
5047 if ((error = zio_checksum_error(zio, &info)) != 0) {
5048 zio->io_error = error;
5049 if (error == ECKSUM &&
5050 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
5051 if (zio->io_flags & ZIO_FLAG_DIO_READ) {
5052 zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
5053 zio_t *pio = zio_unique_parent(zio);
5054 /*
5055 * Any Direct I/O read that has a checksum
5056 * error must be treated as suspicious, as the
5057 * contents of the buffer could be getting
5058 * manipulated while the I/O is taking place.
5059 *
5060 * The checksum verify error is only reported
5061 * here for disk and file vdevs, and only on
5062 * the vdev where the failure occurred. Other
5063 * vdev types report the verify failure in
5064 * their own code paths.
5065 */
5066 if (pio->io_child_type == ZIO_CHILD_LOGICAL) {
5067 zio_dio_chksum_verify_error_report(zio);
5068 }
5069 } else {
5070 mutex_enter(&zio->io_vd->vdev_stat_lock);
5071 zio->io_vd->vdev_stat.vs_checksum_errors++;
5072 mutex_exit(&zio->io_vd->vdev_stat_lock);
5073 (void) zfs_ereport_start_checksum(zio->io_spa,
5074 zio->io_vd, &zio->io_bookmark, zio,
5075 zio->io_offset, zio->io_size, &info);
5076 }
5077 }
5078 }
5079
5080 return (zio);
5081 }
5082
5083 static zio_t *
5084 zio_dio_checksum_verify(zio_t *zio)
5085 {
5086 zio_t *pio = zio_unique_parent(zio);
5087 int error;
5088
5089 ASSERT3P(zio->io_vd, !=, NULL);
5090 ASSERT3P(zio->io_bp, !=, NULL);
5091 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
5092 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
5093 ASSERT3B(pio->io_prop.zp_direct_write, ==, B_TRUE);
5094 ASSERT3U(pio->io_child_type, ==, ZIO_CHILD_LOGICAL);
5095
5096 if (zfs_vdev_direct_write_verify == 0 || zio->io_error != 0)
5097 goto out;
5098
5099 if ((error = zio_checksum_error(zio, NULL)) != 0) {
5100 zio->io_error = error;
5101 if (error == ECKSUM) {
5102 zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
5103 zio_dio_chksum_verify_error_report(zio);
5104 }
5105 }
5106
5107 out:
5108 return (zio);
5109 }
5110
5111
5112 /*
5113 * Called by RAID-Z to ensure we don't compute the checksum twice.
5114 */
5115 void
5116 zio_checksum_verified(zio_t *zio)
5117 {
5118 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
5119 }
5120
5121 /*
5122 * Report Direct I/O checksum verify error and create ZED event.
5123 */
5124 void
5125 zio_dio_chksum_verify_error_report(zio_t *zio)
5126 {
5127 ASSERT(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
5128
5129 if (zio->io_child_type == ZIO_CHILD_LOGICAL)
5130 return;
5131
5132 mutex_enter(&zio->io_vd->vdev_stat_lock);
5133 zio->io_vd->vdev_stat.vs_dio_verify_errors++;
5134 mutex_exit(&zio->io_vd->vdev_stat_lock);
5135 if (zio->io_type == ZIO_TYPE_WRITE) {
5136 /*
5137 * Convert checksum error for writes into EIO.
5138 */
5139 zio->io_error = SET_ERROR(EIO);
5140 /*
5141 * Report dio_verify_wr ZED event.
5142 */
5143 (void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_WR,
5144 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
5145 } else {
5146 /*
5147 * Report dio_verify_rd ZED event.
5148 */
5149 (void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_RD,
5150 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
5151 }
5152 }
5153
5154 /*
5155 * ==========================================================================
5156 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
5157 * An error of 0 indicates success. ENXIO indicates whole-device failure,
5158 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
5159 * indicate errors that are specific to one I/O, and most likely permanent.
5160 * Any other error is presumed to be worse because we weren't expecting it.
5161 * ==========================================================================
5162 */
5163 int
5164 zio_worst_error(int e1, int e2)
5165 {
5166 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
5167 int r1, r2;
5168
5169 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
5170 if (e1 == zio_error_rank[r1])
5171 break;
5172
5173 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
5174 if (e2 == zio_error_rank[r2])
5175 break;
5176
5177 return (r1 > r2 ? e1 : e2);
5178 }
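/*
 * Illustrative examples of the ranking above (not used by the code):
 *
 *	zio_worst_error(0, ENXIO) == ENXIO	(any error beats success)
 *	zio_worst_error(ENXIO, EIO) == EIO	(EIO ranks after ENXIO)
 *	zio_worst_error(EIO, EINVAL) == EINVAL	(an unexpected errno falls off
 *						 the end of zio_error_rank and
 *						 therefore ranks worst of all)
 */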
5179
5180 /*
5181 * ==========================================================================
5182 * I/O completion
5183 * ==========================================================================
5184 */
5185 static zio_t *
5186 zio_ready(zio_t *zio)
5187 {
5188 blkptr_t *bp = zio->io_bp;
5189 zio_t *pio, *pio_next;
5190 zio_link_t *zl = NULL;
5191
5192 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
5193 ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
5194 return (NULL);
5195 }
5196
5197 if (zio->io_ready) {
5198 ASSERT(IO_IS_ALLOCATING(zio));
5199 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg ||
5200 BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
5201 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
5202
5203 zio->io_ready(zio);
5204 }
5205
5206 #ifdef ZFS_DEBUG
5207 if (bp != NULL && bp != &zio->io_bp_copy)
5208 zio->io_bp_copy = *bp;
5209 #endif
5210
5211 if (zio->io_error != 0) {
5212 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
5213
5214 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
5215 ASSERT(IO_IS_ALLOCATING(zio));
5216 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
5217 ASSERT(zio->io_metaslab_class != NULL);
5218 ASSERT(ZIO_HAS_ALLOCATOR(zio));
5219
5220 /*
5221 * We were unable to allocate anything, unreserve and
5222 * issue the next I/O to allocate.
5223 */
5224 if (metaslab_class_throttle_unreserve(
5225 zio->io_metaslab_class, zio->io_prop.zp_copies,
5226 zio)) {
5227 zio_allocate_dispatch(zio->io_metaslab_class,
5228 zio->io_allocator);
5229 }
5230 }
5231 }
5232
5233 mutex_enter(&zio->io_lock);
5234 zio->io_state[ZIO_WAIT_READY] = 1;
5235 pio = zio_walk_parents(zio, &zl);
5236 mutex_exit(&zio->io_lock);
5237
5238 /*
5239 * As we notify zio's parents, new parents could be added.
5240 * New parents go to the head of zio's io_parent_list, however,
5241 * so we will (correctly) not notify them. The remainder of zio's
5242 * io_parent_list, from 'pio_next' onward, cannot change because
5243 * all parents must wait for us to be done before they can be done.
5244 */
5245 for (; pio != NULL; pio = pio_next) {
5246 pio_next = zio_walk_parents(zio, &zl);
5247 zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
5248 }
5249
5250 if (zio->io_flags & ZIO_FLAG_NODATA) {
5251 if (bp != NULL && BP_IS_GANG(bp)) {
5252 zio->io_flags &= ~ZIO_FLAG_NODATA;
5253 } else {
5254 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
5255 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
5256 }
5257 }
5258
5259 if (zio_injection_enabled &&
5260 zio->io_spa->spa_syncing_txg == zio->io_txg)
5261 zio_handle_ignored_writes(zio);
5262
5263 return (zio);
5264 }
5265
5266 /*
5267 * Update the allocation throttle accounting.
5268 */
5269 static void
5270 zio_dva_throttle_done(zio_t *zio)
5271 {
5272 zio_t *pio = zio_unique_parent(zio);
5273 vdev_t *vd = zio->io_vd;
5274 int flags = METASLAB_ASYNC_ALLOC;
5275 const void *tag = pio;
5276
5277 ASSERT3P(zio->io_bp, !=, NULL);
5278 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
5279 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
5280 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
5281 ASSERT(vd != NULL);
5282 ASSERT3P(vd, ==, vd->vdev_top);
5283 ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
5284 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
5285 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
5286
5287 /*
5288 * Parents of gang children can have two flavors -- ones that allocated
5289 * the gang header (these will have ZIO_FLAG_IO_REWRITE set) and ones that
5290 * allocated the constituent blocks. The former use their parent as the tag.
5291 */
5292 if (pio->io_child_type == ZIO_CHILD_GANG &&
5293 (pio->io_flags & ZIO_FLAG_IO_REWRITE))
5294 tag = zio_unique_parent(pio);
5295
5296 ASSERT(IO_IS_ALLOCATING(pio) || (pio->io_child_type == ZIO_CHILD_GANG &&
5297 (pio->io_flags & ZIO_FLAG_IO_REWRITE)));
5298 ASSERT(ZIO_HAS_ALLOCATOR(pio));
5299 ASSERT3P(zio, !=, zio->io_logical);
5300 ASSERT(zio->io_logical != NULL);
5301 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
5302 ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
5303 ASSERT(zio->io_metaslab_class != NULL);
5304 ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
5305
5306 metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id,
5307 pio->io_allocator, flags, pio->io_size, tag);
5308
5309 if (metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1, pio)) {
5310 zio_allocate_dispatch(zio->io_metaslab_class,
5311 pio->io_allocator);
5312 }
5313 }
5314
5315 static zio_t *
5316 zio_done(zio_t *zio)
5317 {
5318 /*
5319 * Always attempt to keep stack usage minimal here since
5320 * we can be called recursively up to 19 levels deep.
5321 */
5322 const uint64_t psize = zio->io_size;
5323 zio_t *pio, *pio_next;
5324 zio_link_t *zl = NULL;
5325
5326 /*
5327 * If our children haven't all completed,
5328 * wait for them and then repeat this pipeline stage.
5329 */
5330 if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
5331 return (NULL);
5332 }
5333
5334 /*
5335 * If the allocation throttle is enabled, then update the accounting.
5336 * We only track child I/Os that are part of an allocating async
5337 * write. We must do this since the allocation is performed
5338 * by the logical I/O but the actual write is done by child I/Os.
5339 */
5340 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
5341 zio->io_child_type == ZIO_CHILD_VDEV)
5342 zio_dva_throttle_done(zio);
5343
5344 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
5345 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
5346 ASSERT(zio->io_children[c][w] == 0);
5347
5348 if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
5349 ASSERT(zio->io_bp->blk_pad[0] == 0);
5350 ASSERT(zio->io_bp->blk_pad[1] == 0);
5351 ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
5352 sizeof (blkptr_t)) == 0 ||
5353 (zio->io_bp == zio_unique_parent(zio)->io_bp));
5354 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
5355 zio->io_bp_override == NULL &&
5356 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
5357 ASSERT3U(zio->io_prop.zp_copies, <=,
5358 BP_GET_NDVAS(zio->io_bp));
5359 ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
5360 (BP_COUNT_GANG(zio->io_bp) ==
5361 BP_GET_NDVAS(zio->io_bp)));
5362 }
5363 if (zio->io_flags & ZIO_FLAG_NOPWRITE)
5364 VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
5365 }
5366
5367 /*
5368 * If there were child vdev/gang/ddt errors, they apply to us now.
5369 */
5370 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
5371 zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
5372 zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
5373
5374 /*
5375 * If the I/O on the transformed data was successful, generate any
5376 * checksum reports now while we still have the transformed data.
5377 */
5378 if (zio->io_error == 0) {
5379 while (zio->io_cksum_report != NULL) {
5380 zio_cksum_report_t *zcr = zio->io_cksum_report;
5381 uint64_t align = zcr->zcr_align;
5382 uint64_t asize = P2ROUNDUP(psize, align);
5383 abd_t *adata = zio->io_abd;
5384
5385 if (adata != NULL && asize != psize) {
5386 adata = abd_alloc(asize, B_TRUE);
5387 abd_copy(adata, zio->io_abd, psize);
5388 abd_zero_off(adata, psize, asize - psize);
5389 }
5390
5391 zio->io_cksum_report = zcr->zcr_next;
5392 zcr->zcr_next = NULL;
5393 zcr->zcr_finish(zcr, adata);
5394 zfs_ereport_free_checksum(zcr);
5395
5396 if (adata != NULL && asize != psize)
5397 abd_free(adata);
5398 }
5399 }
5400
5401 zio_pop_transforms(zio); /* note: may set zio->io_error */
5402
5403 vdev_stat_update(zio, psize);
5404
5405 /*
5406 * If this I/O is attached to a particular vdev and is slow, exceeding
5407 * 30 seconds to complete, post an error describing the I/O delay.
5408 * We ignore these errors if the device is currently unavailable.
5409 */
5410 if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
5411 if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
5412 /*
5413 * We want to only increment our slow IO counters if
5414 * the IO is valid (i.e. not if the drive is removed).
5415 *
5416 * zfs_ereport_post() will also do these checks, but
5417 * it can also ratelimit and have other failures, so we
5418 * need to increment the slow_io counters independent
5419 * of it.
5420 */
5421 if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
5422 zio->io_spa, zio->io_vd, zio)) {
5423 mutex_enter(&zio->io_vd->vdev_stat_lock);
5424 zio->io_vd->vdev_stat.vs_slow_ios++;
5425 mutex_exit(&zio->io_vd->vdev_stat_lock);
5426
5427 (void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
5428 zio->io_spa, zio->io_vd, &zio->io_bookmark,
5429 zio, 0);
5430 }
5431 }
5432 }
5433
5434 if (zio->io_error) {
5435 /*
5436 * If this I/O is attached to a particular vdev,
5437 * generate an error message describing the I/O failure
5438 * at the block level. We ignore these errors if the
5439 * device is currently unavailable.
5440 */
5441 if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
5442 !vdev_is_dead(zio->io_vd) &&
5443 !(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)) {
5444 int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
5445 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
5446 if (ret != EALREADY) {
5447 mutex_enter(&zio->io_vd->vdev_stat_lock);
5448 if (zio->io_type == ZIO_TYPE_READ)
5449 zio->io_vd->vdev_stat.vs_read_errors++;
5450 else if (zio->io_type == ZIO_TYPE_WRITE)
5451 zio->io_vd->vdev_stat.vs_write_errors++;
5452 mutex_exit(&zio->io_vd->vdev_stat_lock);
5453 }
5454 }
5455
5456 if ((zio->io_error == EIO || !(zio->io_flags &
5457 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
5458 !(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR) &&
5459 zio == zio->io_logical) {
5460 /*
5461 * For logical I/O requests, tell the SPA to log the
5462 * error and generate a logical data ereport.
5463 */
5464 spa_log_error(zio->io_spa, &zio->io_bookmark,
5465 BP_GET_LOGICAL_BIRTH(zio->io_bp));
5466 (void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
5467 zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
5468 }
5469 }
5470
5471 if (zio->io_error && zio == zio->io_logical) {
5472
5473 /*
5474 * A DDT child tried to create a mixed gang/non-gang BP. We're
5475 * going to have to just retry as a non-dedup IO.
5476 */
5477 if (zio->io_error == EAGAIN && IO_IS_ALLOCATING(zio) &&
5478 zio->io_prop.zp_dedup) {
5479 zio->io_reexecute |= ZIO_REEXECUTE_NOW;
5480 zio->io_prop.zp_dedup = B_FALSE;
5481 }
5482 /*
5483 * Determine whether zio should be reexecuted. This will
5484 * propagate all the way to the root via zio_notify_parent().
5485 */
5486 ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
5487 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
5488
5489 if (IO_IS_ALLOCATING(zio) &&
5490 !(zio->io_flags & ZIO_FLAG_CANFAIL) &&
5491 !(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)) {
5492 if (zio->io_error != ENOSPC)
5493 zio->io_reexecute |= ZIO_REEXECUTE_NOW;
5494 else
5495 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
5496 }
5497
5498 if ((zio->io_type == ZIO_TYPE_READ ||
5499 zio->io_type == ZIO_TYPE_FREE) &&
5500 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
5501 zio->io_error == ENXIO &&
5502 spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
5503 spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
5504 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
5505
5506 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
5507 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
5508
5509 /*
5510 * Here is a possibly good place to attempt to do
5511 * either combinatorial reconstruction or error correction
5512 * based on checksums. It also might be a good place
5513 * to send out preliminary ereports before we suspend
5514 * processing.
5515 */
5516 }
5517
5518 /*
5519 * If there were logical child errors, they apply to us now.
5520 * We defer this until now to avoid conflating logical child
5521 * errors with errors that happened to the zio itself when
5522 * updating vdev stats and reporting FMA events above.
5523 */
5524 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
5525
5526 if ((zio->io_error || zio->io_reexecute) &&
5527 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
5528 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
5529 zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
5530
5531 zio_gang_tree_free(&zio->io_gang_tree);
5532
5533 /*
5534 * Godfather I/Os should never suspend.
5535 */
5536 if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
5537 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
5538 zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
5539
5540 if (zio->io_reexecute) {
5541 /*
5542 * A Direct I/O operation that has a checksum verify error
5543 * should not attempt to reexecute. Instead, the error should
5544 * just be propagated back.
5545 */
5546 ASSERT(!(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR));
5547
5548 /*
5549 * This is a logical I/O that wants to reexecute.
5550 *
5551 * Reexecute is top-down. When an i/o fails, if it's not
5552 * the root, it simply notifies its parent and sticks around.
5553 * The parent, seeing that it still has children in zio_done(),
5554 * does the same. This percolates all the way up to the root.
5555 * The root i/o will reexecute or suspend the entire tree.
5556 *
5557 * This approach ensures that zio_reexecute() honors
5558 * all the original i/o dependency relationships, e.g.
5559 * parents not executing until children are ready.
5560 */
5561 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
5562
5563 zio->io_gang_leader = NULL;
5564
5565 mutex_enter(&zio->io_lock);
5566 zio->io_state[ZIO_WAIT_DONE] = 1;
5567 mutex_exit(&zio->io_lock);
5568
5569 /*
5570 * "The Godfather" I/O monitors its children but is
5571 * not a true parent to them. It will track them through
5572 * the pipeline but severs its ties whenever they get into
5573 * trouble (e.g. suspended). This allows "The Godfather"
5574 * I/O to return status without blocking.
5575 */
5576 zl = NULL;
5577 for (pio = zio_walk_parents(zio, &zl); pio != NULL;
5578 pio = pio_next) {
5579 zio_link_t *remove_zl = zl;
5580 pio_next = zio_walk_parents(zio, &zl);
5581
5582 if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
5583 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
5584 zio_remove_child(pio, zio, remove_zl);
5585 /*
5586 * This is a rare code path, so we don't
5587 * bother with "next_to_execute".
5588 */
5589 zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
5590 NULL);
5591 }
5592 }
5593
5594 if ((pio = zio_unique_parent(zio)) != NULL) {
5595 /*
5596 * We're not a root i/o, so there's nothing to do
5597 * but notify our parent. Don't propagate errors
5598 * upward since we haven't permanently failed yet.
5599 */
5600 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
5601 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
5602 /*
5603 * This is a rare code path, so we don't bother with
5604 * "next_to_execute".
5605 */
5606 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
5607 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
5608 /*
5609 * We'd fail again if we reexecuted now, so suspend
5610 * until conditions improve (e.g. device comes online).
5611 */
5612 zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
5613 } else {
5614 /*
5615 * Reexecution is potentially a huge amount of work.
5616 * Hand it off to the otherwise-unused claim taskq.
5617 */
5618 spa_taskq_dispatch(zio->io_spa,
5619 ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
5620 zio_reexecute, zio, B_FALSE);
5621 }
5622 return (NULL);
5623 }
5624
5625 ASSERT(list_is_empty(&zio->io_child_list));
5626 ASSERT(zio->io_reexecute == 0);
5627 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
5628
5629 /*
5630 * Report any checksum errors, since the I/O is complete.
5631 */
5632 while (zio->io_cksum_report != NULL) {
5633 zio_cksum_report_t *zcr = zio->io_cksum_report;
5634 zio->io_cksum_report = zcr->zcr_next;
5635 zcr->zcr_next = NULL;
5636 zcr->zcr_finish(zcr, NULL);
5637 zfs_ereport_free_checksum(zcr);
5638 }
5639
5640 /*
5641 * It is the responsibility of the done callback to ensure that this
5642 * particular zio is no longer discoverable for adoption, and as
5643 * such, cannot acquire any new parents.
5644 */
5645 if (zio->io_done)
5646 zio->io_done(zio);
5647
5648 mutex_enter(&zio->io_lock);
5649 zio->io_state[ZIO_WAIT_DONE] = 1;
5650 mutex_exit(&zio->io_lock);
5651
5652 /*
5653 * We are done executing this zio. We may want to execute a parent
5654 * next. See the comment in zio_notify_parent().
5655 */
5656 zio_t *next_to_execute = NULL;
5657 zl = NULL;
5658 for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
5659 zio_link_t *remove_zl = zl;
5660 pio_next = zio_walk_parents(zio, &zl);
5661 zio_remove_child(pio, zio, remove_zl);
5662 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
5663 }
5664
5665 if (zio->io_waiter != NULL) {
5666 mutex_enter(&zio->io_lock);
5667 zio->io_executor = NULL;
5668 cv_broadcast(&zio->io_cv);
5669 mutex_exit(&zio->io_lock);
5670 } else {
5671 zio_destroy(zio);
5672 }
5673
5674 return (next_to_execute);
5675 }
5676
5677 /*
5678 * ==========================================================================
5679 * I/O pipeline definition
5680 * ==========================================================================
5681 */
5682 static zio_pipe_stage_t *zio_pipeline[] = {
5683 NULL,
5684 zio_read_bp_init,
5685 zio_write_bp_init,
5686 zio_free_bp_init,
5687 zio_issue_async,
5688 zio_write_compress,
5689 zio_encrypt,
5690 zio_checksum_generate,
5691 zio_nop_write,
5692 zio_brt_free,
5693 zio_ddt_read_start,
5694 zio_ddt_read_done,
5695 zio_ddt_write,
5696 zio_ddt_free,
5697 zio_gang_assemble,
5698 zio_gang_issue,
5699 zio_dva_throttle,
5700 zio_dva_allocate,
5701 zio_dva_free,
5702 zio_dva_claim,
5703 zio_ready,
5704 zio_vdev_io_start,
5705 zio_vdev_io_done,
5706 zio_vdev_io_assess,
5707 zio_checksum_verify,
5708 zio_dio_checksum_verify,
5709 zio_done
5710 };
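/*
 * A simplified sketch of how the table above is consumed (illustrative only;
 * the real loop lives in __zio_execute(), earlier in this file): each set bit
 * in io_pipeline selects the stage function at the corresponding index, the
 * stages run in the order listed, and a stage that returns NULL (as
 * zio_ready() and zio_done() do while waiting for children) stalls the
 * pipeline until the zio is re-dispatched, while a non-NULL return value is
 * the zio to continue executing.
 */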
5711
5712
5713
5714
5715 /*
5716 * Compare two zbookmark_phys_t's to see which we would reach first in a
5717 * pre-order traversal of the object tree.
5718 *
5719 * This is simple in every case aside from the meta-dnode object. For all other
5720 * objects, we traverse them in order (object 1 before object 2, and so on).
5721 * However, all of these objects are traversed while traversing object 0, since
5722 * the data it points to is the list of objects. Thus, we need to convert to a
5723 * canonical representation so we can compare meta-dnode bookmarks to
5724 * non-meta-dnode bookmarks.
5725 *
5726 * We do this by calculating "equivalents" for each field of the zbookmark.
5727 * zbookmarks outside of the meta-dnode use their own object and level, and
5728 * calculate the level 0 equivalent (the first L0 blkid that is contained in the
5729 * blocks this bookmark refers to) by multiplying their blkid by their span
5730 * (the number of L0 blocks contained within one block at their level).
5731 * zbookmarks inside the meta-dnode calculate their object equivalent
5732 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
5733 * level + 1<<31 (a value larger than any level could ever be) for their level.
5734 * This causes them to always compare before a bookmark in their object
5735 * equivalent, compare appropriately to bookmarks in other objects, and to
5736 * compare appropriately to other bookmarks in the meta-dnode.
5737 */
5738 int
5739 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
5740 const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
5741 {
5742 /*
5743 * These variables represent the "equivalent" values for the zbookmark,
5744 * after converting zbookmarks inside the meta dnode to their
5745 * normal-object equivalents.
5746 */
5747 uint64_t zb1obj, zb2obj;
5748 uint64_t zb1L0, zb2L0;
5749 uint64_t zb1level, zb2level;
5750
5751 if (zb1->zb_object == zb2->zb_object &&
5752 zb1->zb_level == zb2->zb_level &&
5753 zb1->zb_blkid == zb2->zb_blkid)
5754 return (0);
5755
5756 IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
5757 IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
5758
5759 /*
5760 * BP_SPANB calculates the span in blocks.
5761 */
5762 zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
5763 zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
5764
5765 if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
5766 zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
5767 zb1L0 = 0;
5768 zb1level = zb1->zb_level + COMPARE_META_LEVEL;
5769 } else {
5770 zb1obj = zb1->zb_object;
5771 zb1level = zb1->zb_level;
5772 }
5773
5774 if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
5775 zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
5776 zb2L0 = 0;
5777 zb2level = zb2->zb_level + COMPARE_META_LEVEL;
5778 } else {
5779 zb2obj = zb2->zb_object;
5780 zb2level = zb2->zb_level;
5781 }
5782
5783 /* Now that we have a canonical representation, do the comparison. */
5784 if (zb1obj != zb2obj)
5785 return (zb1obj < zb2obj ? -1 : 1);
5786 else if (zb1L0 != zb2L0)
5787 return (zb1L0 < zb2L0 ? -1 : 1);
5788 else if (zb1level != zb2level)
5789 return (zb1level > zb2level ? -1 : 1);
5790 /*
5791 * This can (theoretically) happen if the bookmarks have the same object
5792 * and level, but different blkids, if the block sizes are not the same.
5793 * There is presently no way to change the indirect block sizes.
5794 */
5795 return (0);
5796 }
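/*
 * A worked example of the canonicalization above (illustrative only; it
 * assumes a 128K indirect block size, ibs = 17, and a 16K dnode block size,
 * dbss = 32 512-byte sectors):
 *
 *	A level-1 bookmark in an ordinary object with blkid 3 spans
 *	BP_SPANB(17, 1) = 1 << (17 - 9) = 256 L0 blocks, so its L0 equivalent
 *	is 3 * 256 = 768; its object and level are used unchanged.
 *
 *	A level-0 bookmark in the meta-dnode with blkid 3 has L0 equivalent
 *	3 * BP_SPANB(17, 0) = 3, which becomes an object equivalent of
 *	3 * (32 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)) = 3 * 32 = 96 (each
 *	512-byte sector holds one dnode), an L0 equivalent of 0, and a level
 *	of 0 + COMPARE_META_LEVEL.  It therefore sorts after every bookmark
 *	in objects 0 through 95 and before every bookmark inside object 96.
 */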
5797
5798 /*
5799 * This function checks the following: given that last_block is the place that
5800 * our traversal stopped last time, does that guarantee that we've visited
5801 * every node under subtree_root? Since subtree_root may cover many blocks,
5802 * we can't just use the raw output of zbookmark_compare. Instead we pass in
5803 * a modified version of subtree_root: by incrementing its block id and then
5804 * checking whether that modified bookmark is at or before last_block, we can
5805 * tell whether or not having visited last_block implies that all of
5806 * subtree_root's children have been visited.
5807 */
5808 boolean_t
5809 zbookmark_subtree_completed(const dnode_phys_t *dnp,
5810 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
5811 {
5812 zbookmark_phys_t mod_zb = *subtree_root;
5813 mod_zb.zb_blkid++;
5814 ASSERT0(last_block->zb_level);
5815
5816 /* The objset_phys_t isn't before anything. */
5817 if (dnp == NULL)
5818 return (B_FALSE);
5819
5820 /*
5821 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
5822 * data block size in sectors, because that variable is only used if
5823 * the bookmark refers to a block in the meta-dnode. Since we don't
5824 * know without examining it what object it refers to, and there's no
5825 * harm in passing in this value in other cases, we always pass it in.
5826 *
5827 * We pass in 0 for the indirect block size shift because zb2 must be
5828 * level 0. The indirect block size is only used to calculate the span
5829 * of the bookmark, but since the bookmark must be level 0, the span is
5830 * always 1, so the math works out.
5831 *
5832 * If you make changes to how the zbookmark_compare code works, be sure
5833 * to make sure that this code still works afterwards.
5834 */
5835 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
5836 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
5837 last_block) <= 0);
5838 }
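/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * a resumable traversal that saved the level-0 bookmark where it previously
 * stopped can use this to skip an entire indirect subtree on the next pass:
 *
 *	if (zbookmark_subtree_completed(dnp, &indirect_zb, &resume_zb))
 *		return;	 -- everything under indirect_zb was already visited
 *
 * where indirect_zb is the bookmark of the indirect block being considered
 * and resume_zb is the saved resume point (level 0, as asserted above).
 */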
5839
5840 /*
5841 * This function is similar to zbookmark_subtree_completed(), but returns true
5842 * if subtree_root is equal to or ahead of last_block, i.e. still to be done.
5843 */
5844 boolean_t
5845 zbookmark_subtree_tbd(const dnode_phys_t *dnp,
5846 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
5847 {
5848 ASSERT0(last_block->zb_level);
5849 if (dnp == NULL)
5850 return (B_FALSE);
5851 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
5852 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
5853 last_block) >= 0);
5854 }
5855
5856 EXPORT_SYMBOL(zio_type_name);
5857 EXPORT_SYMBOL(zio_buf_alloc);
5858 EXPORT_SYMBOL(zio_data_buf_alloc);
5859 EXPORT_SYMBOL(zio_buf_free);
5860 EXPORT_SYMBOL(zio_data_buf_free);
5861
5862 ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
5863 "Max I/O completion time (milliseconds) before marking it as slow");
5864
5865 ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
5866 "Prioritize requeued I/O");
5867
5868 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
5869 "Defer frees starting in this pass");
5870
5871 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
5872 "Don't compress starting in this pass");
5873
5874 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
5875 "Rewrite new bps starting in this pass");
5876
5877 ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
5878 "Throttle block allocations in the ZIO pipeline");
5879
5880 ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
5881 "Log all slow ZIOs, not just those with vdevs");
5882